ins-pricing 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. ins_pricing/README.md +60 -0
  2. ins_pricing/__init__.py +102 -0
  3. ins_pricing/governance/README.md +18 -0
  4. ins_pricing/governance/__init__.py +20 -0
  5. ins_pricing/governance/approval.py +93 -0
  6. ins_pricing/governance/audit.py +37 -0
  7. ins_pricing/governance/registry.py +99 -0
  8. ins_pricing/governance/release.py +159 -0
  9. ins_pricing/modelling/BayesOpt.py +146 -0
  10. ins_pricing/modelling/BayesOpt_USAGE.md +925 -0
  11. ins_pricing/modelling/BayesOpt_entry.py +575 -0
  12. ins_pricing/modelling/BayesOpt_incremental.py +731 -0
  13. ins_pricing/modelling/Explain_Run.py +36 -0
  14. ins_pricing/modelling/Explain_entry.py +539 -0
  15. ins_pricing/modelling/Pricing_Run.py +36 -0
  16. ins_pricing/modelling/README.md +33 -0
  17. ins_pricing/modelling/__init__.py +44 -0
  18. ins_pricing/modelling/bayesopt/__init__.py +98 -0
  19. ins_pricing/modelling/bayesopt/config_preprocess.py +303 -0
  20. ins_pricing/modelling/bayesopt/core.py +1476 -0
  21. ins_pricing/modelling/bayesopt/models.py +2196 -0
  22. ins_pricing/modelling/bayesopt/trainers.py +2446 -0
  23. ins_pricing/modelling/bayesopt/utils.py +1021 -0
  24. ins_pricing/modelling/cli_common.py +136 -0
  25. ins_pricing/modelling/explain/__init__.py +55 -0
  26. ins_pricing/modelling/explain/gradients.py +334 -0
  27. ins_pricing/modelling/explain/metrics.py +176 -0
  28. ins_pricing/modelling/explain/permutation.py +155 -0
  29. ins_pricing/modelling/explain/shap_utils.py +146 -0
  30. ins_pricing/modelling/notebook_utils.py +284 -0
  31. ins_pricing/modelling/plotting/__init__.py +45 -0
  32. ins_pricing/modelling/plotting/common.py +63 -0
  33. ins_pricing/modelling/plotting/curves.py +572 -0
  34. ins_pricing/modelling/plotting/diagnostics.py +139 -0
  35. ins_pricing/modelling/plotting/geo.py +362 -0
  36. ins_pricing/modelling/plotting/importance.py +121 -0
  37. ins_pricing/modelling/run_logging.py +133 -0
  38. ins_pricing/modelling/tests/conftest.py +8 -0
  39. ins_pricing/modelling/tests/test_cross_val_generic.py +66 -0
  40. ins_pricing/modelling/tests/test_distributed_utils.py +18 -0
  41. ins_pricing/modelling/tests/test_explain.py +56 -0
  42. ins_pricing/modelling/tests/test_geo_tokens_split.py +49 -0
  43. ins_pricing/modelling/tests/test_graph_cache.py +33 -0
  44. ins_pricing/modelling/tests/test_plotting.py +63 -0
  45. ins_pricing/modelling/tests/test_plotting_library.py +150 -0
  46. ins_pricing/modelling/tests/test_preprocessor.py +48 -0
  47. ins_pricing/modelling/watchdog_run.py +211 -0
  48. ins_pricing/pricing/README.md +44 -0
  49. ins_pricing/pricing/__init__.py +27 -0
  50. ins_pricing/pricing/calibration.py +39 -0
  51. ins_pricing/pricing/data_quality.py +117 -0
  52. ins_pricing/pricing/exposure.py +85 -0
  53. ins_pricing/pricing/factors.py +91 -0
  54. ins_pricing/pricing/monitoring.py +99 -0
  55. ins_pricing/pricing/rate_table.py +78 -0
  56. ins_pricing/production/__init__.py +21 -0
  57. ins_pricing/production/drift.py +30 -0
  58. ins_pricing/production/monitoring.py +143 -0
  59. ins_pricing/production/scoring.py +40 -0
  60. ins_pricing/reporting/README.md +20 -0
  61. ins_pricing/reporting/__init__.py +11 -0
  62. ins_pricing/reporting/report_builder.py +72 -0
  63. ins_pricing/reporting/scheduler.py +45 -0
  64. ins_pricing/setup.py +41 -0
  65. ins_pricing v2/__init__.py +23 -0
  66. ins_pricing v2/governance/__init__.py +20 -0
  67. ins_pricing v2/governance/approval.py +93 -0
  68. ins_pricing v2/governance/audit.py +37 -0
  69. ins_pricing v2/governance/registry.py +99 -0
  70. ins_pricing v2/governance/release.py +159 -0
  71. ins_pricing v2/modelling/Explain_Run.py +36 -0
  72. ins_pricing v2/modelling/Pricing_Run.py +36 -0
  73. ins_pricing v2/modelling/__init__.py +151 -0
  74. ins_pricing v2/modelling/cli_common.py +141 -0
  75. ins_pricing v2/modelling/config.py +249 -0
  76. ins_pricing v2/modelling/config_preprocess.py +254 -0
  77. ins_pricing v2/modelling/core.py +741 -0
  78. ins_pricing v2/modelling/data_container.py +42 -0
  79. ins_pricing v2/modelling/explain/__init__.py +55 -0
  80. ins_pricing v2/modelling/explain/gradients.py +334 -0
  81. ins_pricing v2/modelling/explain/metrics.py +176 -0
  82. ins_pricing v2/modelling/explain/permutation.py +155 -0
  83. ins_pricing v2/modelling/explain/shap_utils.py +146 -0
  84. ins_pricing v2/modelling/features.py +215 -0
  85. ins_pricing v2/modelling/model_manager.py +148 -0
  86. ins_pricing v2/modelling/model_plotting.py +463 -0
  87. ins_pricing v2/modelling/models.py +2203 -0
  88. ins_pricing v2/modelling/notebook_utils.py +294 -0
  89. ins_pricing v2/modelling/plotting/__init__.py +45 -0
  90. ins_pricing v2/modelling/plotting/common.py +63 -0
  91. ins_pricing v2/modelling/plotting/curves.py +572 -0
  92. ins_pricing v2/modelling/plotting/diagnostics.py +139 -0
  93. ins_pricing v2/modelling/plotting/geo.py +362 -0
  94. ins_pricing v2/modelling/plotting/importance.py +121 -0
  95. ins_pricing v2/modelling/run_logging.py +133 -0
  96. ins_pricing v2/modelling/tests/conftest.py +8 -0
  97. ins_pricing v2/modelling/tests/test_cross_val_generic.py +66 -0
  98. ins_pricing v2/modelling/tests/test_distributed_utils.py +18 -0
  99. ins_pricing v2/modelling/tests/test_explain.py +56 -0
  100. ins_pricing v2/modelling/tests/test_geo_tokens_split.py +49 -0
  101. ins_pricing v2/modelling/tests/test_graph_cache.py +33 -0
  102. ins_pricing v2/modelling/tests/test_plotting.py +63 -0
  103. ins_pricing v2/modelling/tests/test_plotting_library.py +150 -0
  104. ins_pricing v2/modelling/tests/test_preprocessor.py +48 -0
  105. ins_pricing v2/modelling/trainers.py +2447 -0
  106. ins_pricing v2/modelling/utils.py +1020 -0
  107. ins_pricing v2/modelling/watchdog_run.py +211 -0
  108. ins_pricing v2/pricing/__init__.py +27 -0
  109. ins_pricing v2/pricing/calibration.py +39 -0
  110. ins_pricing v2/pricing/data_quality.py +117 -0
  111. ins_pricing v2/pricing/exposure.py +85 -0
  112. ins_pricing v2/pricing/factors.py +91 -0
  113. ins_pricing v2/pricing/monitoring.py +99 -0
  114. ins_pricing v2/pricing/rate_table.py +78 -0
  115. ins_pricing v2/production/__init__.py +21 -0
  116. ins_pricing v2/production/drift.py +30 -0
  117. ins_pricing v2/production/monitoring.py +143 -0
  118. ins_pricing v2/production/scoring.py +40 -0
  119. ins_pricing v2/reporting/__init__.py +11 -0
  120. ins_pricing v2/reporting/report_builder.py +72 -0
  121. ins_pricing v2/reporting/scheduler.py +45 -0
  122. ins_pricing v2/scripts/BayesOpt_incremental.py +722 -0
  123. ins_pricing v2/scripts/Explain_entry.py +545 -0
  124. ins_pricing v2/scripts/__init__.py +1 -0
  125. ins_pricing v2/scripts/train.py +568 -0
  126. ins_pricing v2/setup.py +55 -0
  127. ins_pricing v2/smoke_test.py +28 -0
  128. ins_pricing-0.1.6.dist-info/METADATA +78 -0
  129. ins_pricing-0.1.6.dist-info/RECORD +169 -0
  130. ins_pricing-0.1.6.dist-info/WHEEL +5 -0
  131. ins_pricing-0.1.6.dist-info/top_level.txt +4 -0
  132. user_packages/__init__.py +105 -0
  133. user_packages legacy/BayesOpt.py +5659 -0
  134. user_packages legacy/BayesOpt_entry.py +513 -0
  135. user_packages legacy/BayesOpt_incremental.py +685 -0
  136. user_packages legacy/Pricing_Run.py +36 -0
  137. user_packages legacy/Try/BayesOpt Legacy251213.py +3719 -0
  138. user_packages legacy/Try/BayesOpt Legacy251215.py +3758 -0
  139. user_packages legacy/Try/BayesOpt lagecy251201.py +3506 -0
  140. user_packages legacy/Try/BayesOpt lagecy251218.py +3992 -0
  141. user_packages legacy/Try/BayesOpt legacy.py +3280 -0
  142. user_packages legacy/Try/BayesOpt.py +838 -0
  143. user_packages legacy/Try/BayesOptAll.py +1569 -0
  144. user_packages legacy/Try/BayesOptAllPlatform.py +909 -0
  145. user_packages legacy/Try/BayesOptCPUGPU.py +1877 -0
  146. user_packages legacy/Try/BayesOptSearch.py +830 -0
  147. user_packages legacy/Try/BayesOptSearchOrigin.py +829 -0
  148. user_packages legacy/Try/BayesOptV1.py +1911 -0
  149. user_packages legacy/Try/BayesOptV10.py +2973 -0
  150. user_packages legacy/Try/BayesOptV11.py +3001 -0
  151. user_packages legacy/Try/BayesOptV12.py +3001 -0
  152. user_packages legacy/Try/BayesOptV2.py +2065 -0
  153. user_packages legacy/Try/BayesOptV3.py +2209 -0
  154. user_packages legacy/Try/BayesOptV4.py +2342 -0
  155. user_packages legacy/Try/BayesOptV5.py +2372 -0
  156. user_packages legacy/Try/BayesOptV6.py +2759 -0
  157. user_packages legacy/Try/BayesOptV7.py +2832 -0
  158. user_packages legacy/Try/BayesOptV8Codex.py +2731 -0
  159. user_packages legacy/Try/BayesOptV8Gemini.py +2614 -0
  160. user_packages legacy/Try/BayesOptV9.py +2927 -0
  161. user_packages legacy/Try/BayesOpt_entry legacy.py +313 -0
  162. user_packages legacy/Try/ModelBayesOptSearch.py +359 -0
  163. user_packages legacy/Try/ResNetBayesOptSearch.py +249 -0
  164. user_packages legacy/Try/XgbBayesOptSearch.py +121 -0
  165. user_packages legacy/Try/xgbbayesopt.py +523 -0
  166. user_packages legacy/__init__.py +19 -0
  167. user_packages legacy/cli_common.py +124 -0
  168. user_packages legacy/notebook_utils.py +228 -0
  169. user_packages legacy/watchdog_run.py +202 -0
@@ -0,0 +1,2342 @@
1
+ # 数据在CPU和GPU之间传输会带来较大开销,但可以多CUDA流同时传输数据和计算,从而实现更大数据集的操作。
2
+
3
+ import copy
4
+ from email.mime import base
5
+ import gc
6
+ import math
7
+ import os
8
+ from dataclasses import dataclass
9
+ from pathlib import Path
10
+ from re import X
11
+ from typing import Any, Dict, List, Optional
12
+
13
+ import joblib
14
+ import matplotlib.pyplot as plt
15
+ import numpy as np # 1.26.2
16
+ import optuna # 4.3.0
17
+ import pandas as pd # 2.2.3
18
+ import shap
19
+
20
+ import torch # 版本: 1.10.1+cu111
21
+ import torch.nn as nn
22
+ import torch.nn.functional as F
23
+ import xgboost as xgb # 1.7.0
24
+
25
+ from torch.utils.data import Dataset, DataLoader, TensorDataset
26
+ from torch.cuda.amp import autocast, GradScaler
27
+ from torch.nn.utils import clip_grad_norm_
28
+ from sklearn.model_selection import ShuffleSplit, cross_val_score # 1.2.2
29
+ from sklearn.preprocessing import StandardScaler
30
+ from sklearn.metrics import make_scorer, mean_tweedie_deviance
31
+
32
+ # =============================================================================
33
+ # Constants & utilities
34
+ # =============================================================================
35
+ EPS = 1e-8
36
+
37
+
38
def ensure_parent_dir(file_path: str) -> None:
    """Create the parent directory of *file_path* if it does not exist yet."""
    parent = os.path.dirname(file_path)
    if not parent:
        # Bare filename with no directory component - nothing to create.
        return
    os.makedirs(parent, exist_ok=True)
43
+
44
+
45
def compute_batch_size(data_size: int, learning_rate: float, batch_num: int, minimum: int) -> int:
    """Heuristic batch size: scales with sqrt(lr / 1e-4) and samples-per-batch.

    The estimate is floored by *minimum*, then clamped into [1, data_size].
    """
    lr_factor = (learning_rate / 1e-4) ** 0.5
    per_batch = data_size / max(batch_num, 1)
    estimated = int(lr_factor * per_batch)
    return max(1, min(data_size, max(minimum, estimated)))
50
+
51
+
52
+ # 定义在 PyTorch 环境下的 Tweedie 偏差损失函数
53
+ # 参考文档:https://scikit-learn.org/stable/modules/model_evaluation.html#mean-poisson-gamma-and-tweedie-deviances
54
+
55
+
56
def tweedie_loss(pred, target, p=1.5, eps=1e-6, max_clip=1e6):
    """Element-wise Tweedie deviance between ``pred`` and ``target``.

    Special cases: p=1 Poisson, p=2 gamma, p=0 Gaussian; any other power
    uses the general Tweedie formula. Predictions are clamped to ``eps`` so
    logs/powers stay finite, and the result goes through ``nan_to_num`` so a
    bad batch cannot poison training with NaN/inf.

    Args:
        pred: predicted values (tensor); clamped to >= eps.
        target: observed values (tensor, same shape as pred).
        p: Tweedie power parameter.
        eps: numerical floor for clamping / log stabilisation.
        max_clip: replacement magnitude for +/- inf.

    Returns:
        Tensor of per-element deviances, same shape as the inputs.
    """
    # Clamp predictions to positive values for numerical stability.
    pred_clamped = torch.clamp(pred, min=eps)
    if p == 1:
        # Poisson deviance: 2 * (t*log(t/mu) - t + mu).
        # BUGFIX: the original used ``-target + pred_clamped`` here, which
        # flips the sign of the (t - mu) term and yields negative deviances
        # (e.g. pred=1, target=0 gave -2 instead of +2).
        term1 = target * torch.log(target / pred_clamped + eps)
        term2 = target - pred_clamped
        term3 = 0
    elif p == 0:
        # Gaussian: squared error (2 * 0.5 * (t - mu)^2).
        term1 = 0.5 * torch.pow(target - pred_clamped, 2)
        term2 = 0
        term3 = 0
    elif p == 2:
        # Gamma deviance: 2 * (log(mu/t) + t/mu - 1).
        term1 = torch.log(pred_clamped / target + eps)
        term2 = -target / pred_clamped + 1
        term3 = 0
    else:
        # General Tweedie deviance for p not in {0, 1, 2}.
        term1 = torch.pow(target, 2 - p) / ((1 - p) * (2 - p))
        term2 = target * torch.pow(pred_clamped, 1 - p) / (1 - p)
        term3 = torch.pow(pred_clamped, 2 - p) / (2 - p)
    # Tweedie negative log-likelihood (constant terms dropped), sanitised.
    return torch.nan_to_num(2 * (term1 - term2 + term3), nan=eps, posinf=max_clip, neginf=-max_clip)
81
+
82
+ # 定义释放CUDA内存函数
83
+
84
+
85
def free_cuda():
    """Best-effort release of GPU memory.

    Moves every reachable object exposing a callable ``.to`` (modules,
    tensors) to CPU, runs garbage collection, then flushes the CUDA caching
    allocator. Safe to call on CPU-only machines.
    """
    print(">>> Moving all models to CPU...")
    for obj in gc.get_objects():
        try:
            # Skip objects (e.g. torch.device) that cannot be moved.
            if hasattr(obj, "to") and callable(obj.to):
                obj.to("cpu")
        except Exception:
            # Best effort: some objects expose a ``.to`` with an
            # incompatible signature - ignore them.
            pass

    print(">>> Deleting tensors, optimizers, dataloaders...")
    gc.collect()

    print(">>> Emptying CUDA cache...")
    # BUGFIX: guard the CUDA calls - torch.cuda.synchronize() raises on
    # CPU-only builds; previously this function crashed without a GPU.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.synchronize()

    print(">>> CUDA memory freed.")
103
+
104
+
105
+ # =============================================================================
106
+ # Plotting helpers
107
+ # =============================================================================
108
+
109
+ # 定义分箱函数
110
+
111
+
112
def split_data(data, col_nme, wgt_nme, n_bins=10):
    """Bin rows into ``n_bins`` weight-balanced buckets along *col_nme*.

    Rows are sorted by *col_nme*; each bucket holds roughly 1/n_bins of the
    total weight in *wgt_nme*. Returns the per-bucket sums of all numeric
    columns (grouped on the 'bins' label). The input frame is not modified.
    """
    ranked = data.sort_values(by=col_nme, ascending=True).copy()
    ranked['cum_weight'] = ranked[wgt_nme].cumsum()
    total_weight = ranked[wgt_nme].sum()
    if total_weight <= 1e-8:
        # Degenerate weights (threshold mirrors the module-level EPS):
        # collapse everything into a single bucket.
        ranked.loc[:, 'bins'] = 0
    else:
        ranked.loc[:, 'bins'] = np.floor(
            ranked['cum_weight'] * float(n_bins) / total_weight
        )
        # The last row lands exactly on n_bins; fold it into the top bucket.
        ranked.loc[ranked['bins'] == n_bins, 'bins'] = n_bins - 1
    return ranked.groupby(['bins'], observed=True).sum(numeric_only=True)
125
+
126
+ # 定义提纯曲线(Lift)绘制函数
127
+
128
+
129
def plot_lift_list(pred_model, w_pred_list, w_act_list,
                   weight_list, tgt_nme, n_bins=10,
                   fig_nme='Lift Chart'):
    """Plot a lift chart: actual vs. predicted per weight-balanced decile.

    Rows are bucketed by the raw prediction via ``split_data``; per bucket
    the weighted-average predicted and actual values are drawn as lines,
    with bucket exposure as bars on a secondary axis. The figure is saved
    to ``<cwd>/plot/05_<tgt_nme>_<fig_nme>.png`` and closed (nothing is
    returned).

    NOTE(review): ``w_pred_list``/``w_act_list`` appear to be
    weight-multiplied totals - they are divided by the bucket weight
    below; confirm against callers.
    """
    lift_data = pd.DataFrame()
    lift_data.loc[:, 'pred'] = pred_model
    lift_data.loc[:, 'w_pred'] = w_pred_list
    lift_data.loc[:, 'act'] = w_act_list
    lift_data.loc[:, 'weight'] = weight_list
    # Weight-balanced buckets ordered by prediction.
    plot_data = split_data(lift_data, 'pred', 'weight', n_bins)
    # Per-bucket weighted averages of predicted and actual values.
    plot_data['exp_v'] = plot_data['w_pred'] / plot_data['weight']
    plot_data['act_v'] = plot_data['act'] / plot_data['weight']
    plot_data.reset_index(inplace=True)
    fig = plt.figure(figsize=(7, 5))
    ax = fig.add_subplot(111)
    ax.plot(plot_data.index, plot_data['act_v'],
            label='Actual', color='red')
    ax.plot(plot_data.index, plot_data['exp_v'],
            label='Predicted', color='blue')
    ax.set_title(
        'Lift Chart of %s' % tgt_nme, fontsize=8)
    plt.xticks(plot_data.index,
               plot_data.index,
               rotation=90, fontsize=6)
    plt.yticks(fontsize=6)
    plt.legend(loc='upper left',
               fontsize=5, frameon=False)
    plt.margins(0.05)
    # Secondary axis: bucket exposure as bars.
    ax2 = ax.twinx()
    ax2.bar(plot_data.index, plot_data['weight'],
            alpha=0.5, color='seagreen',
            label='Earned Exposure')
    plt.yticks(fontsize=6)
    plt.legend(loc='upper right',
               fontsize=5, frameon=False)
    plt.subplots_adjust(wspace=0.3)
    # Persist under ./plot, creating the directory if needed.
    save_path = os.path.join(
        os.getcwd(), 'plot', f'05_{tgt_nme}_{fig_nme}.png')
    ensure_parent_dir(save_path)
    plt.savefig(save_path, dpi=300)
    plt.close(fig)
169
+
170
+ # 定义双提纯曲线绘制函数
171
+
172
+
173
def plot_dlift_list(pred_model_1, pred_model_2,
                    model_nme_1, model_nme_2,
                    tgt_nme,
                    w_list, w_act_list, n_bins=10,
                    fig_nme='Double Lift Chart'):
    """Plot a double-lift chart comparing two models on the same target.

    Rows are bucketed by the prediction ratio ``pred1 / pred2``; per bucket
    each model's weighted prediction is shown relative to the actual (so
    the actual line is the constant 1.0 baseline), with bucket exposure as
    bars on a secondary axis. Saved to
    ``<cwd>/plot/06_<tgt_nme>_<fig_nme>.png``; nothing is returned.

    NOTE(review): ``w_act_list`` appears to be a weight-multiplied actual
    total (predictions are multiplied by weight before being divided by
    it) - confirm against callers.
    """
    lift_data = pd.DataFrame()
    lift_data.loc[:, 'pred1'] = pred_model_1
    lift_data.loc[:, 'pred2'] = pred_model_2
    # Sort key: ratio of the two model predictions.
    lift_data.loc[:, 'diff_ly'] = lift_data['pred1'] / lift_data['pred2']
    lift_data.loc[:, 'act'] = w_act_list
    lift_data.loc[:, 'weight'] = w_list
    lift_data.loc[:, 'w_pred1'] = lift_data['pred1'] * lift_data['weight']
    lift_data.loc[:, 'w_pred2'] = lift_data['pred2'] * lift_data['weight']
    plot_data = split_data(lift_data, 'diff_ly', 'weight', n_bins)
    # Each model's weighted prediction relative to the bucket actual.
    plot_data['exp_v1'] = plot_data['w_pred1'] / plot_data['act']
    plot_data['exp_v2'] = plot_data['w_pred2'] / plot_data['act']
    # Actual relative to itself: constant 1.0 baseline.
    plot_data['act_v'] = plot_data['act']/plot_data['act']
    plot_data.reset_index(inplace=True)
    fig = plt.figure(figsize=(7, 5))
    ax = fig.add_subplot(111)
    ax.plot(plot_data.index, plot_data['act_v'],
            label='Actual', color='red')
    ax.plot(plot_data.index, plot_data['exp_v1'],
            label=model_nme_1, color='blue')
    ax.plot(plot_data.index, plot_data['exp_v2'],
            label=model_nme_2, color='black')
    ax.set_title(
        'Double Lift Chart of %s' % tgt_nme, fontsize=8)
    plt.xticks(plot_data.index,
               plot_data.index,
               rotation=90, fontsize=6)
    plt.xlabel('%s / %s' % (model_nme_1, model_nme_2), fontsize=6)
    plt.yticks(fontsize=6)
    plt.legend(loc='upper left',
               fontsize=5, frameon=False)
    plt.margins(0.1)
    plt.subplots_adjust(bottom=0.25, top=0.95, right=0.8)
    # Secondary axis: bucket exposure as bars.
    ax2 = ax.twinx()
    ax2.bar(plot_data.index, plot_data['weight'],
            alpha=0.5, color='seagreen',
            label='Earned Exposure')
    plt.yticks(fontsize=6)
    plt.legend(loc='upper right',
               fontsize=5, frameon=False)
    plt.subplots_adjust(wspace=0.3)
    # Persist under ./plot, creating the directory if needed.
    save_path = os.path.join(
        os.getcwd(), 'plot', f'06_{tgt_nme}_{fig_nme}.png')
    ensure_parent_dir(save_path)
    plt.savefig(save_path, dpi=300)
    plt.close(fig)
223
+
224
+
225
+ # =============================================================================
226
+ # ResNet model & sklearn-style wrapper
227
+ # =============================================================================
228
+
229
+ # 开始定义ResNet模型结构
230
+ # 残差块:两层线性 + ReLU + 残差连接
231
+ # ResBlock 继承 nn.Module
232
class ResBlock(nn.Module):
    """Pre-activation residual block for tabular data.

    Norm -> Linear -> ReLU -> Dropout -> Norm -> Linear, added back to the
    input through a learnable residual scale, then a final ReLU.
    """

    def __init__(self, dim: int, dropout: float = 0.1,
                 use_layernorm: bool = False, residual_scale: float = 0.1
                 ):
        super().__init__()
        self.use_layernorm = use_layernorm

        # LayerNorm normalises the last dimension; BatchNorm1d is kept as
        # an alternative behind the same switch.
        norm_factory = nn.LayerNorm if use_layernorm else nn.BatchNorm1d

        self.norm1 = norm_factory(dim)
        self.fc1 = nn.Linear(dim, dim, bias=True)
        self.act = nn.ReLU(inplace=True)
        self.dropout = nn.Identity() if dropout <= 0.0 else nn.Dropout(dropout)
        self.norm2 = norm_factory(dim)
        self.fc2 = nn.Linear(dim, dim, bias=True)

        # Learnable residual scaling keeps the branch small at init so the
        # trunk is not destabilised early in training.
        self.res_scale = nn.Parameter(
            torch.tensor(residual_scale, dtype=torch.float32)
        )

    def forward(self, x):
        """Pre-activation forward pass over a (batch, dim) tensor."""
        branch = self.fc1(self.norm1(x))
        branch = self.dropout(self.act(branch))
        branch = self.fc2(self.norm2(branch))
        # Scale the residual branch before adding it back.
        return F.relu(x + self.res_scale * branch)
266
+
267
+ # ResNetSequential 继承 nn.Module,定义整个网络结构
268
+
269
+
270
class ResNetSequential(nn.Module):
    """Tabular ResNet producing strictly positive scalar outputs.

    Architecture: Linear stem -> Norm -> ReLU -> ``block_num`` residual
    blocks -> Linear head -> Softplus. Input shape (batch, input_dim),
    output shape (batch, 1).
    """

    def __init__(self, input_dim: int, hidden_dim: int = 64, block_num: int = 2,
                 use_layernorm: bool = True, dropout: float = 0.1,
                 residual_scale: float = 0.1):
        super().__init__()

        net = nn.Sequential()
        # Stem: project raw features into the hidden width.
        net.add_module('fc1', nn.Linear(input_dim, hidden_dim))
        stem_norm = nn.LayerNorm(hidden_dim) if use_layernorm \
            else nn.BatchNorm1d(hidden_dim)
        net.add_module('norm1', stem_norm)
        net.add_module('relu1', nn.ReLU(inplace=True))

        # Residual trunk (module names ResBlk_1..ResBlk_n preserved).
        for idx in range(1, block_num + 1):
            net.add_module(
                f'ResBlk_{idx}',
                ResBlock(
                    hidden_dim,
                    dropout=dropout,
                    use_layernorm=use_layernorm,
                    residual_scale=residual_scale)
            )

        # Head: scalar output; Softplus keeps predictions positive,
        # matching Tweedie/gamma targets.
        net.add_module('fc_out', nn.Linear(hidden_dim, 1))
        net.add_module('softplus', nn.Softplus())
        self.net = net

    def forward(self, x):
        """Run the full stack on a (batch, input_dim) tensor."""
        return self.net(x)
305
+
306
+ # 定义ResNet模型的Scikit-Learn接口类
307
+
308
+
309
class ResNetSklearn(nn.Module):
    """Scikit-learn style wrapper around ``ResNetSequential``.

    Trains with a weight-averaged Tweedie deviance loss, mixed precision on
    CUDA, gradient clipping, and early stopping on an optional validation
    set. ``forward`` also accepts NumPy arrays so the model can be handed
    directly to SHAP explainers.
    """

    def __init__(self, model_nme: str, input_dim: int, hidden_dim: int = 64,
                 block_num: int = 2, batch_num: int = 100, epochs: int = 100,
                 tweedie_power: float = 1.5, learning_rate: float = 0.01, patience: int = 10,
                 use_layernorm: bool = True, dropout: float = 0.1,
                 residual_scale: float = 0.1):
        super(ResNetSklearn, self).__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.block_num = block_num
        self.batch_num = batch_num
        self.epochs = epochs
        self.model_nme = model_nme
        self.learning_rate = learning_rate
        self.patience = patience
        self.use_layernorm = use_layernorm
        self.dropout = dropout
        self.residual_scale = residual_scale

        # Device preference: cuda > mps > cpu.
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
        elif torch.backends.mps.is_available():
            self.device = torch.device('mps')
        else:
            self.device = torch.device('cpu')

        # Tweedie power chosen from a substring of the model name:
        # 'f' -> 1 (Poisson), 's' -> 2 (gamma), otherwise the given power.
        # NOTE(review): presumably 'f' means frequency and 's' severity -
        # confirm the naming convention with callers.
        if 'f' in self.model_nme:
            self.tw_power = 1
        elif 's' in self.model_nme:
            self.tw_power = 2
        else:
            self.tw_power = tweedie_power

        # Build the network and move it to the chosen device.
        self.resnet = ResNetSequential(
            self.input_dim,
            self.hidden_dim,
            self.block_num,
            use_layernorm=self.use_layernorm,
            dropout=self.dropout,
            residual_scale=self.residual_scale
        ).to(self.device)

    def forward(self, x):
        # Override forward so SHAP (KernelExplainer) can pass NumPy arrays.
        if isinstance(x, np.ndarray):
            # 1. Build a tensor from the NumPy array.
            x_tensor = torch.tensor(x, dtype=torch.float32)
        else:
            # 2. Keep compatibility with tensor inputs.
            x_tensor = x

        # 3. Make sure the input lives on the model's device.
        x_tensor = x_tensor.to(self.device)

        # 4. Predict with the underlying ResNet
        #    (self.resnet was moved to self.device at construction).
        y_pred = self.resnet(x_tensor)
        return y_pred

    def fit(self, X_train, y_train, w_train=None,
            X_val=None, y_val=None, w_val=None):
        """Train on pandas inputs; optional weights and validation split.

        X_*/y_*/w_* are expected to be pandas objects (``.values`` is read).
        Missing weights default to 1. Early stopping restores the best
        validation state when a validation set is supplied.
        """

        # === 1. Training set: keep on CPU; the DataLoader ships batches
        # to the device. ===
        X_tensor = torch.tensor(X_train.values, dtype=torch.float32)
        y_tensor = torch.tensor(
            y_train.values, dtype=torch.float32).view(-1, 1)
        if w_train is not None:
            w_tensor = torch.tensor(
                w_train.values, dtype=torch.float32).view(-1, 1)
        else:
            # No weights supplied: weight every sample equally.
            w_tensor = torch.ones_like(y_tensor)

        # === 2. Validation set: built on CPU, moved to the device once. ===
        has_val = X_val is not None and y_val is not None
        if has_val:
            X_val_tensor = torch.tensor(X_val.values, dtype=torch.float32)
            y_val_tensor = torch.tensor(
                y_val.values, dtype=torch.float32).view(-1, 1)
            if w_val is not None:
                w_val_tensor = torch.tensor(
                    w_val.values, dtype=torch.float32).view(-1, 1)
            else:
                w_val_tensor = torch.ones_like(y_val_tensor)
        else:
            X_val_tensor = y_val_tensor = w_val_tensor = None

        # === 3. DataLoader ===
        dataset = TensorDataset(X_tensor, y_tensor, w_tensor)
        batch_size = compute_batch_size(
            data_size=len(dataset),
            learning_rate=self.learning_rate,
            batch_num=self.batch_num,
            minimum=64
        )

        dataloader = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=1,  # tabular data rarely needs more workers
            pin_memory=(self.device.type == 'cuda')
        )

        # === 4. Optimiser and AMP ===
        # Adam + AMP: stabilises the loss while keeping GPU throughput.
        self.optimizer = torch.optim.Adam(
            self.resnet.parameters(), lr=self.learning_rate)
        self.scaler = GradScaler(enabled=(self.device.type == 'cuda'))

        # === 5. Early-stopping state ===
        best_loss, patience_counter = float('inf'), 0
        best_model_state = None

        # Move the validation tensors to the device once, up front.
        if has_val:
            X_val_dev = X_val_tensor.to(self.device, non_blocking=True)
            y_val_dev = y_val_tensor.to(self.device, non_blocking=True)
            w_val_dev = w_val_tensor.to(self.device, non_blocking=True)

        # === 6. Training loop ===
        for epoch in range(1, self.epochs + 1):
            self.resnet.train()
            for X_batch, y_batch, w_batch in dataloader:
                self.optimizer.zero_grad()

                X_batch = X_batch.to(self.device, non_blocking=True)
                y_batch = y_batch.to(self.device, non_blocking=True)
                w_batch = w_batch.to(self.device, non_blocking=True)

                with autocast(enabled=(self.device.type == 'cuda')):
                    y_pred = self.resnet(X_batch)
                    # Clamp predictions so the Tweedie loss stays finite.
                    y_pred = torch.clamp(y_pred, min=1e-6)

                    # Weighted mean of the per-sample Tweedie deviance.
                    losses = tweedie_loss(
                        y_pred, y_batch, p=self.tw_power).view(-1)
                    weighted_loss = (losses * w_batch.view(-1)
                                     ).sum() / torch.clamp(w_batch.sum(), min=EPS)

                self.scaler.scale(weighted_loss).backward()

                # Unscale before clipping so the clip threshold is in
                # true gradient units (CUDA/AMP only).
                if self.device.type == 'cuda':
                    self.scaler.unscale_(self.optimizer)
                    clip_grad_norm_(self.resnet.parameters(), max_norm=1.0)

                self.scaler.step(self.optimizer)
                self.scaler.update()

            # === 7. Validation loss and early stopping ===
            if has_val:
                self.resnet.eval()
                with torch.no_grad(), autocast(enabled=(self.device.type == 'cuda')):
                    y_val_pred = self.resnet(X_val_dev)
                    y_val_pred = torch.clamp(y_val_pred, min=1e-6)

                    val_loss_values = tweedie_loss(
                        y_val_pred, y_val_dev, p=self.tw_power
                    ).view(-1)
                    val_weighted_loss = (
                        val_loss_values * w_val_dev.view(-1)
                    ).sum() / torch.clamp(w_val_dev.sum(), min=EPS)

                if val_weighted_loss < best_loss:
                    # New best epoch: snapshot the weights.
                    best_loss = val_weighted_loss
                    patience_counter = 0
                    best_model_state = copy.deepcopy(self.resnet.state_dict())
                else:
                    patience_counter += 1

                # Patience exhausted: restore the best weights and stop.
                if patience_counter >= self.patience and best_model_state is not None:
                    self.resnet.load_state_dict(best_model_state)
                    break
        # Always end on the best validation weights (covers the case where
        # training ran out of epochs before patience was exhausted).
        if has_val and best_model_state is not None:
            self.resnet.load_state_dict(best_model_state)

    # ---------------- prediction ----------------

    def predict(self, X_test):
        """Predict positive values; returns a flat NumPy array."""
        self.resnet.eval()
        # Convert a DataFrame input to a NumPy array first.
        if isinstance(X_test, pd.DataFrame):
            X_np = X_test.values.astype(np.float32)
        else:
            X_np = X_test

        with torch.no_grad():
            # Calling self (the ResNetSklearn instance) triggers forward,
            # which handles NumPy conversion and device placement.
            y_pred = self(X_np).cpu().numpy()
            # Floor predictions at 1e-6, consistent with training.
            y_pred = np.clip(y_pred, 1e-6, None)
        return y_pred.flatten()

    # ---------------- parameter setting ----------------

    def set_params(self, params):
        """Set known hyper-parameters from a dict; unknown keys raise.

        NOTE(review): this only rebinds attributes - it does not rebuild
        ``self.resnet``, so architecture changes take effect only if the
        network is reconstructed afterwards.
        """
        for key, value in params.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                raise ValueError(f"Parameter {key} not found in model.")
513
+
514
+ # =============================================================================
515
+ # FT-Transformer model & sklearn-style wrapper
516
+ # =============================================================================
517
+ # 开始定义FT Transformer模型结构
518
+
519
+
520
class FeatureTokenizer(nn.Module):
    """Map numeric + categorical features to a token sequence.

    All numeric columns are projected together into ONE token; every
    categorical column contributes one embedding token. Output shape is
    (batch, n_tokens, d_model). Categorical inputs must be integer-encoded
    into [0, cardinality - 1].
    """

    def __init__(self, num_numeric: int, cat_cardinalities, d_model: int):
        super().__init__()

        self.num_numeric = num_numeric
        self.has_numeric = num_numeric > 0

        if self.has_numeric:
            # Joint linear projection of all numeric columns.
            self.num_linear = nn.Linear(num_numeric, d_model)

        # One embedding table per categorical column.
        self.embeddings = nn.ModuleList([
            nn.Embedding(card, d_model) for card in cat_cardinalities
        ])

    def forward(self, X_num, X_cat):
        """X_num: (batch, num_numeric) float; X_cat: (batch, n_cat) long."""
        token_list = []

        if self.has_numeric:
            # All numeric features become a single token of shape
            # (batch, d_model).
            token_list.append(self.num_linear(X_num))

        # One embedding token per categorical column.
        for col, emb in enumerate(self.embeddings):
            token_list.append(emb(X_cat[:, col]))

        # Stack into (batch, n_tokens, d_model).
        return torch.stack(token_list, dim=1)
555
+
556
+ # 定义具有残差缩放的Encoder层
557
+
558
+
559
class ScaledTransformerEncoderLayer(nn.Module):
    """Transformer encoder layer with residual-branch scaling.

    Mirrors ``nn.TransformerEncoderLayer`` (batch_first, GELU) except that
    the self-attention and feed-forward residual branches are multiplied by
    fixed scale factors before being added back - useful to stabilise deep
    stacks. ``norm_first=True`` selects the pre-norm variant.
    """

    def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048,
                 dropout: float = 0.1, residual_scale_attn: float = 1.0,
                 residual_scale_ffn: float = 1.0, norm_first: bool = True,
                 ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(
            embed_dim=d_model,
            num_heads=nhead,
            dropout=dropout,
            batch_first=True
        )

        # Position-wise feed-forward sublayer.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        # Sublayer norms and residual dropouts.
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = nn.GELU()
        self.norm_first = norm_first

        # Constant scale applied to each residual branch.
        self.res_scale_attn = residual_scale_attn
        self.res_scale_ffn = residual_scale_ffn

    def forward(self, src, src_mask=None, src_key_padding_mask=None):
        """src: (batch, seq_len, d_model); masks follow MultiheadAttention."""
        x = src

        if self.norm_first:
            # Pre-norm: normalise before each sublayer, then add.
            x = x + self._sa_block(self.norm1(x), src_mask,
                                   src_key_padding_mask)
            x = x + self._ff_block(self.norm2(x))
            return x

        # Post-norm variant (normally unused here).
        x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
        return self.norm2(x + self._ff_block(x))

    def _sa_block(self, x, attn_mask, key_padding_mask):
        # Self-attention residual branch, scaled.
        attn_out, _ = self.self_attn(
            x, x, x,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask,
            need_weights=False
        )
        return self.res_scale_attn * self.dropout1(attn_out)

    def _ff_block(self, x):
        # Feed-forward residual branch, scaled.
        hidden = self.activation(self.linear1(x))
        return self.res_scale_ffn * self.dropout2(self.linear2(self.dropout(hidden)))
622
+
623
+ # 定义FT-Transformer核心模型
624
+
625
+
626
class FTTransformerCore(nn.Module):
    """Minimal FT-Transformer.

    Pipeline: FeatureTokenizer (numeric + categorical -> tokens) ->
    Transformer encoder (feature interactions) -> mean pooling over tokens
    -> MLP head with Softplus, so the output is strictly positive and
    suitable for Tweedie / gamma targets.
    """

    def __init__(self, num_numeric: int, cat_cardinalities, d_model: int = 64,
                 n_heads: int = 8, n_layers: int = 4, dropout: float = 0.1,
                 ):
        super().__init__()

        self.tokenizer = FeatureTokenizer(
            num_numeric=num_numeric,
            cat_cardinalities=cat_cardinalities,
            d_model=d_model
        )

        # Scale residual branches by 1/sqrt(n_layers) - a sensible default
        # that keeps deep stacks stable.
        res_scale = 1.0 / math.sqrt(n_layers)
        layer = ScaledTransformerEncoderLayer(
            d_model=d_model,
            nhead=n_heads,
            dim_feedforward=d_model * 4,
            dropout=dropout,
            residual_scale_attn=res_scale,
            residual_scale_ffn=res_scale,
            norm_first=True,
        )
        self.encoder = nn.TransformerEncoder(
            layer,
            num_layers=n_layers
        )
        self.n_layers = n_layers

        self.head = nn.Sequential(
            nn.LayerNorm(d_model),
            nn.Linear(d_model, d_model),
            nn.GELU(),
            nn.Linear(d_model, 1),
            nn.Softplus()  # positive output for Tweedie / gamma losses
        )

    def forward(self, X_num, X_cat):
        """X_num: (batch, n_numeric) float32; X_cat: (batch, n_cat) long.

        Returns a (batch, 1) tensor of strictly positive predictions.
        """
        tokens = self.tokenizer(X_num, X_cat)  # (batch, n_tokens, d_model)
        encoded = self.encoder(tokens)         # (batch, n_tokens, d_model)
        pooled = encoded.mean(dim=1)           # mean-pool -> (batch, d_model)
        return self.head(pooled)               # (batch, 1), positive
680
+
681
+ # 定义TabularDataset类
682
+
683
+
684
class TabularDataset(Dataset):
    """Dataset bundling numeric features, categorical features, target, weight.

    Expected tensors (first dimension = N samples):
      X_num: float32 (N, n_numeric); X_cat: long (N, n_cat);
      y: float32 (N, 1); w: float32 (N, 1).
    """

    def __init__(self, X_num, X_cat, y, w):
        self.X_num = X_num
        self.X_cat = X_cat
        self.y = y
        self.w = w

    def __len__(self):
        # Sample count = number of target rows.
        return self.y.shape[0]

    def __getitem__(self, idx):
        # One (numeric, categorical, target, weight) sample tuple.
        return self.X_num[idx], self.X_cat[idx], self.y[idx], self.w[idx]
707
+
708
+ # 定义FTTransformer的Scikit-Learn接口类
709
+
710
+
711
class FTTransformerSklearn(nn.Module):
    """Sklearn-style wrapper around FTTransformerCore.

    - num_cols: numeric feature column names.
    - cat_cols: categorical feature column names; values are mapped to
      integer codes internally, with one extra index reserved for
      unknown/missing categories.

    Device is chosen automatically (cuda > mps > cpu). The Tweedie power
    is inferred from the model name ('f' -> 1.0, 's' -> 2.0) or taken
    from ``tweedie_power`` otherwise.
    """

    def __init__(self, model_nme: str, num_cols, cat_cols, d_model: int = 64, n_heads: int = 8,
                 n_layers: int = 4, dropout: float = 0.1, batch_num: int = 100, epochs: int = 100,
                 tweedie_power: float = 1.5, learning_rate: float = 1e-3, patience: int = 10,
                 ):
        super().__init__()

        self.model_nme = model_nme
        self.num_cols = list(num_cols)
        self.cat_cols = list(cat_cols)
        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.dropout = dropout
        self.batch_num = batch_num
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.patience = patience
        # NOTE(review): substring match — a model name containing both 'f'
        # and 's' resolves to the 'f' branch first.
        if 'f' in self.model_nme:
            self.tw_power = 1.0
        elif 's' in self.model_nme:
            self.tw_power = 2.0
        else:
            self.tw_power = tweedie_power
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
        elif torch.backends.mps.is_available():
            self.device = torch.device("mps")
        else:
            self.device = torch.device("cpu")
        self.cat_cardinalities = None  # per-column cardinality (incl. unknown slot)
        self.cat_categories = {}       # training-time category set per column
        self.ft = None                 # underlying FTTransformerCore, built lazily

    def _build_model(self, X_train):
        """Build the underlying FTTransformerCore from training-data categories."""
        num_numeric = len(self.num_cols)
        cat_cardinalities = []

        for col in self.cat_cols:
            cats = X_train[col].astype('category')
            categories = cats.cat.categories
            self.cat_categories[col] = categories  # remember training category set

            card = len(categories) + 1  # reserve one slot for unknown/missing
            cat_cardinalities.append(card)

        self.cat_cardinalities = cat_cardinalities

        self.ft = FTTransformerCore(
            num_numeric=num_numeric,
            cat_cardinalities=cat_cardinalities,
            d_model=self.d_model,
            n_heads=self.n_heads,
            n_layers=self.n_layers,
            dropout=self.dropout,
        ).to(self.device)

    def _encode_cats(self, X):
        """Encode categorical columns to int64 codes, shape (N, n_categorical).

        Uses the category sets recorded at fit time; unknown or missing
        values map to the reserved index ``len(categories)``.
        """
        if not self.cat_cols:
            return np.zeros((len(X), 0), dtype='int64')

        X_cat_list = []
        for col in self.cat_cols:
            # Fixed category set recorded during training.
            categories = self.cat_categories[col]
            cats = pd.Categorical(X[col], categories=categories)
            codes = cats.codes.astype('int64', copy=True)  # -1 == unknown/missing
            # Map unknown/missing to the extra "unknown" index.
            codes[codes < 0] = len(categories)
            X_cat_list.append(codes)

        X_cat_np = np.stack(X_cat_list, axis=1)  # (N, n_categorical)
        return X_cat_np

    def fit(self, X_train, y_train, w_train=None,
            X_val=None, y_val=None, w_val=None):
        """Train with weighted Tweedie loss, AMP on CUDA, and early stopping.

        Validation data (if provided) drives early stopping with
        ``self.patience``; the best state dict is restored at the end.
        """
        # Build the underlying model structure on first fit.
        if self.ft is None:
            self._build_model(X_train)

        # --- Build training tensors (kept on CPU; moved per batch) ---
        # Copy so scaling/sampling cannot pollute the caller's DataFrame.
        X_num_train = X_train[self.num_cols].to_numpy(
            dtype=np.float32, copy=True)
        X_num_train = torch.tensor(
            X_num_train,
            dtype=torch.float32
        )

        if self.cat_cols:
            X_cat_train_np = self._encode_cats(X_train)
            X_cat_train = torch.tensor(X_cat_train_np, dtype=torch.long)
        else:
            X_cat_train = torch.zeros(
                (X_num_train.shape[0], 0), dtype=torch.long)

        y_tensor = torch.tensor(
            y_train.values,
            dtype=torch.float32
        ).view(-1, 1)

        if w_train is not None:
            w_tensor = torch.tensor(
                w_train.values,
                dtype=torch.float32
            ).view(-1, 1)
        else:
            w_tensor = torch.ones_like(y_tensor)

        # --- Validation tensors (moved to the device once, below) ---
        has_val = X_val is not None and y_val is not None
        if has_val:
            # ---------- numeric features ----------
            X_num_val_np = X_val[self.num_cols].to_numpy(
                dtype=np.float32, copy=True)
            X_num_val = torch.tensor(X_num_val_np, dtype=torch.float32)

            # ---------- categorical features ----------
            if self.cat_cols:
                X_cat_val_np = self._encode_cats(X_val)
                X_cat_val = torch.tensor(X_cat_val_np, dtype=torch.long)
            else:
                X_cat_val = torch.zeros(
                    (X_num_val.shape[0], 0), dtype=torch.long)

            # ---------- target & weights ----------
            y_val_np = y_val.values.astype(np.float32, copy=True)
            y_val_tensor = torch.tensor(
                y_val_np, dtype=torch.float32).view(-1, 1)

            if w_val is not None:
                w_val_np = w_val.values.astype(np.float32, copy=True)
                w_val_tensor = torch.tensor(
                    w_val_np, dtype=torch.float32).view(-1, 1)
            else:
                w_val_tensor = torch.ones_like(y_val_tensor)

        else:
            X_num_val = X_cat_val = y_val_tensor = w_val_tensor = None

        # --- DataLoader ---
        dataset = TabularDataset(
            X_num_train, X_cat_train, y_tensor, w_tensor
        )

        batch_size = compute_batch_size(
            data_size=len(dataset),
            learning_rate=self.learning_rate,
            batch_num=self.batch_num,
            minimum=64
        )

        dataloader = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=1,
            pin_memory=(self.device.type == 'cuda')
        )

        # --- Optimizer & AMP ---
        # Same as the ResNet path: Adam + AMP to avoid numeric instability.
        optimizer = torch.optim.Adam(
            self.ft.parameters(),
            lr=self.learning_rate
        )
        scaler = GradScaler(enabled=(self.device.type == 'cuda'))

        # --- Early stopping state ---
        best_loss = float('inf')
        patience_counter = 0
        best_model_state = None

        # Move the whole validation set to the device once.
        if has_val:
            X_num_val_dev = X_num_val.to(self.device, non_blocking=True)
            X_cat_val_dev = X_cat_val.to(self.device, non_blocking=True)
            y_val_dev = y_val_tensor.to(self.device, non_blocking=True)
            w_val_dev = w_val_tensor.to(self.device, non_blocking=True)

        # --- Training loop ---
        for epoch in range(1, self.epochs + 1):
            self.ft.train()
            for X_num_b, X_cat_b, y_b, w_b in dataloader:
                optimizer.zero_grad()

                X_num_b = X_num_b.to(self.device, non_blocking=True)
                X_cat_b = X_cat_b.to(self.device, non_blocking=True)
                y_b = y_b.to(self.device, non_blocking=True)
                w_b = w_b.to(self.device, non_blocking=True)

                with autocast(enabled=(self.device.type == 'cuda')):
                    y_pred = self.ft(X_num_b, X_cat_b)
                    # Clamp away zeros before the Tweedie log terms.
                    y_pred = torch.clamp(y_pred, min=1e-6)

                    losses = tweedie_loss(
                        y_pred, y_b, p=self.tw_power
                    ).view(-1)

                    # Weighted mean; denominator clamped to avoid /0.
                    weighted_loss = (losses * w_b.view(-1)).sum() / \
                        torch.clamp(w_b.sum(), min=EPS)

                scaler.scale(weighted_loss).backward()

                if self.device.type == 'cuda':
                    # Unscale before clipping so the norm is in true units.
                    scaler.unscale_(optimizer)
                    clip_grad_norm_(self.ft.parameters(), max_norm=1.0)

                scaler.step(optimizer)
                scaler.update()

            # --- Validation + early stopping ---
            if has_val:
                self.ft.eval()
                with torch.no_grad(), autocast(enabled=(self.device.type == 'cuda')):
                    y_val_pred = self.ft(X_num_val_dev, X_cat_val_dev)
                    y_val_pred = torch.clamp(y_val_pred, min=1e-6)

                    val_losses = tweedie_loss(
                        y_val_pred, y_val_dev, p=self.tw_power
                    ).view(-1)

                    val_weighted_loss = (
                        val_losses * w_val_dev.view(-1)
                    ).sum() / torch.clamp(w_val_dev.sum(), min=EPS)

                if val_weighted_loss < best_loss:
                    best_loss = val_weighted_loss
                    patience_counter = 0
                    best_model_state = copy.deepcopy(self.ft.state_dict())
                else:
                    patience_counter += 1

                if patience_counter >= self.patience and best_model_state is not None:
                    self.ft.load_state_dict(best_model_state)
                    break
        # Restore the best snapshot even when the loop ran to completion.
        if has_val and best_model_state is not None:
            self.ft.load_state_dict(best_model_state)

    def predict(self, X_test):
        """Predict positive values; X_test must contain all numeric and
        categorical columns seen at fit time. Returns a 1-D numpy array."""
        self.ft.eval()
        X_num = X_test[self.num_cols].to_numpy(dtype=np.float32, copy=True)
        X_num = torch.tensor(
            X_num,
            dtype=torch.float32
        )
        if self.cat_cols:
            X_cat_np = self._encode_cats(X_test)
            X_cat = torch.tensor(X_cat_np, dtype=torch.long)
        else:
            X_cat = torch.zeros((X_num.size(0), 0), dtype=torch.long)

        with torch.no_grad():
            X_num = X_num.to(self.device, non_blocking=True)
            X_cat = X_cat.to(self.device, non_blocking=True)
            y_pred = self.ft(X_num, X_cat).cpu().numpy()

        y_pred = np.clip(y_pred, 1e-6, None)
        return y_pred.ravel()

    def set_params(self, params: dict):
        """Sklearn-style parameter setter.

        Structural parameters (e.g. d_model / n_heads) only take effect
        after the next fit rebuilds the underlying model.
        """
        for key, value in params.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                raise ValueError(f"Parameter {key} not found in model.")
        return self
994
+
995
+
996
+ # ===== 基础组件与训练封装 =====================================================
997
+
998
+ # =============================================================================
999
+ # Config, preprocessing, and trainer base
1000
+ # =============================================================================
1001
@dataclass
class BayesOptConfig:
    """Run configuration shared by preprocessing and all trainers."""

    # Model identifier; elsewhere also drives objective selection.
    model_nme: str
    # Response (target) column name.
    resp_nme: str
    # Exposure/weight column name.
    weight_nme: str
    # All feature column names (numeric + categorical).
    factor_nmes: List[str]
    # Subset of factor_nmes treated as categorical; None means none.
    cate_list: Optional[List[str]] = None
    # Hold-out fraction per CV split (n_splits ~ 1 / prop_test).
    prop_test: float = 0.25
    # Random seed; None lets the caller draw one.
    rand_seed: Optional[int] = None
    # Max training epochs for the neural models.
    epochs: int = 100
    # Request GPU usage when available.
    use_gpu: bool = True
1012
+
1013
+
1014
class OutputManager:
    """Resolves and prepares output file paths for plots, results and models."""

    def __init__(self, root: Optional[str] = None, model_name: str = "model") -> None:
        base = Path(root or os.getcwd())
        self.root = base
        self.model_name = model_name
        self.plot_dir = base / 'plot'
        self.result_dir = base / 'Results'
        self.model_dir = base / 'model'

    def _prepare(self, path: Path) -> str:
        # Ensure the parent directory exists before handing the path out.
        target = str(path)
        ensure_parent_dir(target)
        return target

    def plot_path(self, filename: str) -> str:
        """Full path for a plot artifact."""
        return self._prepare(self.plot_dir / filename)

    def result_path(self, filename: str) -> str:
        """Full path for a result table."""
        return self._prepare(self.result_dir / filename)

    def model_path(self, filename: str) -> str:
        """Full path for a serialized model file."""
        return self._prepare(self.model_dir / filename)
1036
+
1037
+
1038
class DatasetPreprocessor:
    """Builds the shared train/test data views used by every trainer.

    After ``run()``:
      - ``train_data`` / ``test_data``: deep copies with a weighted-actual
        column ``w_act`` added; the training response is clipped at its
        99.9th percentile.
      - ``train_oht_scl_data`` / ``test_oht_scl_data``: one-hot encoded and
        per-column standardized frames for the neural-network trainers.
      - ``var_nmes``: feature columns of the one-hot frame (weight and
        response excluded), in deterministic order.
    """

    def __init__(self, train_df: pd.DataFrame, test_df: pd.DataFrame,
                 config: BayesOptConfig) -> None:
        self.config = config
        # Deep copies keep the caller's frames untouched by clipping/scaling.
        self.train_data = train_df.copy(deep=True)
        self.test_data = test_df.copy(deep=True)
        self.num_features: List[str] = []
        self.train_oht_scl_data: Optional[pd.DataFrame] = None
        self.test_oht_scl_data: Optional[pd.DataFrame] = None
        self.var_nmes: List[str] = []
        self.cat_categories_for_shap: Dict[str, List[Any]] = {}

    def run(self) -> "DatasetPreprocessor":
        cfg = self.config
        # Precompute weighted actuals; later plotting/validation rely on them.
        self.train_data.loc[:, 'w_act'] = self.train_data[cfg.resp_nme] * \
            self.train_data[cfg.weight_nme]
        self.test_data.loc[:, 'w_act'] = self.test_data[cfg.resp_nme] * \
            self.test_data[cfg.weight_nme]
        # High-quantile clipping absorbs outliers; without it extreme points
        # dominate the loss. (99.9th percentile, train set only.)
        q999 = self.train_data[cfg.resp_nme].quantile(0.999)
        self.train_data[cfg.resp_nme] = self.train_data[cfg.resp_nme].clip(
            upper=q999)
        cate_list = list(cfg.cate_list or [])
        if cate_list:
            for cate in cate_list:
                self.train_data[cate] = self.train_data[cate].astype(
                    'category')
                self.test_data[cate] = self.test_data[cate].astype('category')
                cats = self.train_data[cate].cat.categories
                self.cat_categories_for_shap[cate] = list(cats)
        self.num_features = [
            nme for nme in cfg.factor_nmes if nme not in cate_list]
        train_oht = self.train_data[cfg.factor_nmes +
                                    [cfg.weight_nme] + [cfg.resp_nme]].copy()
        test_oht = self.test_data[cfg.factor_nmes +
                                  [cfg.weight_nme] + [cfg.resp_nme]].copy()
        train_oht = pd.get_dummies(
            train_oht,
            columns=cate_list,
            drop_first=True,
            dtype=np.int8
        )
        test_oht = pd.get_dummies(
            test_oht,
            columns=cate_list,
            drop_first=True,
            dtype=np.int8
        )
        for num_chr in self.num_features:
            # Per-column standardization keeps features on one scale;
            # otherwise the neural networks struggle to converge.
            scaler = StandardScaler()
            train_oht[num_chr] = scaler.fit_transform(
                train_oht[num_chr].values.reshape(-1, 1))
            test_oht[num_chr] = scaler.transform(
                test_oht[num_chr].values.reshape(-1, 1))
        # Reindex fills dummy columns missing from the test set with zeros,
        # keeping train/test column sets identical.
        test_oht = test_oht.reindex(columns=train_oht.columns, fill_value=0)
        self.train_oht_scl_data = train_oht
        self.test_oht_scl_data = test_oht
        # BUGFIX: the original used set difference here, which yields a
        # nondeterministic column order across interpreter runs (hash
        # randomization) and therefore an irreproducible NN input ordering.
        # Preserve the one-hot frame's column order instead.
        self.var_nmes = [
            col for col in train_oht.columns
            if col not in (cfg.weight_nme, cfg.resp_nme)
        ]
        return self
1104
+
1105
+ # =============================================================================
1106
+ # Trainers
1107
+ # =============================================================================
1108
+
1109
+
1110
class TrainerBase:
    """Abstract base for per-model trainers.

    Subclasses implement tune()/train() and optionally save()/load().
    """

    def __init__(self, context: "BayesOptModel", label: str) -> None:
        # context: owning BayesOptModel (data, config, output manager).
        # label: human-readable model name appended to ctx.model_label.
        self.ctx = context
        self.label = label

    @property
    def config(self) -> BayesOptConfig:
        # Shared run configuration.
        return self.ctx.config

    @property
    def output(self) -> OutputManager:
        # Shared output path manager.
        return self.ctx.output_manager

    def tune(self, max_evals: int) -> None:  # pragma: no cover - overridden by subclasses
        raise NotImplementedError

    def train(self) -> None:  # pragma: no cover - overridden by subclasses
        raise NotImplementedError

    def save(self) -> None:
        # Optional hook: subclasses persist their fitted model here.
        pass

    def load(self) -> None:
        # Optional hook: subclasses restore a previously saved model here.
        pass
1134
+
1135
+
1136
class XGBTrainer(TrainerBase):
    """Optuna-tuned XGBoost trainer."""

    def __init__(self, context: "BayesOptModel") -> None:
        super().__init__(context, 'Xgboost')
        self.model: Optional[xgb.XGBRegressor] = None
        self.best_params: Optional[Dict[str, Any]] = None
        self.best_trial = None

    def _build_estimator(self) -> xgb.XGBRegressor:
        """Build a fresh regressor with GPU settings applied when requested.

        NOTE(review): 'gpu_hist' / 'gpu_predictor' / 'gpu_id' are the
        pre-2.0 XGBoost GPU spelling — confirm against the pinned version.
        """
        params = dict(
            objective=self.ctx.obj,
            random_state=self.ctx.rand_seed,
            subsample=0.9,
            tree_method='gpu_hist' if self.ctx.use_gpu else 'hist',
            enable_categorical=True,
            predictor='gpu_predictor' if self.ctx.use_gpu else 'cpu_predictor'
        )
        if self.ctx.use_gpu:
            params['gpu_id'] = 0
        return xgb.XGBRegressor(**params)

    def cross_val(self, trial: optuna.trial.Trial) -> float:
        """Optuna objective: mean CV Tweedie deviance (lower is better)."""
        learning_rate = trial.suggest_float(
            'learning_rate', 1e-5, 1e-1, log=True)
        gamma = trial.suggest_float('gamma', 0, 10000)
        max_depth = trial.suggest_int('max_depth', 3, 25)
        n_estimators = trial.suggest_int('n_estimators', 10, 500, step=10)
        min_child_weight = trial.suggest_int(
            'min_child_weight', 100, 10000, step=100)
        reg_alpha = trial.suggest_float('reg_alpha', 1e-10, 1, log=True)
        reg_lambda = trial.suggest_float('reg_lambda', 1e-10, 1, log=True)
        # Variance power: tuned for Tweedie, fixed for Poisson/Gamma.
        if self.ctx.obj == 'reg:tweedie':
            tweedie_variance_power = trial.suggest_float(
                'tweedie_variance_power', 1, 2)
        elif self.ctx.obj == 'count:poisson':
            tweedie_variance_power = 1
        elif self.ctx.obj == 'reg:gamma':
            tweedie_variance_power = 2
        else:
            tweedie_variance_power = 1.5
        clf = self._build_estimator()
        params = {
            'learning_rate': learning_rate,
            'gamma': gamma,
            'max_depth': max_depth,
            'n_estimators': n_estimators,
            'min_child_weight': min_child_weight,
            'reg_alpha': reg_alpha,
            'reg_lambda': reg_lambda
        }
        if self.ctx.obj == 'reg:tweedie':
            params['tweedie_variance_power'] = tweedie_variance_power
        clf.set_params(**params)
        # On GPU keep a single worker to avoid device contention.
        n_jobs = 1 if self.ctx.use_gpu else int(1 / self.ctx.prop_test)
        acc = cross_val_score(
            clf,
            self.ctx.train_data[self.ctx.factor_nmes],
            self.ctx.train_data[self.ctx.resp_nme].values,
            fit_params=self.ctx.fit_params,
            cv=self.ctx.cv,
            scoring=make_scorer(
                mean_tweedie_deviance,
                power=tweedie_variance_power,
                greater_is_better=False),
            error_score='raise',
            n_jobs=n_jobs
        ).mean()
        # cross_val_score negates the scorer, so negate back to a deviance.
        return -acc

    def tune(self, max_evals: int = 100) -> None:
        """Run Bayesian optimization and persist the best parameters to CSV."""
        study = optuna.create_study(
            direction='minimize',
            sampler=optuna.samplers.TPESampler(seed=self.ctx.rand_seed)
        )
        study.optimize(self.cross_val, n_trials=max_evals)
        self.best_params = study.best_params
        self.best_trial = study.best_trial
        params_path = self.output.result_path(
            f'{self.ctx.model_nme}_bestparams_xgb.csv'
        )
        pd.DataFrame(self.best_params, index=[0]).to_csv(params_path)

    def train(self) -> None:
        """Fit the final model with the tuned parameters and attach
        predictions (pred_xgb / w_pred_xgb) to the context frames."""
        if not self.best_params:
            raise RuntimeError('请先运行 tune() 以获得 XGB 最优参数。')
        self.model = self._build_estimator()
        self.model.set_params(**self.best_params)
        self.model.fit(self.ctx.train_data[self.ctx.factor_nmes],
                       self.ctx.train_data[self.ctx.resp_nme].values,
                       **self.ctx.fit_params)
        self.ctx.model_label += [self.label]
        self.ctx.train_data['pred_xgb'] = self.model.predict(
            self.ctx.train_data[self.ctx.factor_nmes])
        self.ctx.test_data['pred_xgb'] = self.model.predict(
            self.ctx.test_data[self.ctx.factor_nmes])
        self.ctx.train_data.loc[:, 'w_pred_xgb'] = self.ctx.train_data['pred_xgb'] * \
            self.ctx.train_data[self.ctx.weight_nme]
        self.ctx.test_data.loc[:, 'w_pred_xgb'] = self.ctx.test_data['pred_xgb'] * \
            self.ctx.test_data[self.ctx.weight_nme]
        self.ctx.xgb_best = self.model

    def save(self) -> None:
        """Persist the fitted model with joblib (no-op when not trained)."""
        if self.model is not None:
            joblib.dump(self.model, self.output.model_path(
                f'01_{self.ctx.model_nme}_Xgboost.pkl'))

    def load(self) -> None:
        """Load a previously saved model; warn when the file is missing."""
        path = self.output.model_path(
            f'01_{self.ctx.model_nme}_Xgboost.pkl')
        if os.path.exists(path):
            self.model = joblib.load(path)
            self.ctx.xgb_best = self.model
        else:
            print(f"[load_model] Warning: 未找到 Xgboost 模型文件:{path}")
1249
+
1250
+
1251
class ResNetTrainer(TrainerBase):
    """Optuna-tuned ResNet trainer operating on the one-hot, scaled frames."""

    def __init__(self, context: "BayesOptModel") -> None:
        super().__init__(context, 'ResNet')
        self.model: Optional[ResNetSklearn] = None
        self.best_params: Optional[Dict[str, Any]] = None
        self.best_trial = None

    # ========= Cross-validation (used by the Bayesian optimizer) =========
    def cross_val(self, trial: optuna.trial.Trial) -> float:
        """Cross-validate a ResNet configuration.

        To avoid GPU OOM:
        - each fold builds its own ResNetSklearn,
        - after each fold the model is moved to CPU, deleted, gc'd, and
          the CUDA cache is emptied,
        - optionally the optimization runs on a training subsample only.
        """

        # 1. Hyper-parameter search space.
        learning_rate = trial.suggest_float(
            'learning_rate', 1e-6, 1e-2, log=True
        )
        # Kept deliberately small to limit memory use.
        hidden_dim = trial.suggest_int('hidden_dim', 8, 32, step=2)
        block_num = trial.suggest_int('block_num', 2, 10)
        batch_num = trial.suggest_int(
            'batch_num',
            10 if self.ctx.obj == 'reg:gamma' else 100,
            100 if self.ctx.obj == 'reg:gamma' else 1000,
            step=10 if self.ctx.obj == 'reg:gamma' else 100
        )

        if self.ctx.obj == 'reg:tweedie':
            tw_power = trial.suggest_float('tw_power', 1.0, 2.0)
        elif self.ctx.obj == 'count:poisson':
            tw_power = 1.0
        elif self.ctx.obj == 'reg:gamma':
            tw_power = 2.0
        else:
            tw_power = 1.5

        loss = 0.0

        # 2. (Optional) subsample for CV to cut memory and run time.
        data_for_cv = self.ctx.train_oht_scl_data
        max_rows_for_resnet_bo = min(100000, int(
            len(data_for_cv)/5))  # tune down (e.g. 50_000) on small GPUs
        if len(data_for_cv) > max_rows_for_resnet_bo:
            data_for_cv = data_for_cv.sample(
                max_rows_for_resnet_bo,
                random_state=self.ctx.rand_seed
            )

        X_all = data_for_cv[self.ctx.var_nmes]
        y_all = data_for_cv[self.ctx.resp_nme]
        w_all = data_for_cv[self.ctx.weight_nme]

        # Local ShuffleSplit avoids index mismatches on the subsample.
        cv_local = ShuffleSplit(
            n_splits=int(1 / self.ctx.prop_test),
            test_size=self.ctx.prop_test,
            random_state=self.ctx.rand_seed
        )

        for fold, (train_idx, val_idx) in enumerate(cv_local.split(X_all)):
            X_train_fold = X_all.iloc[train_idx]
            y_train_fold = y_all.iloc[train_idx]
            w_train_fold = w_all.iloc[train_idx]

            X_val_fold = X_all.iloc[val_idx]
            y_val_fold = y_all.iloc[val_idx]
            w_val_fold = w_all.iloc[val_idx]

            # 3. A fresh, temporary ResNet per fold.
            cv_net = ResNetSklearn(
                model_nme=self.ctx.model_nme,
                input_dim=X_all.shape[1],
                hidden_dim=hidden_dim,
                block_num=block_num,
                batch_num=batch_num,
                epochs=self.ctx.epochs,
                tweedie_power=tw_power,
                learning_rate=learning_rate,
                patience=5
            )

            try:
                # 4. Fit (internally uses the project's tweedie_loss).
                cv_net.fit(
                    X_train_fold,
                    y_train_fold,
                    w_train_fold,
                    X_val_fold,
                    y_val_fold,
                    w_val_fold
                )

                # 5. Validation predictions.
                y_pred_fold = cv_net.predict(X_val_fold)

                # 6. Evaluate with Tweedie deviance (evaluation metric only).
                loss += mean_tweedie_deviance(
                    y_val_fold,
                    y_pred_fold,
                    sample_weight=w_val_fold,
                    power=tw_power
                )

            finally:
                # 7. Release GPU resources at the end of every fold.
                try:
                    if hasattr(cv_net, "resnet"):
                        cv_net.resnet.to("cpu")
                except Exception:
                    pass
                del cv_net
                gc.collect()
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

        return loss / int(1 / self.ctx.prop_test)

    # ========= Optuna tuning =========
    def tune(self, max_evals: int = 50) -> None:
        """Bayesian-optimize the ResNet with Optuna.

        An extra global memory cleanup runs after every trial.
        """
        def objective(trial: optuna.trial.Trial) -> float:
            result = self.cross_val(trial)
            # Trial-level safety cleanup.
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            return result

        study = optuna.create_study(
            direction='minimize',
            sampler=optuna.samplers.TPESampler(seed=self.ctx.rand_seed)
        )
        study.optimize(objective, n_trials=max_evals)

        self.best_params = study.best_params
        self.best_trial = study.best_trial

        params_path = self.output.result_path(
            f'{self.ctx.model_nme}_bestparams_resn.csv'
        )
        pd.DataFrame(self.best_params, index=[0]).to_csv(params_path)

    # ========= Train the final ResNet with the best hyper-parameters =========
    def train(self) -> None:
        """Fit the final ResNet on the full one-hot + scaled data and attach
        predictions (pred_resn / w_pred_resn) to the context frames."""
        if not self.best_params:
            raise RuntimeError('请先运行 tune() 以获得 ResNet 最优参数。')

        self.model = ResNetSklearn(
            model_nme=self.ctx.model_nme,
            input_dim=self.ctx.train_oht_scl_data[self.ctx.var_nmes].shape[1]
        )
        self.model.set_params(self.best_params)

        # Train the final model on the full one-hot + standardized data.
        self.model.fit(
            self.ctx.train_oht_scl_data[self.ctx.var_nmes],
            self.ctx.train_oht_scl_data[self.ctx.resp_nme],
            self.ctx.train_oht_scl_data[self.ctx.weight_nme]
        )

        # Record the label.
        self.ctx.model_label += [self.label]

        # Train / test predictions.
        self.ctx.train_data['pred_resn'] = self.model.predict(
            self.ctx.train_oht_scl_data[self.ctx.var_nmes]
        )
        self.ctx.test_data['pred_resn'] = self.model.predict(
            self.ctx.test_oht_scl_data[self.ctx.var_nmes]
        )

        # Weighted predictions.
        self.ctx.train_data.loc[:, 'w_pred_resn'] = (
            self.ctx.train_data['pred_resn'] *
            self.ctx.train_data[self.ctx.weight_nme]
        )
        self.ctx.test_data.loc[:, 'w_pred_resn'] = (
            self.ctx.test_data['pred_resn'] *
            self.ctx.test_data[self.ctx.weight_nme]
        )

        # Expose for external use.
        self.ctx.resn_best = self.model

    # ========= Save / load =========
    def save(self) -> None:
        """Save only the ResNet state_dict (lightweight, no optimizer)."""
        if self.model is not None:
            path = self.output.model_path(
                f'01_{self.ctx.model_nme}_ResNet.pth'
            )
            torch.save(self.model.resnet.state_dict(), path)

    def load(self) -> None:
        """Load a saved ResNet and move it to the best available device."""
        path = self.output.model_path(
            f'01_{self.ctx.model_nme}_ResNet.pth'
        )
        if os.path.exists(path):
            resn_loaded = ResNetSklearn(
                model_nme=self.ctx.model_nme,
                input_dim=self.ctx.train_oht_scl_data[self.ctx.var_nmes].shape[1]
            )
            state_dict = torch.load(path, map_location='cpu')
            resn_loaded.resnet.load_state_dict(state_dict)

            # Pick the device for the current environment.
            if torch.cuda.is_available():
                resn_loaded.device = torch.device('cuda')
            elif torch.backends.mps.is_available():
                resn_loaded.device = torch.device('mps')
            else:
                resn_loaded.device = torch.device('cpu')

            resn_loaded.resnet.to(resn_loaded.device)
            self.model = resn_loaded
            self.ctx.resn_best = self.model
        else:
            print(f"[ResNetTrainer.load] 未找到模型文件:{path}")
1481
+
1482
+
1483
class FTTrainer(TrainerBase):
    """Optuna-tuned FT-Transformer trainer operating on the raw factor frame."""

    def __init__(self, context: "BayesOptModel") -> None:
        super().__init__(context, 'FTTransformer')
        self.model: Optional[FTTransformerSklearn] = None
        self.best_params: Optional[Dict[str, Any]] = None
        self.best_trial = None

    def cross_val(self, trial: optuna.trial.Trial) -> float:
        """Cross-validate an FT-Transformer configuration.

        This is the most GPU-memory-hungry path, so it uses:
        - a relatively conservative hyper-parameter search space,
        - forced GPU memory release after every fold.
        """
        # Search space kept modest to avoid very large models.
        learning_rate = trial.suggest_float(
            'learning_rate', 1e-5, 5e-4, log=True
        )
        d_model = trial.suggest_int('d_model', 32, 256, step=32)
        n_heads = trial.suggest_categorical('n_heads', [2, 4, 8])
        n_layers = trial.suggest_int('n_layers', 2, 8)
        dropout = trial.suggest_float('dropout', 0.0, 0.2)
        batch_num = trial.suggest_int(
            'batch_num',
            5 if self.ctx.obj == 'reg:gamma' else 10,
            10 if self.ctx.obj == 'reg:gamma' else 50,
            step=1 if self.ctx.obj == 'reg:gamma' else 10
        )

        if self.ctx.obj == 'reg:tweedie':
            tw_power = trial.suggest_float('tw_power', 1.0, 2.0)
        elif self.ctx.obj == 'count:poisson':
            tw_power = 1.0
        elif self.ctx.obj == 'reg:gamma':
            tw_power = 2.0
        else:
            tw_power = 1.5

        loss = 0.0

        # Optional: optimize on a subsample so huge datasets don't
        # overwhelm GPU memory during the search.
        data_for_cv = self.ctx.train_data
        max_rows_for_ft_bo = min(1000000, int(
            len(data_for_cv)/2))  # adjust to the available GPU memory
        if len(data_for_cv) > max_rows_for_ft_bo:
            data_for_cv = data_for_cv.sample(
                max_rows_for_ft_bo,
                random_state=self.ctx.rand_seed
            )

        for _, (train_idx, test_idx) in enumerate(
            self.ctx.cv.split(data_for_cv[self.ctx.factor_nmes])
        ):
            X_train_fold = data_for_cv.iloc[train_idx][self.ctx.factor_nmes]
            y_train_fold = data_for_cv.iloc[train_idx][self.ctx.resp_nme]
            w_train_fold = data_for_cv.iloc[train_idx][self.ctx.weight_nme]
            X_val_fold = data_for_cv.iloc[test_idx][self.ctx.factor_nmes]
            y_val_fold = data_for_cv.iloc[test_idx][self.ctx.resp_nme]
            w_val_fold = data_for_cv.iloc[test_idx][self.ctx.weight_nme]

            # A fresh, temporary FT-Transformer per fold.
            cv_ft = FTTransformerSklearn(
                model_nme=self.ctx.model_nme,
                num_cols=self.ctx.num_features,
                cat_cols=self.ctx.cate_list,
                d_model=d_model,
                n_heads=n_heads,
                n_layers=n_layers,
                dropout=dropout,
                batch_num=batch_num,
                epochs=self.ctx.epochs,
                tweedie_power=tw_power,
                learning_rate=learning_rate,
                patience=5
            )

            try:
                cv_ft.fit(
                    X_train_fold, y_train_fold, w_train_fold,
                    X_val_fold, y_val_fold, w_val_fold
                )
                y_pred_fold = cv_ft.predict(X_val_fold)
                loss += mean_tweedie_deviance(
                    y_val_fold,
                    y_pred_fold,
                    sample_weight=w_val_fold,
                    power=tw_power
                )
            finally:
                # Release GPU resources as soon as the fold is done.
                try:
                    # Move the model back to CPU if it lives on the GPU.
                    if hasattr(cv_ft, "ft"):
                        cv_ft.ft.to("cpu")
                except Exception:
                    pass
                del cv_ft
                gc.collect()
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

        return loss / int(1 / self.ctx.prop_test)

    def tune(self, max_evals: int = 50) -> None:
        """Hyper-parameter search with Optuna.

        A memory cleanup runs after every trial to limit GPU memory
        fragmentation between trials.
        """
        def objective(trial: optuna.trial.Trial) -> float:
            result = self.cross_val(trial)
            # Trial-level safety cleanup.
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            return result

        study = optuna.create_study(
            direction='minimize',
            sampler=optuna.samplers.TPESampler(seed=self.ctx.rand_seed)
        )
        study.optimize(objective, n_trials=max_evals)
        self.best_params = study.best_params
        self.best_trial = study.best_trial
        params_path = self.output.result_path(
            f'{self.ctx.model_nme}_bestparams_ft.csv'
        )
        pd.DataFrame(self.best_params, index=[0]).to_csv(params_path)

    def train(self) -> None:
        """Fit the final FT-Transformer with the tuned parameters and attach
        predictions (pred_ft / w_pred_ft) to the context frames."""
        if not self.best_params:
            raise RuntimeError('请先运行 tune() 以获得 FT-Transformer 最优参数。')
        self.model = FTTransformerSklearn(
            model_nme=self.ctx.model_nme,
            num_cols=self.ctx.num_features,
            cat_cols=self.ctx.cate_list
        )
        self.model.set_params(self.best_params)
        self.model.fit(
            self.ctx.train_data[self.ctx.factor_nmes],
            self.ctx.train_data[self.ctx.resp_nme],
            self.ctx.train_data[self.ctx.weight_nme]
        )
        self.ctx.model_label += [self.label]
        self.ctx.train_data['pred_ft'] = self.model.predict(
            self.ctx.train_data[self.ctx.factor_nmes]
        )
        self.ctx.test_data['pred_ft'] = self.model.predict(
            self.ctx.test_data[self.ctx.factor_nmes]
        )
        self.ctx.train_data.loc[:, 'w_pred_ft'] = (
            self.ctx.train_data['pred_ft'] *
            self.ctx.train_data[self.ctx.weight_nme]
        )
        self.ctx.test_data.loc[:, 'w_pred_ft'] = (
            self.ctx.test_data['pred_ft'] *
            self.ctx.test_data[self.ctx.weight_nme]
        )
        self.ctx.ft_best = self.model

    def save(self) -> None:
        """Persist the whole wrapper object with torch.save (not just the
        state_dict, unlike the ResNet trainer)."""
        if self.model is not None:
            torch.save(
                self.model,
                self.output.model_path(
                    f'01_{self.ctx.model_nme}_FTTransformer.pth')
            )

    def load(self) -> None:
        """Load a saved wrapper and move its core to the best device."""
        path = self.output.model_path(
            f'01_{self.ctx.model_nme}_FTTransformer.pth')
        if os.path.exists(path):
            ft_loaded = torch.load(path, map_location='cpu')
            if torch.cuda.is_available():
                ft_loaded.device = torch.device('cuda')
            elif torch.backends.mps.is_available():
                ft_loaded.device = torch.device('mps')
            else:
                ft_loaded.device = torch.device('cpu')
            ft_loaded.ft.to(ft_loaded.device)
            self.model = ft_loaded
            self.ctx.ft_best = self.model
        else:
            print(f"[load_model] Warning: 未找到 FT-Transformer 模型文件:{path}")
1667
+
1668
+
1669
+ # =============================================================================
1670
+ # BayesOpt orchestration & SHAP utilities
1671
+ # =============================================================================
1672
class BayesOptModel:
    """Orchestrates Bayesian hyper-parameter search, training, comparison
    plots and SHAP explanations for XGBoost / ResNet / FT-Transformer
    models on a single pricing dataset."""

    def __init__(self, train_data, test_data,
                 model_nme, resp_nme, weight_nme, factor_nmes,
                 cate_list=None, prop_test=0.25, rand_seed=None,
                 epochs=100, use_gpu=True):
        """Build config, preprocess both datasets and register the trainers.

        Args:
            train_data: training DataFrame (raw feature space).
            test_data: hold-out DataFrame.
            model_nme: model identifier; also drives the XGBoost objective
                and all output file names.
            resp_nme: response column name.
            weight_nme: exposure/weight column name.
            factor_nmes: feature column names.
            cate_list: categorical feature names (subset of factor_nmes).
            prop_test: test share per CV split; also sets the split count.
            rand_seed: RNG seed; drawn randomly when None.
            epochs: training epochs for the neural trainers.
            use_gpu: request GPU; honoured only when CUDA is available.
        """
        cfg = BayesOptConfig(
            model_nme=model_nme,
            resp_nme=resp_nme,
            weight_nme=weight_nme,
            factor_nmes=list(factor_nmes),
            cate_list=list(cate_list) if cate_list else None,
            prop_test=prop_test,
            rand_seed=rand_seed,
            epochs=epochs,
            use_gpu=use_gpu
        )
        self.config = cfg
        # Mirror the config fields as attributes for convenient access.
        self.model_nme = cfg.model_nme
        self.resp_nme = cfg.resp_nme
        self.weight_nme = cfg.weight_nme
        self.factor_nmes = cfg.factor_nmes
        self.cate_list = list(cfg.cate_list or [])
        self.prop_test = cfg.prop_test
        self.epochs = cfg.epochs
        self.rand_seed = cfg.rand_seed if cfg.rand_seed is not None else np.random.randint(
            1, 10000)
        self.use_gpu = bool(cfg.use_gpu and torch.cuda.is_available())
        self.output_manager = OutputManager(os.getcwd(), self.model_nme)

        # Preprocessing produces both the raw frames and the
        # one-hot-encoded / standardised variants used by the NN trainers.
        preprocessor = DatasetPreprocessor(train_data, test_data, cfg).run()
        self.train_data = preprocessor.train_data
        self.test_data = preprocessor.test_data
        self.train_oht_scl_data = preprocessor.train_oht_scl_data
        self.test_oht_scl_data = preprocessor.test_oht_scl_data
        self.var_nmes = preprocessor.var_nmes
        self.num_features = preprocessor.num_features
        self.cat_categories_for_shap = preprocessor.cat_categories_for_shap

        # e.g. prop_test=0.25 -> 4 shuffle splits of 25% each.
        self.cv = ShuffleSplit(n_splits=int(1/self.prop_test),
                               test_size=self.prop_test,
                               random_state=self.rand_seed)
        # Objective is inferred from substrings of the model name
        # ('f' -> frequency/Poisson, 's' -> severity/Gamma).
        # NOTE(review): the 'bc' branch is only reached when the name has
        # neither 'f' nor 's', and it yields the same objective as the
        # else branch — effectively redundant; confirm intent.
        if self.model_nme.find('f') != -1:
            self.obj = 'count:poisson'
        elif self.model_nme.find('s') != -1:
            self.obj = 'reg:gamma'
        elif self.model_nme.find('bc') != -1:
            self.obj = 'reg:tweedie'
        else:
            self.obj = 'reg:tweedie'
        self.fit_params = {
            'sample_weight': self.train_data[self.weight_nme].values
        }
        self.model_label: List[str] = []

        # Register one trainer per model family; access by label so new
        # model types can be added without touching the orchestration code.
        self.trainers: Dict[str, TrainerBase] = {
            'xgb': XGBTrainer(self),
            'resn': ResNetTrainer(self),
            'ft': FTTrainer(self)
        }
        # Per-model slots for the fitted model, best params/trial and any
        # model reloaded from disk; populated lazily by bayesopt_*/load_model.
        self.xgb_best = None
        self.resn_best = None
        self.ft_best = None
        self.best_xgb_params = None
        self.best_resn_params = None
        self.best_ft_params = None
        self.best_xgb_trial = None
        self.best_resn_trial = None
        self.best_ft_trial = None
        self.xgb_load = None
        self.resn_load = None
        self.ft_load = None
1744
+
1745
+ # 定义单因素画图函数
1746
+ def plot_oneway(self, n_bins=10):
1747
+ for c in self.factor_nmes:
1748
+ fig = plt.figure(figsize=(7, 5))
1749
+ if c in self.cate_list:
1750
+ group_col = c
1751
+ plot_source = self.train_data
1752
+ else:
1753
+ group_col = f'{c}_bins'
1754
+ bins = pd.qcut(
1755
+ self.train_data[c],
1756
+ n_bins,
1757
+ duplicates='drop' # 注意:如果分位数重复会丢 bin,避免异常终止
1758
+ )
1759
+ plot_source = self.train_data.assign(**{group_col: bins})
1760
+ plot_data = plot_source.groupby(
1761
+ [group_col], observed=True).sum(numeric_only=True)
1762
+ plot_data.reset_index(inplace=True)
1763
+ plot_data['act_v'] = plot_data['w_act'] / \
1764
+ plot_data[self.weight_nme]
1765
+ plot_data.head()
1766
+ ax = fig.add_subplot(111)
1767
+ ax.plot(plot_data.index, plot_data['act_v'],
1768
+ label='Actual', color='red')
1769
+ ax.set_title(
1770
+ 'Analysis of %s : Train Data' % group_col,
1771
+ fontsize=8)
1772
+ plt.xticks(plot_data.index,
1773
+ list(plot_data[group_col].astype(str)),
1774
+ rotation=90)
1775
+ if len(list(plot_data[group_col].astype(str))) > 50:
1776
+ plt.xticks(fontsize=3)
1777
+ else:
1778
+ plt.xticks(fontsize=6)
1779
+ plt.yticks(fontsize=6)
1780
+ ax2 = ax.twinx()
1781
+ ax2.bar(plot_data.index,
1782
+ plot_data[self.weight_nme],
1783
+ alpha=0.5, color='seagreen')
1784
+ plt.yticks(fontsize=6)
1785
+ plt.margins(0.05)
1786
+ plt.subplots_adjust(wspace=0.3)
1787
+ save_path = self.output_manager.plot_path(
1788
+ f'00_{self.model_nme}_{group_col}_oneway.png')
1789
+ plt.savefig(save_path, dpi=300)
1790
+ plt.close(fig)
1791
+
1792
+ # 定义Xgboost贝叶斯优化函数
1793
+ def bayesopt_xgb(self, max_evals=100):
1794
+ trainer = self.trainers['xgb']
1795
+ trainer.tune(max_evals)
1796
+ trainer.train()
1797
+ self.xgb_best = trainer.model
1798
+ # 记录最优参数及 trial 以便排查或复现结果
1799
+ self.best_xgb_params = trainer.best_params
1800
+ self.best_xgb_trial = trainer.best_trial
1801
+
1802
+ # 定义ResNet贝叶斯优化函数
1803
+ def bayesopt_resnet(self, max_evals=100):
1804
+ trainer = self.trainers['resn']
1805
+ trainer.tune(max_evals)
1806
+ trainer.train()
1807
+ self.resn_best = trainer.model
1808
+ # 保存最优 trial 相关信息,方便后续调参分析
1809
+ self.best_resn_params = trainer.best_params
1810
+ self.best_resn_trial = trainer.best_trial
1811
+
1812
+ # 定义 FT-Transformer 贝叶斯优化函数
1813
+ def bayesopt_ft(self, max_evals=50):
1814
+ trainer = self.trainers['ft']
1815
+ trainer.tune(max_evals)
1816
+ trainer.train()
1817
+ self.ft_best = trainer.model
1818
+ # FT-Transformer 参数较多,留存配置信息尤其重要
1819
+ self.best_ft_params = trainer.best_params
1820
+ self.best_ft_trial = trainer.best_trial
1821
+
1822
+ # 定义分箱函数
1823
+
1824
+ def _split_data(self, data, col_nme, wgt_nme, n_bins=10):
1825
+ # 先按得分排序再按累计权重等分,能保证每个分箱曝光量接近
1826
+ sorted_data = data.sort_values(by=col_nme, ascending=True).copy()
1827
+ sorted_data['cum_weight'] = sorted_data[wgt_nme].cumsum()
1828
+ w_sum = sorted_data[wgt_nme].sum()
1829
+ if w_sum <= EPS:
1830
+ sorted_data.loc[:, 'bins'] = 0
1831
+ else:
1832
+ sorted_data.loc[:, 'bins'] = np.floor(
1833
+ sorted_data['cum_weight'] * float(n_bins) / w_sum
1834
+ )
1835
+ sorted_data.loc[(sorted_data['bins'] == n_bins),
1836
+ 'bins'] = n_bins - 1
1837
+ return sorted_data.groupby(['bins'], observed=True).sum(numeric_only=True)
1838
+
1839
+ # 构建提纯曲线所需的数据
1840
+ def _plot_data_lift(self,
1841
+ pred_list, w_pred_list,
1842
+ w_act_list, weight_list, n_bins=10):
1843
+ lift_data = pd.DataFrame()
1844
+ lift_data.loc[:, 'pred'] = pred_list
1845
+ lift_data.loc[:, 'w_pred'] = w_pred_list
1846
+ lift_data.loc[:, 'act'] = w_act_list
1847
+ lift_data.loc[:, 'weight'] = weight_list
1848
+ plot_data = self._split_data(
1849
+ lift_data, 'pred', 'weight', n_bins)
1850
+ denom = np.maximum(plot_data['weight'], EPS)
1851
+ plot_data['exp_v'] = plot_data['w_pred'] / denom
1852
+ plot_data['act_v'] = plot_data['act'] / denom
1853
+ plot_data.reset_index(inplace=True)
1854
+ return plot_data
1855
+
1856
+ # 绘制提纯曲线
1857
    def plot_lift(self, model_label, pred_nme, n_bins=10):
        """Draw side-by-side lift charts (train vs. test) for one model.

        Args:
            model_label: display name; 'Xgboost', 'ResNet' and
                'FTTransformer' are mapped to their canonical prediction
                columns, overriding ``pred_nme``.
            pred_nme: prediction column to plot for any other label.
            n_bins: number of equal-exposure bins.
        """
        # Plot actual vs. predicted on both datasets.
        figpos_list = [121, 122]
        plot_dict = {
            121: self.train_data,
            122: self.test_data
        }
        name_list = {
            121: 'Train Data',
            122: 'Test Data'
        }
        if model_label == 'Xgboost':
            pred_nme = 'pred_xgb'
        elif model_label == 'ResNet':
            pred_nme = 'pred_resn'
        elif model_label == 'FTTransformer':
            pred_nme = 'pred_ft'
        # The mapping keeps column lookup uniform; a new model type must be
        # added here as well.

        fig = plt.figure(figsize=(11, 5))
        for figpos in figpos_list:
            plot_data = self._plot_data_lift(
                plot_dict[figpos][pred_nme].values,
                plot_dict[figpos]['w_'+pred_nme].values,
                plot_dict[figpos]['w_act'].values,
                plot_dict[figpos][self.weight_nme].values,
                n_bins)
            ax = fig.add_subplot(figpos)
            ax.plot(plot_data.index, plot_data['act_v'],
                    label='Actual', color='red')
            ax.plot(plot_data.index, plot_data['exp_v'],
                    label='Predicted', color='blue')
            ax.set_title(
                'Lift Chart on %s' % name_list[figpos], fontsize=8)
            plt.xticks(plot_data.index,
                       plot_data.index,
                       rotation=90, fontsize=6)
            plt.yticks(fontsize=6)
            plt.legend(loc='upper left',
                       fontsize=5, frameon=False)
            plt.margins(0.05)
            # Secondary axis: exposure bars behind the lift curves.
            ax2 = ax.twinx()
            ax2.bar(plot_data.index, plot_data['weight'],
                    alpha=0.5, color='seagreen',
                    label='Earned Exposure')
            plt.yticks(fontsize=6)
            plt.legend(loc='upper right',
                       fontsize=5, frameon=False)
        plt.subplots_adjust(wspace=0.3)
        save_path = self.output_manager.plot_path(
            f'01_{self.model_nme}_{model_label}_lift.png')
        plt.savefig(save_path, dpi=300)
        plt.show()
        plt.close(fig)
1911
+
1912
+ # 构建双提纯曲线所需的数据
1913
+ def _plot_data_dlift(self,
1914
+ pred_list_model1, pred_list_model2,
1915
+ w_list, w_act_list, n_bins=10):
1916
+ lift_data = pd.DataFrame()
1917
+ lift_data.loc[:, 'pred1'] = pred_list_model1
1918
+ lift_data.loc[:, 'pred2'] = pred_list_model2
1919
+ lift_data.loc[:, 'diff_ly'] = lift_data['pred1'] / lift_data['pred2']
1920
+ lift_data.loc[:, 'act'] = w_act_list
1921
+ lift_data.loc[:, 'weight'] = w_list
1922
+ plot_data = self._split_data(lift_data, 'diff_ly', 'weight', n_bins)
1923
+ denom = np.maximum(plot_data['act'], EPS)
1924
+ plot_data['exp_v1'] = plot_data['pred1'] / denom
1925
+ plot_data['exp_v2'] = plot_data['pred2'] / denom
1926
+ plot_data['act_v'] = plot_data['act'] / denom
1927
+ plot_data.reset_index(inplace=True)
1928
+ return plot_data
1929
+
1930
+ # 绘制双提纯曲线
1931
    # NOTE(review): mutable default argument below (['xgb', 'resn']); it is
    # never mutated in the body so it is harmless, but a tuple default
    # would be safer.
    def plot_dlift(self, model_comp: List[str] = ['xgb', 'resn'], n_bins: int = 10) -> None:
        """
        Draw double-lift charts comparing two models on train and test data.

        Args:
            model_comp: two model short names, e.g. ['xgb', 'resn'];
                supported keys are 'xgb', 'resn' and 'ft'.
            n_bins: number of equal-exposure bins.

        Raises:
            ValueError: when model_comp is not exactly two supported names.
        """
        if len(model_comp) != 2:
            raise ValueError("`model_comp` 必须包含两个模型进行对比。")

        model_name_map = {
            'xgb': 'Xgboost',
            'resn': 'ResNet',
            'ft': 'FTTransformer'
        }

        name1, name2 = model_comp
        if name1 not in model_name_map or name2 not in model_name_map:
            raise ValueError(f"不支持的模型简称。请从 {list(model_name_map.keys())} 中选择。")

        fig, axes = plt.subplots(1, 2, figsize=(11, 5))
        datasets = {
            'Train Data': self.train_data,
            'Test Data': self.test_data
        }

        for ax, (data_name, data) in zip(axes, datasets.items()):
            pred1_col = f'w_pred_{name1}'
            pred2_col = f'w_pred_{name2}'

            # Skip (with a warning) when a model has not been scored yet.
            if pred1_col not in data.columns or pred2_col not in data.columns:
                print(
                    f"警告: 在 {data_name} 中找不到预测列 {pred1_col} 或 {pred2_col}。跳过绘图。")
                continue

            plot_data = self._plot_data_dlift(
                data[pred1_col].values,
                data[pred2_col].values,
                data[self.weight_nme].values,
                data['w_act'].values,
                n_bins
            )

            label1 = model_name_map[name1]
            label2 = model_name_map[name2]

            ax.plot(plot_data.index,
                    plot_data['act_v'], label='Actual', color='red')
            ax.plot(plot_data.index,
                    plot_data['exp_v1'], label=label1, color='blue')
            ax.plot(plot_data.index,
                    plot_data['exp_v2'], label=label2, color='black')

            ax.set_title(f'Double Lift Chart on {data_name}', fontsize=8)
            ax.set_xticks(plot_data.index)
            ax.set_xticklabels(plot_data.index, rotation=90, fontsize=6)
            ax.set_xlabel(f'{label1} / {label2}', fontsize=6)
            ax.tick_params(axis='y', labelsize=6)
            ax.legend(loc='upper left', fontsize=5, frameon=False)
            ax.margins(0.1)

            # Secondary axis: exposure bars behind the lift curves.
            ax2 = ax.twinx()
            ax2.bar(plot_data.index, plot_data['weight'],
                    alpha=0.5, color='seagreen', label='Earned Exposure')
            ax2.tick_params(axis='y', labelsize=6)
            ax2.legend(loc='upper right', fontsize=5, frameon=False)

        plt.subplots_adjust(bottom=0.25, top=0.95, right=0.8, wspace=0.3)
        save_path = self.output_manager.plot_path(
            f'02_{self.model_nme}_dlift_{name1}_vs_{name2}.png')
        plt.savefig(save_path, dpi=300)
        plt.show()
        plt.close(fig)
2006
+
2007
+ # 保存模型
2008
+
2009
+ def save_model(self, model_name=None):
2010
+
2011
+ # model_name 可以是:
2012
+ # - None: 保存全部可用模型
2013
+ # - 'xgb': 只保存 Xgboost
2014
+ # - 'resn': 只保存 ResNet
2015
+ # - 'ft': 只保存 FT-Transformer
2016
+ if model_name in (None, 'xgb'):
2017
+ trainer = self.trainers['xgb']
2018
+ if trainer.model is not None:
2019
+ trainer.save()
2020
+ else:
2021
+ print("[save_model] Warning: xgb_best 不存在,未保存 Xgboost 模型。")
2022
+
2023
+ if model_name in (None, 'resn'):
2024
+ trainer = self.trainers['resn']
2025
+ if trainer.model is not None:
2026
+ trainer.save()
2027
+ else:
2028
+ print("[save_model] Warning: resn_best 不存在,未保存 ResNet 模型。")
2029
+
2030
+ if model_name in (None, 'ft'):
2031
+ trainer = self.trainers['ft']
2032
+ if trainer.model is not None:
2033
+ trainer.save()
2034
+ else:
2035
+ print("[save_model] Warning: ft_best 不存在,未保存 FT-Transformer 模型。")
2036
+
2037
+ def load_model(self, model_name=None):
2038
+ # model_name 可以是:
2039
+ # - None: 加载全部能找到的模型
2040
+ # - 'xgb': 只加载 Xgboost
2041
+ # - 'resn': 只加载 ResNet
2042
+ # - 'ft': 只加载 FT-Transformer
2043
+
2044
+ if model_name in (None, 'xgb'):
2045
+ trainer = self.trainers['xgb']
2046
+ trainer.load()
2047
+ self.xgb_best = trainer.model
2048
+ self.xgb_load = trainer.model
2049
+
2050
+ if model_name in (None, 'resn'):
2051
+ trainer = self.trainers['resn']
2052
+ trainer.load()
2053
+ self.resn_best = trainer.model
2054
+ self.resn_load = trainer.model
2055
+
2056
+ if model_name in (None, 'ft'):
2057
+ trainer = self.trainers['ft']
2058
+ trainer.load()
2059
+ self.ft_best = trainer.model
2060
+ self.ft_load = trainer.model
2061
+
2062
+ def _build_ft_shap_matrix(self, data: pd.DataFrame) -> np.ndarray:
2063
+
2064
+ # 将原始特征 DataFrame (包含 self.factor_nmes) 转成
2065
+ # 纯数值矩阵: 数值列为 float64,类别列为整数 code(float64 存储)。
2066
+ # 列顺序与 self.factor_nmes 保持一致。
2067
+
2068
+ matrices = []
2069
+
2070
+ for col in self.factor_nmes:
2071
+ s = data[col]
2072
+
2073
+ if col in self.cate_list:
2074
+ # 类别列:按训练时的类别全集编码
2075
+ cats = pd.Categorical(
2076
+ s,
2077
+ categories=self.cat_categories_for_shap[col]
2078
+ )
2079
+ # cats.codes 是一个 Index / ndarray,用 np.asarray 包一下再 reshape
2080
+ codes = np.asarray(cats.codes, dtype=np.float64).reshape(-1, 1)
2081
+ matrices.append(codes)
2082
+ else:
2083
+ # 数值列:转成 Series -> numpy -> reshape
2084
+ vals = pd.to_numeric(s, errors="coerce")
2085
+ arr = vals.to_numpy(dtype=np.float64, copy=True).reshape(-1, 1)
2086
+ matrices.append(arr)
2087
+
2088
+ X_mat = np.concatenate(matrices, axis=1) # (N, F)
2089
+ return X_mat
2090
+
2091
+ def _decode_ft_shap_matrix_to_df(self, X_mat: np.ndarray) -> pd.DataFrame:
2092
+
2093
+ # 将 SHAP 的数值矩阵 (N, F) 还原为原始特征 DataFrame,
2094
+ # 数值列为 float,类别列还原为 pandas 的 category 类型,
2095
+ # 以便兼容 enable_categorical=True 的 XGBoost 和 FT-Transformer 的输入。
2096
+ # 列顺序 = self.factor_nmes
2097
+
2098
+ data_dict = {}
2099
+
2100
+ for j, col in enumerate(self.factor_nmes):
2101
+ col_vals = X_mat[:, j]
2102
+
2103
+ if col in self.cate_list:
2104
+ cats = self.cat_categories_for_shap[col]
2105
+
2106
+ # SHAP 会扰动成小数,这里 round 回整数 code
2107
+ codes = np.round(col_vals).astype(int)
2108
+ # 限制在 [-1, len(cats)-1]
2109
+ codes = np.clip(codes, -1, len(cats) - 1)
2110
+
2111
+ # 使用 pandas.Categorical.from_codes:
2112
+ # - codes = -1 被当成缺失 (NaN)
2113
+ # - 其他索引映射到 cats 中对应的类别
2114
+ cat_series = pd.Categorical.from_codes(
2115
+ codes,
2116
+ categories=cats
2117
+ )
2118
+ # 存的是 Categorical 类型,而不是 object
2119
+ data_dict[col] = cat_series
2120
+ else:
2121
+ # 数值列:直接 float
2122
+ data_dict[col] = col_vals.astype(float)
2123
+
2124
+ df = pd.DataFrame(data_dict, columns=self.factor_nmes)
2125
+
2126
+ # 再保险:确保所有类别列 dtype 真的是 category
2127
+ for col in self.cate_list:
2128
+ if col in df.columns:
2129
+ df[col] = df[col].astype("category")
2130
+ return df
2131
+
2132
+ # ========= XGBoost SHAP =========
2133
+
2134
+ def compute_shap_xgb(self, n_background: int = 500,
2135
+ n_samples: int = 200,
2136
+ on_train: bool = True):
2137
+ # 使用 KernelExplainer 计算 XGBoost 的 SHAP 值(黑盒方式)。
2138
+ #
2139
+ # - 对 SHAP:输入是一份纯数值矩阵:
2140
+ # * 数值特征:float64
2141
+ # * 类别特征:用 _build_ft_shap_matrix 编码后的整数 code(float64)
2142
+ # - 对模型:仍然用原始 DataFrame + xgb_best.predict(...)
2143
+
2144
+ if not hasattr(self, "xgb_best"):
2145
+ raise RuntimeError("请先运行 bayesopt_xgb() 训练好 self.xgb_best")
2146
+
2147
+ # 1) 选择数据源:训练集 or 测试集(原始特征空间)
2148
+ data = self.train_data if on_train else self.test_data
2149
+ X_raw = data[self.factor_nmes]
2150
+
2151
+ # 2) 构造背景矩阵(用和 FT 一样的数值编码)
2152
+ background_raw = X_raw.sample(
2153
+ min(len(X_raw), n_background),
2154
+ random_state=self.rand_seed
2155
+ )
2156
+ # KernelExplainer 计算量极大,务必控制背景样本规模,否则会拖慢调试
2157
+ background_mat = self._build_ft_shap_matrix(
2158
+ background_raw
2159
+ ).astype(np.float64, copy=True)
2160
+
2161
+ # 3) 定义黑盒预测函数:数值矩阵 -> DataFrame -> xgb_best.predict
2162
+ def f_predict(x_mat: np.ndarray) -> np.ndarray:
2163
+ # 把编码矩阵还原成原始 DataFrame(数值+类别)
2164
+ df_input = self._decode_ft_shap_matrix_to_df(x_mat)
2165
+ # 注意:这里用的是 self.xgb_best.predict,和你训练/预测时一致
2166
+ y_pred = self.xgb_best.predict(df_input)
2167
+ return y_pred
2168
+
2169
+ explainer = shap.KernelExplainer(f_predict, background_mat)
2170
+
2171
+ # 4) 要解释的样本:原始特征 + 数值编码
2172
+ X_explain_raw = X_raw.sample(
2173
+ min(len(X_raw), n_samples),
2174
+ random_state=self.rand_seed
2175
+ )
2176
+ X_explain_mat = self._build_ft_shap_matrix(
2177
+ X_explain_raw
2178
+ ).astype(np.float64, copy=True)
2179
+
2180
+ # 5) 计算 SHAP 值(注意用 nsamples='auto' 控制复杂度)
2181
+ shap_values = explainer.shap_values(X_explain_mat, nsamples="auto")
2182
+
2183
+ # 6) 保存结果:
2184
+ # - shap_values:数值编码空间,对应 factor_nmes 的每一列
2185
+ # - X_explain_raw:原始 DataFrame,方便画图时显示真实类别名
2186
+ self.shap_xgb = {
2187
+ "explainer": explainer,
2188
+ "X_explain": X_explain_raw,
2189
+ "shap_values": shap_values,
2190
+ "base_value": explainer.expected_value,
2191
+ }
2192
+ return self.shap_xgb
2193
+ # ========= ResNet SHAP =========
2194
+
2195
+ def _resn_predict_wrapper(self, X_np):
2196
+ # 保证走 CPU
2197
+ model = self.resn_best.resnet.to("cpu")
2198
+ with torch.no_grad():
2199
+ # 不要 .to(self.device)
2200
+ X_tensor = torch.tensor(X_np, dtype=torch.float32)
2201
+ y_pred = model(X_tensor).cpu().numpy()
2202
+ y_pred = np.clip(y_pred, 1e-6, None)
2203
+ return y_pred.reshape(-1)
2204
+
2205
+ def compute_shap_resn(self, n_background: int = 500,
2206
+ n_samples: int = 200,
2207
+ on_train: bool = True):
2208
+
2209
+ # 使用 KernelExplainer 计算 ResNet 的 SHAP 值。
2210
+ # 解释空间:已 one-hot & 标准化后的特征 self.var_nmes。
2211
+
2212
+ if not hasattr(self, 'resn_best'):
2213
+ raise RuntimeError("请先运行 bayesopt_resnet() 训练好 resn_best")
2214
+
2215
+ self.resn_best.device = torch.device("cpu") # 强制走 CPU
2216
+ self.resn_best.resnet.to("cpu")
2217
+ if torch.cuda.is_available():
2218
+ torch.cuda.empty_cache()
2219
+
2220
+ # 选择数据集(已 one-hot & 标准化)
2221
+ data = self.train_oht_scl_data if on_train else self.test_oht_scl_data
2222
+ X = data[self.var_nmes]
2223
+ if len(X) == 0:
2224
+ raise ValueError(
2225
+ "compute_shap_resn: 选择的数据集为空(len(X)==0),无法计算 SHAP。")
2226
+
2227
+ # 背景样本:float64 numpy
2228
+ background_df = X.sample(
2229
+ min(len(X), n_background),
2230
+ random_state=self.rand_seed
2231
+ )
2232
+ background_np = background_df.to_numpy(dtype=np.float64, copy=True)
2233
+
2234
+ # 黑盒预测函数
2235
+ def f_predict(x):
2236
+ y = self._resn_predict_wrapper(x)
2237
+ # 保证是一维数组
2238
+ y = np.asarray(y, dtype=np.float64).reshape(-1)
2239
+ return y
2240
+
2241
+ explainer = shap.KernelExplainer(f_predict, background_np)
2242
+
2243
+ # 要解释的样本
2244
+ X_explain_df = X.sample(
2245
+ min(len(X), n_samples),
2246
+ random_state=self.rand_seed
2247
+ )
2248
+ X_explain_np = X_explain_df.to_numpy(dtype=np.float64, copy=True)
2249
+
2250
+ max_nsamples = 300
2251
+ min_needed = X_explain_np.shape[1] + 2
2252
+ nsample_eff = max(min_needed, min(max_nsamples,
2253
+ X_explain_np.shape[0] * X_explain_np.shape[1]))
2254
+ shap_values = explainer.shap_values(X_explain_np, nsamples=nsample_eff)
2255
+ # 手动计算 base_value,避免 NotOneValueFound
2256
+ bg_pred = f_predict(background_np)
2257
+ if bg_pred.size == 0:
2258
+ raise ValueError("compute_shap_resn: 背景样本预测结果为空,无法计算 base_value。")
2259
+ base_value = float(bg_pred.mean())
2260
+
2261
+ self.shap_resn = {
2262
+ "explainer": explainer,
2263
+ "X_explain": X_explain_df, # DataFrame: 用于画图(有列名)
2264
+ "shap_values": shap_values, # numpy: (n_samples, n_features)
2265
+ # "base_value": explainer.expected_value,
2266
+ "base_value": base_value,
2267
+ }
2268
+ return self.shap_resn
2269
+
2270
+ # ========= FT-Transformer SHAP =========
2271
+
2272
+ def _ft_shap_predict_wrapper(self, X_mat: np.ndarray) -> np.ndarray:
2273
+
2274
+ # SHAP 的预测包装:
2275
+ # 数值矩阵 -> 还原为原始特征 DataFrame -> 调用 ft_best.predict
2276
+
2277
+ df_input = self._decode_ft_shap_matrix_to_df(X_mat)
2278
+ y_pred = self.ft_best.predict(df_input)
2279
+ return np.asarray(y_pred, dtype=np.float64).reshape(-1)
2280
+
2281
+ def compute_shap_ft(self, n_background: int = 500,
2282
+ n_samples: int = 200,
2283
+ on_train: bool = True):
2284
+
2285
+ # 使用 KernelExplainer 计算 FT-Transformer 的 SHAP 值。
2286
+ # 解释空间:数值+类别code 的混合数值矩阵(float64),
2287
+ # 但对外展示时仍使用原始特征名/取值(X_explain)。
2288
+
2289
+ if not hasattr(self, "ft_best"):
2290
+ raise RuntimeError("请先运行 bayesopt_ft() 训练好 ft_best")
2291
+
2292
+ self.ft_best.device = torch.device("cpu") # 强制走 CPU
2293
+ self.ft_best.ft.to("cpu")
2294
+ if torch.cuda.is_available():
2295
+ torch.cuda.empty_cache()
2296
+
2297
+ # 选择数据源(原始特征空间)
2298
+ data = self.train_data if on_train else self.test_data
2299
+ X_raw = data[self.factor_nmes]
2300
+
2301
+ # 背景矩阵
2302
+ background_raw = X_raw.sample(
2303
+ min(len(X_raw), n_background),
2304
+ random_state=self.rand_seed
2305
+ )
2306
+ background_mat = self._build_ft_shap_matrix(
2307
+ background_raw
2308
+ ).astype(np.float64, copy=True)
2309
+
2310
+ # 黑盒预测函数(数值矩阵 → DataFrame → FT 模型)
2311
+ def f_predict(x):
2312
+ return self._ft_shap_predict_wrapper(x)
2313
+
2314
+ explainer = shap.KernelExplainer(f_predict, background_mat)
2315
+
2316
+ # 要解释的样本(原始特征空间)
2317
+ X_explain_raw = X_raw.sample(
2318
+ min(len(X_raw), n_samples),
2319
+ random_state=self.rand_seed
2320
+ )
2321
+ X_explain_mat = self._build_ft_shap_matrix(
2322
+ X_explain_raw
2323
+ ).astype(np.float64, copy=True)
2324
+
2325
+ max_nsamples = 300
2326
+ min_needed = X_explain_mat.shape[1] + 2
2327
+ nsample_eff = max(min_needed, min(max_nsamples,
2328
+ X_explain_mat.shape[0] * X_explain_mat.shape[1]))
2329
+ shap_values = explainer.shap_values(
2330
+ X_explain_mat, nsamples=nsample_eff)
2331
+ bg_pred = self._ft_shap_predict_wrapper(background_mat)
2332
+ bg_pred = np.asarray(bg_pred, dtype=np.float64).reshape(-1)
2333
+ base_value = float(bg_pred.mean())
2334
+
2335
+ self.shap_ft = {
2336
+ "explainer": explainer,
2337
+ "X_explain": X_explain_raw, # 原始特征 DataFrame,用来画图
2338
+ "shap_values": shap_values, # numpy: (n_samples, n_features)
2339
+ # "base_value": explainer.expected_value,
2340
+ "base_value": base_value,
2341
+ }
2342
+ return self.shap_ft