ins-pricing 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ins_pricing/README.md +60 -0
- ins_pricing/__init__.py +102 -0
- ins_pricing/governance/README.md +18 -0
- ins_pricing/governance/__init__.py +20 -0
- ins_pricing/governance/approval.py +93 -0
- ins_pricing/governance/audit.py +37 -0
- ins_pricing/governance/registry.py +99 -0
- ins_pricing/governance/release.py +159 -0
- ins_pricing/modelling/BayesOpt.py +146 -0
- ins_pricing/modelling/BayesOpt_USAGE.md +925 -0
- ins_pricing/modelling/BayesOpt_entry.py +575 -0
- ins_pricing/modelling/BayesOpt_incremental.py +731 -0
- ins_pricing/modelling/Explain_Run.py +36 -0
- ins_pricing/modelling/Explain_entry.py +539 -0
- ins_pricing/modelling/Pricing_Run.py +36 -0
- ins_pricing/modelling/README.md +33 -0
- ins_pricing/modelling/__init__.py +44 -0
- ins_pricing/modelling/bayesopt/__init__.py +98 -0
- ins_pricing/modelling/bayesopt/config_preprocess.py +303 -0
- ins_pricing/modelling/bayesopt/core.py +1476 -0
- ins_pricing/modelling/bayesopt/models.py +2196 -0
- ins_pricing/modelling/bayesopt/trainers.py +2446 -0
- ins_pricing/modelling/bayesopt/utils.py +1021 -0
- ins_pricing/modelling/cli_common.py +136 -0
- ins_pricing/modelling/explain/__init__.py +55 -0
- ins_pricing/modelling/explain/gradients.py +334 -0
- ins_pricing/modelling/explain/metrics.py +176 -0
- ins_pricing/modelling/explain/permutation.py +155 -0
- ins_pricing/modelling/explain/shap_utils.py +146 -0
- ins_pricing/modelling/notebook_utils.py +284 -0
- ins_pricing/modelling/plotting/__init__.py +45 -0
- ins_pricing/modelling/plotting/common.py +63 -0
- ins_pricing/modelling/plotting/curves.py +572 -0
- ins_pricing/modelling/plotting/diagnostics.py +139 -0
- ins_pricing/modelling/plotting/geo.py +362 -0
- ins_pricing/modelling/plotting/importance.py +121 -0
- ins_pricing/modelling/run_logging.py +133 -0
- ins_pricing/modelling/tests/conftest.py +8 -0
- ins_pricing/modelling/tests/test_cross_val_generic.py +66 -0
- ins_pricing/modelling/tests/test_distributed_utils.py +18 -0
- ins_pricing/modelling/tests/test_explain.py +56 -0
- ins_pricing/modelling/tests/test_geo_tokens_split.py +49 -0
- ins_pricing/modelling/tests/test_graph_cache.py +33 -0
- ins_pricing/modelling/tests/test_plotting.py +63 -0
- ins_pricing/modelling/tests/test_plotting_library.py +150 -0
- ins_pricing/modelling/tests/test_preprocessor.py +48 -0
- ins_pricing/modelling/watchdog_run.py +211 -0
- ins_pricing/pricing/README.md +44 -0
- ins_pricing/pricing/__init__.py +27 -0
- ins_pricing/pricing/calibration.py +39 -0
- ins_pricing/pricing/data_quality.py +117 -0
- ins_pricing/pricing/exposure.py +85 -0
- ins_pricing/pricing/factors.py +91 -0
- ins_pricing/pricing/monitoring.py +99 -0
- ins_pricing/pricing/rate_table.py +78 -0
- ins_pricing/production/__init__.py +21 -0
- ins_pricing/production/drift.py +30 -0
- ins_pricing/production/monitoring.py +143 -0
- ins_pricing/production/scoring.py +40 -0
- ins_pricing/reporting/README.md +20 -0
- ins_pricing/reporting/__init__.py +11 -0
- ins_pricing/reporting/report_builder.py +72 -0
- ins_pricing/reporting/scheduler.py +45 -0
- ins_pricing/setup.py +41 -0
- ins_pricing v2/__init__.py +23 -0
- ins_pricing v2/governance/__init__.py +20 -0
- ins_pricing v2/governance/approval.py +93 -0
- ins_pricing v2/governance/audit.py +37 -0
- ins_pricing v2/governance/registry.py +99 -0
- ins_pricing v2/governance/release.py +159 -0
- ins_pricing v2/modelling/Explain_Run.py +36 -0
- ins_pricing v2/modelling/Pricing_Run.py +36 -0
- ins_pricing v2/modelling/__init__.py +151 -0
- ins_pricing v2/modelling/cli_common.py +141 -0
- ins_pricing v2/modelling/config.py +249 -0
- ins_pricing v2/modelling/config_preprocess.py +254 -0
- ins_pricing v2/modelling/core.py +741 -0
- ins_pricing v2/modelling/data_container.py +42 -0
- ins_pricing v2/modelling/explain/__init__.py +55 -0
- ins_pricing v2/modelling/explain/gradients.py +334 -0
- ins_pricing v2/modelling/explain/metrics.py +176 -0
- ins_pricing v2/modelling/explain/permutation.py +155 -0
- ins_pricing v2/modelling/explain/shap_utils.py +146 -0
- ins_pricing v2/modelling/features.py +215 -0
- ins_pricing v2/modelling/model_manager.py +148 -0
- ins_pricing v2/modelling/model_plotting.py +463 -0
- ins_pricing v2/modelling/models.py +2203 -0
- ins_pricing v2/modelling/notebook_utils.py +294 -0
- ins_pricing v2/modelling/plotting/__init__.py +45 -0
- ins_pricing v2/modelling/plotting/common.py +63 -0
- ins_pricing v2/modelling/plotting/curves.py +572 -0
- ins_pricing v2/modelling/plotting/diagnostics.py +139 -0
- ins_pricing v2/modelling/plotting/geo.py +362 -0
- ins_pricing v2/modelling/plotting/importance.py +121 -0
- ins_pricing v2/modelling/run_logging.py +133 -0
- ins_pricing v2/modelling/tests/conftest.py +8 -0
- ins_pricing v2/modelling/tests/test_cross_val_generic.py +66 -0
- ins_pricing v2/modelling/tests/test_distributed_utils.py +18 -0
- ins_pricing v2/modelling/tests/test_explain.py +56 -0
- ins_pricing v2/modelling/tests/test_geo_tokens_split.py +49 -0
- ins_pricing v2/modelling/tests/test_graph_cache.py +33 -0
- ins_pricing v2/modelling/tests/test_plotting.py +63 -0
- ins_pricing v2/modelling/tests/test_plotting_library.py +150 -0
- ins_pricing v2/modelling/tests/test_preprocessor.py +48 -0
- ins_pricing v2/modelling/trainers.py +2447 -0
- ins_pricing v2/modelling/utils.py +1020 -0
- ins_pricing v2/modelling/watchdog_run.py +211 -0
- ins_pricing v2/pricing/__init__.py +27 -0
- ins_pricing v2/pricing/calibration.py +39 -0
- ins_pricing v2/pricing/data_quality.py +117 -0
- ins_pricing v2/pricing/exposure.py +85 -0
- ins_pricing v2/pricing/factors.py +91 -0
- ins_pricing v2/pricing/monitoring.py +99 -0
- ins_pricing v2/pricing/rate_table.py +78 -0
- ins_pricing v2/production/__init__.py +21 -0
- ins_pricing v2/production/drift.py +30 -0
- ins_pricing v2/production/monitoring.py +143 -0
- ins_pricing v2/production/scoring.py +40 -0
- ins_pricing v2/reporting/__init__.py +11 -0
- ins_pricing v2/reporting/report_builder.py +72 -0
- ins_pricing v2/reporting/scheduler.py +45 -0
- ins_pricing v2/scripts/BayesOpt_incremental.py +722 -0
- ins_pricing v2/scripts/Explain_entry.py +545 -0
- ins_pricing v2/scripts/__init__.py +1 -0
- ins_pricing v2/scripts/train.py +568 -0
- ins_pricing v2/setup.py +55 -0
- ins_pricing v2/smoke_test.py +28 -0
- ins_pricing-0.1.6.dist-info/METADATA +78 -0
- ins_pricing-0.1.6.dist-info/RECORD +169 -0
- ins_pricing-0.1.6.dist-info/WHEEL +5 -0
- ins_pricing-0.1.6.dist-info/top_level.txt +4 -0
- user_packages/__init__.py +105 -0
- user_packages legacy/BayesOpt.py +5659 -0
- user_packages legacy/BayesOpt_entry.py +513 -0
- user_packages legacy/BayesOpt_incremental.py +685 -0
- user_packages legacy/Pricing_Run.py +36 -0
- user_packages legacy/Try/BayesOpt Legacy251213.py +3719 -0
- user_packages legacy/Try/BayesOpt Legacy251215.py +3758 -0
- user_packages legacy/Try/BayesOpt lagecy251201.py +3506 -0
- user_packages legacy/Try/BayesOpt lagecy251218.py +3992 -0
- user_packages legacy/Try/BayesOpt legacy.py +3280 -0
- user_packages legacy/Try/BayesOpt.py +838 -0
- user_packages legacy/Try/BayesOptAll.py +1569 -0
- user_packages legacy/Try/BayesOptAllPlatform.py +909 -0
- user_packages legacy/Try/BayesOptCPUGPU.py +1877 -0
- user_packages legacy/Try/BayesOptSearch.py +830 -0
- user_packages legacy/Try/BayesOptSearchOrigin.py +829 -0
- user_packages legacy/Try/BayesOptV1.py +1911 -0
- user_packages legacy/Try/BayesOptV10.py +2973 -0
- user_packages legacy/Try/BayesOptV11.py +3001 -0
- user_packages legacy/Try/BayesOptV12.py +3001 -0
- user_packages legacy/Try/BayesOptV2.py +2065 -0
- user_packages legacy/Try/BayesOptV3.py +2209 -0
- user_packages legacy/Try/BayesOptV4.py +2342 -0
- user_packages legacy/Try/BayesOptV5.py +2372 -0
- user_packages legacy/Try/BayesOptV6.py +2759 -0
- user_packages legacy/Try/BayesOptV7.py +2832 -0
- user_packages legacy/Try/BayesOptV8Codex.py +2731 -0
- user_packages legacy/Try/BayesOptV8Gemini.py +2614 -0
- user_packages legacy/Try/BayesOptV9.py +2927 -0
- user_packages legacy/Try/BayesOpt_entry legacy.py +313 -0
- user_packages legacy/Try/ModelBayesOptSearch.py +359 -0
- user_packages legacy/Try/ResNetBayesOptSearch.py +249 -0
- user_packages legacy/Try/XgbBayesOptSearch.py +121 -0
- user_packages legacy/Try/xgbbayesopt.py +523 -0
- user_packages legacy/__init__.py +19 -0
- user_packages legacy/cli_common.py +124 -0
- user_packages legacy/notebook_utils.py +228 -0
- user_packages legacy/watchdog_run.py +202 -0
@@ -0,0 +1,3001 @@

# Data transfer between CPU and GPU is costly; multiple CUDA streams can
# overlap transfers with compute to support larger datasets.

import copy
import gc
import math
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional
import csv

import joblib
import matplotlib.pyplot as plt
import numpy as np  # 1.26.2
import optuna  # 4.3.0
import pandas as pd  # 2.2.3
import shap
import statsmodels.api as sm

import torch  # version: 1.10.1+cu111
import torch.nn as nn
import torch.nn.functional as F
import xgboost as xgb  # 1.7.0

from torch.utils.data import Dataset, DataLoader, TensorDataset, DistributedSampler
from torch.cuda.amp import autocast, GradScaler
from torch.nn.utils import clip_grad_norm_
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
from sklearn.model_selection import ShuffleSplit, cross_val_score  # 1.2.2
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import log_loss, make_scorer, mean_tweedie_deviance

# Constants and utility module
# =============================================================================
torch.backends.cudnn.benchmark = True
EPS = 1e-8

class IOUtils:
    # Small collection of file and path helpers.

    @staticmethod
    def csv_to_dict(file_path: str) -> List[Dict[str, Any]]:
        with open(file_path, mode='r', encoding='utf-8') as file:
            reader = csv.DictReader(file)
            return [
                dict(filter(lambda item: item[0] != '', row.items()))
                for row in reader
            ]

    @staticmethod
    def ensure_parent_dir(file_path: str) -> None:
        # Create the parent directory of the target file if it does not exist.
        directory = os.path.dirname(file_path)
        if directory:
            os.makedirs(directory, exist_ok=True)

class TrainingUtils:
    # Small helpers used throughout the training phase.

    @staticmethod
    def compute_batch_size(data_size: int, learning_rate: float, batch_num: int, minimum: int) -> int:
        estimated = int((learning_rate / 1e-4) ** 0.5 *
                        (data_size / max(batch_num, 1)))
        return max(1, min(data_size, max(minimum, estimated)))

    @staticmethod
    def tweedie_loss(pred, target, p=1.5, eps=1e-6, max_clip=1e6):
        # Clamp predictions to positive values for numerical stability.
        pred_clamped = torch.clamp(pred, min=eps)
        if p == 1:
            term1 = target * torch.log(target / pred_clamped + eps)  # Poisson
            term2 = -target + pred_clamped
            term3 = 0
        elif p == 0:
            term1 = 0.5 * torch.pow(target - pred_clamped, 2)  # Gaussian
            term2 = 0
            term3 = 0
        elif p == 2:
            term1 = torch.log(pred_clamped / target + eps)  # Gamma
            term2 = -target / pred_clamped + 1
            term3 = 0
        else:
            term1 = torch.pow(target, 2 - p) / ((1 - p) * (2 - p))
            term2 = target * torch.pow(pred_clamped, 1 - p) / (1 - p)
            term3 = torch.pow(pred_clamped, 2 - p) / (2 - p)
        return torch.nan_to_num(  # Tweedie negative log-likelihood (constant terms dropped)
            2 * (term1 - term2 + term3),
            nan=eps,
            posinf=max_clip,
            neginf=-max_clip
        )

    @staticmethod
    def free_cuda() -> None:
        print(">>> Moving all models to CPU...")
        for obj in gc.get_objects():
            try:
                if hasattr(obj, "to") and callable(obj.to):
                    obj.to("cpu")
            except Exception:
                pass

        print(">>> Deleting tensors, optimizers, dataloaders...")
        gc.collect()

        print(">>> Emptying CUDA cache...")
        torch.cuda.empty_cache()
        torch.cuda.synchronize()

        print(">>> CUDA memory freed.")

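
# Editor's note: a minimal sanity check, not part of the packaged file. For
# 1 < p < 2 the elementwise deviance above, once averaged, should agree with
# sklearn's mean_tweedie_deviance, since both drop the same constant terms.
def _demo_tweedie_loss():
    y = torch.tensor([0.0, 1.2, 3.5, 0.7])
    mu = torch.tensor([0.5, 1.0, 2.8, 0.9])
    torch_dev = TrainingUtils.tweedie_loss(mu, y, p=1.5).mean().item()
    sk_dev = mean_tweedie_deviance(y.numpy(), mu.numpy(), power=1.5)
    print(f"torch: {torch_dev:.6f}  sklearn: {sk_dev:.6f}")  # near-identical
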
class DistributedUtils:
    _cached_state: Optional[tuple] = None

    @staticmethod
    def setup_ddp():
        """Initialize DDP process group."""
        if dist.is_initialized():
            if DistributedUtils._cached_state is None:
                rank = dist.get_rank()
                world_size = dist.get_world_size()
                local_rank = int(os.environ.get("LOCAL_RANK", 0))
                DistributedUtils._cached_state = (
                    True,
                    local_rank,
                    rank,
                    world_size,
                )
            return DistributedUtils._cached_state

        if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
            rank = int(os.environ["RANK"])
            world_size = int(os.environ["WORLD_SIZE"])
            local_rank = int(os.environ["LOCAL_RANK"])

            if torch.cuda.is_available():
                torch.cuda.set_device(local_rank)

            dist.init_process_group(backend="nccl", init_method="env://")
            print(
                f">>> DDP Initialized: Rank {rank}/{world_size}, Local Rank {local_rank}")
            DistributedUtils._cached_state = (
                True,
                local_rank,
                rank,
                world_size,
            )
            return DistributedUtils._cached_state
        else:
            print(
                f">>> DDP Setup Failed: RANK or WORLD_SIZE not found in env. Keys found: {list(os.environ.keys())}")
            return False, 0, 0, 1

    @staticmethod
    def cleanup_ddp():
        """Destroy DDP process group."""
        if dist.is_initialized():
            dist.destroy_process_group()
        DistributedUtils._cached_state = None

    @staticmethod
    def is_main_process():
        return not dist.is_initialized() or dist.get_rank() == 0

    @staticmethod
    def world_size() -> int:
        return dist.get_world_size() if dist.is_initialized() else 1

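
# Editor's note: an illustrative sketch, not part of the packaged file.
# setup_ddp() only activates under a launcher that exports RANK, WORLD_SIZE
# and LOCAL_RANK, e.g. `torchrun --nproc_per_node=4 <script>.py`; a plain
# single-process run falls through to (False, 0, 0, 1).
def _demo_ddp_state():
    is_ddp, local_rank, rank, world_size = DistributedUtils.setup_ddp()
    print(f"ddp={is_ddp} local_rank={local_rank} rank={rank} world_size={world_size}")
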
class PlotUtils:
    # Plotting helpers shared across model types.

    @staticmethod
    def split_data(data: pd.DataFrame, col_nme: str, wgt_nme: str, n_bins: int = 10) -> pd.DataFrame:
        data_sorted = data.sort_values(by=col_nme, ascending=True).copy()
        data_sorted['cum_weight'] = data_sorted[wgt_nme].cumsum()
        w_sum = data_sorted[wgt_nme].sum()
        if w_sum <= EPS:
            data_sorted.loc[:, 'bins'] = 0
        else:
            data_sorted.loc[:, 'bins'] = np.floor(
                data_sorted['cum_weight'] * float(n_bins) / w_sum
            )
            data_sorted.loc[(data_sorted['bins'] == n_bins),
                            'bins'] = n_bins - 1
        return data_sorted.groupby(['bins'], observed=True).sum(numeric_only=True)

    @staticmethod
    def plot_lift_ax(ax, plot_data, title, pred_label='Predicted', act_label='Actual', weight_label='Earned Exposure'):
        ax.plot(plot_data.index, plot_data['act_v'],
                label=act_label, color='red')
        ax.plot(plot_data.index, plot_data['exp_v'],
                label=pred_label, color='blue')
        ax.set_title(title, fontsize=8)
        ax.set_xticks(plot_data.index)
        ax.set_xticklabels(plot_data.index, rotation=90, fontsize=6)
        ax.tick_params(axis='y', labelsize=6)
        ax.legend(loc='upper left', fontsize=5, frameon=False)
        ax.margins(0.05)
        ax2 = ax.twinx()
        ax2.bar(plot_data.index, plot_data['weight'],
                alpha=0.5, color='seagreen',
                label=weight_label)
        ax2.tick_params(axis='y', labelsize=6)
        ax2.legend(loc='upper right', fontsize=5, frameon=False)

    @staticmethod
    def plot_dlift_ax(ax, plot_data, title, label1, label2, act_label='Actual', weight_label='Earned Exposure'):
        ax.plot(plot_data.index, plot_data['act_v'],
                label=act_label, color='red')
        ax.plot(plot_data.index, plot_data['exp_v1'],
                label=label1, color='blue')
        ax.plot(plot_data.index, plot_data['exp_v2'],
                label=label2, color='black')
        ax.set_title(title, fontsize=8)
        ax.set_xticks(plot_data.index)
        ax.set_xticklabels(plot_data.index, rotation=90, fontsize=6)
        ax.set_xlabel(f'{label1} / {label2}', fontsize=6)
        ax.tick_params(axis='y', labelsize=6)
        ax.legend(loc='upper left', fontsize=5, frameon=False)
        ax.margins(0.1)
        ax2 = ax.twinx()
        ax2.bar(plot_data.index, plot_data['weight'],
                alpha=0.5, color='seagreen',
                label=weight_label)
        ax2.tick_params(axis='y', labelsize=6)
        ax2.legend(loc='upper right', fontsize=5, frameon=False)

    @staticmethod
    def plot_lift_list(pred_model, w_pred_list, w_act_list,
                       weight_list, tgt_nme, n_bins: int = 10,
                       fig_nme: str = 'Lift Chart'):
        lift_data = pd.DataFrame()
        lift_data.loc[:, 'pred'] = pred_model
        lift_data.loc[:, 'w_pred'] = w_pred_list
        lift_data.loc[:, 'act'] = w_act_list
        lift_data.loc[:, 'weight'] = weight_list
        plot_data = PlotUtils.split_data(lift_data, 'pred', 'weight', n_bins)
        plot_data['exp_v'] = plot_data['w_pred'] / plot_data['weight']
        plot_data['act_v'] = plot_data['act'] / plot_data['weight']
        plot_data.reset_index(inplace=True)

        fig = plt.figure(figsize=(7, 5))
        ax = fig.add_subplot(111)
        PlotUtils.plot_lift_ax(ax, plot_data, f'Lift Chart of {tgt_nme}')
        plt.subplots_adjust(wspace=0.3)

        save_path = os.path.join(
            os.getcwd(), 'plot', f'05_{tgt_nme}_{fig_nme}.png')
        IOUtils.ensure_parent_dir(save_path)
        plt.savefig(save_path, dpi=300)
        plt.close(fig)

    @staticmethod
    def plot_dlift_list(pred_model_1, pred_model_2,
                        model_nme_1, model_nme_2,
                        tgt_nme,
                        w_list, w_act_list, n_bins: int = 10,
                        fig_nme: str = 'Double Lift Chart'):
        lift_data = pd.DataFrame()
        lift_data.loc[:, 'pred1'] = pred_model_1
        lift_data.loc[:, 'pred2'] = pred_model_2
        lift_data.loc[:, 'diff_ly'] = lift_data['pred1'] / lift_data['pred2']
        lift_data.loc[:, 'act'] = w_act_list
        lift_data.loc[:, 'weight'] = w_list
        lift_data.loc[:, 'w_pred1'] = lift_data['pred1'] * lift_data['weight']
        lift_data.loc[:, 'w_pred2'] = lift_data['pred2'] * lift_data['weight']
        plot_data = PlotUtils.split_data(
            lift_data, 'diff_ly', 'weight', n_bins)
        plot_data['exp_v1'] = plot_data['w_pred1'] / plot_data['act']
        plot_data['exp_v2'] = plot_data['w_pred2'] / plot_data['act']
        plot_data['act_v'] = plot_data['act'] / plot_data['act']
        plot_data.reset_index(inplace=True)

        fig = plt.figure(figsize=(7, 5))
        ax = fig.add_subplot(111)
        PlotUtils.plot_dlift_ax(
            ax, plot_data, f'Double Lift Chart of {tgt_nme}', model_nme_1, model_nme_2)
        plt.subplots_adjust(bottom=0.25, top=0.95, right=0.8)

        save_path = os.path.join(
            os.getcwd(), 'plot', f'06_{tgt_nme}_{fig_nme}.png')
        IOUtils.ensure_parent_dir(save_path)
        plt.savefig(save_path, dpi=300)
        plt.close(fig)

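
# Editor's note: a small usage sketch, not part of the packaged file.
# split_data assigns records to bins of roughly equal cumulative weight, so
# every point on a lift chart carries comparable exposure.
def _demo_split_data():
    rng = np.random.default_rng(0)
    df = pd.DataFrame({"pred": rng.random(1000),
                       "weight": rng.uniform(0.5, 1.5, 1000)})
    binned = PlotUtils.split_data(df, "pred", "weight", n_bins=10)
    print(binned["weight"])  # each bin holds about one tenth of the total weight
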
# Backwards-compatible functional wrappers
def csv_to_dict(file_path: str) -> List[Dict[str, Any]]:
    return IOUtils.csv_to_dict(file_path)


def ensure_parent_dir(file_path: str) -> None:
    IOUtils.ensure_parent_dir(file_path)


def compute_batch_size(data_size: int, learning_rate: float, batch_num: int, minimum: int) -> int:
    return TrainingUtils.compute_batch_size(data_size, learning_rate, batch_num, minimum)


# Tweedie deviance loss for the PyTorch environment.
# Reference: https://scikit-learn.org/stable/modules/model_evaluation.html#mean-poisson-gamma-and-tweedie-deviances
def tweedie_loss(pred, target, p=1.5, eps=1e-6, max_clip=1e6):
    return TrainingUtils.tweedie_loss(pred, target, p=p, eps=eps, max_clip=max_clip)


# Helper for releasing CUDA memory.
def free_cuda():
    TrainingUtils.free_cuda()

class TorchTrainerMixin:
    # Shared utility methods for Torch tabular trainers.

    def _device_type(self) -> str:
        return getattr(self, "device", torch.device("cpu")).type

    def _build_dataloader(self,
                          dataset,
                          N: int,
                          base_bs_gpu: tuple,
                          base_bs_cpu: tuple,
                          min_bs: int = 64,
                          target_effective_cuda: int = 8192,
                          target_effective_cpu: int = 4096,
                          large_threshold: int = 200_000,
                          mid_threshold: int = 50_000):
        batch_size = TrainingUtils.compute_batch_size(
            data_size=len(dataset),
            learning_rate=self.learning_rate,
            batch_num=self.batch_num,
            minimum=min_bs
        )
        gpu_large, gpu_mid, gpu_small = base_bs_gpu
        cpu_mid, cpu_small = base_bs_cpu

        if self._device_type() == 'cuda':
            device_count = torch.cuda.device_count()
            # With multiple GPUs, raise the minimum batch size so every card
            # receives enough data.
            if device_count > 1:
                min_bs = min_bs * device_count
                print(
                    f">>> Multi-GPU detected: {device_count} devices. Adjusted min_bs to {min_bs}.")

            if N > large_threshold:
                base_bs = gpu_large * device_count
            elif N > mid_threshold:
                base_bs = gpu_mid * device_count
            else:
                base_bs = gpu_small * device_count
        else:
            base_bs = cpu_mid if N > mid_threshold else cpu_small

        # Recompute batch_size so it is no smaller than the adjusted min_bs.
        batch_size = TrainingUtils.compute_batch_size(
            data_size=len(dataset),
            learning_rate=self.learning_rate,
            batch_num=self.batch_num,
            minimum=min_bs
        )
        batch_size = min(batch_size, base_bs, N)

        target_effective_bs = target_effective_cuda if self._device_type(
        ) == 'cuda' else target_effective_cpu
        accum_steps = max(1, target_effective_bs // batch_size)

        print(
            f">>> DataLoader config: Batch Size={batch_size}, Accum Steps={accum_steps}, Workers={min(8, os.cpu_count() or 1)}")

        # Linux (posix) forks workers cheaply; Windows (nt) uses spawn, which
        # is far more expensive.
        if os.name == 'nt':
            workers = 0
        else:
            workers = min(8, os.cpu_count() or 1)

        sampler = None
        if dist.is_initialized():
            sampler = DistributedSampler(dataset, shuffle=True)
            shuffle = False  # Sampler handles shuffling
        else:
            shuffle = True

        dataloader = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            sampler=sampler,
            num_workers=workers,
            pin_memory=(self._device_type() == 'cuda'),
            persistent_workers=workers > 0,
        )
        return dataloader, accum_steps

    def _compute_weighted_loss(self, y_pred, y_true, weights, apply_softplus: bool = False):
        task = getattr(self, "task_type", "regression")
        if task == 'classification':
            loss_fn = nn.BCEWithLogitsLoss(reduction='none')
            losses = loss_fn(y_pred, y_true).view(-1)
        else:
            if apply_softplus:
                y_pred = F.softplus(y_pred)
            y_pred = torch.clamp(y_pred, min=1e-6)
            power = getattr(self, "tw_power", 1.5)
            losses = tweedie_loss(y_pred, y_true, p=power).view(-1)
        weighted_loss = (losses * weights.view(-1)).sum() / \
            torch.clamp(weights.sum(), min=EPS)
        return weighted_loss

    def _early_stop_update(self, val_loss, best_loss, best_state, patience_counter, model):
        if val_loss < best_loss:
            return val_loss, copy.deepcopy(model.state_dict()), 0, False
        patience_counter += 1
        should_stop = best_state is not None and patience_counter >= getattr(
            self, "patience", 0)
        return best_loss, best_state, patience_counter, should_stop

    def _train_model(self,
                     model,
                     dataloader,
                     accum_steps,
                     optimizer,
                     scaler,
                     forward_fn,
                     val_forward_fn=None,
                     apply_softplus: bool = False,
                     clip_fn=None,
                     trial: Optional[optuna.trial.Trial] = None):
        device_type = self._device_type()
        best_loss = float('inf')
        best_state = None
        patience_counter = 0
        stop_training = False

        for epoch in range(1, getattr(self, "epochs", 1) + 1):
            if hasattr(self, 'dataloader_sampler') and self.dataloader_sampler is not None:
                self.dataloader_sampler.set_epoch(epoch)

            model.train()
            optimizer.zero_grad()

            for step, batch in enumerate(dataloader):
                with autocast(enabled=(device_type == 'cuda')):
                    y_pred, y_true, w = forward_fn(batch)
                    weighted_loss = self._compute_weighted_loss(
                        y_pred, y_true, w, apply_softplus=apply_softplus)
                    loss_for_backward = weighted_loss / accum_steps

                scaler.scale(loss_for_backward).backward()

                if ((step + 1) % accum_steps == 0) or ((step + 1) == len(dataloader)):
                    if clip_fn is not None:
                        clip_fn()
                    scaler.step(optimizer)
                    scaler.update()
                    optimizer.zero_grad()

            if val_forward_fn is not None:
                model.eval()
                with torch.no_grad(), autocast(enabled=(device_type == 'cuda')):
                    val_result = val_forward_fn()
                    if isinstance(val_result, tuple) and len(val_result) == 3:
                        y_val_pred, y_val_true, w_val = val_result
                        val_weighted_loss = self._compute_weighted_loss(
                            y_val_pred, y_val_true, w_val, apply_softplus=apply_softplus)
                    else:
                        val_weighted_loss = val_result

                best_loss, best_state, patience_counter, stop_training = self._early_stop_update(
                    val_weighted_loss, best_loss, best_state, patience_counter, model)

                # Optuna pruning: abort the trial early if the metric lags
                # behind historical trials.
                if trial is not None:
                    trial.report(val_weighted_loss, epoch)
                    if trial.should_prune():
                        raise optuna.TrialPruned()

            if stop_training:
                break

        return best_state

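
# Editor's note: an arithmetic sketch, not part of the packaged file. It shows
# how _build_dataloader pairs the physical batch size with gradient
# accumulation: the effective batch is batch_size * accum_steps, pushed toward
# the target.
def _demo_accum_steps():
    batch_size = 1024
    target_effective_bs = 8192
    accum_steps = max(1, target_effective_bs // batch_size)
    print(accum_steps, batch_size * accum_steps)  # 8 8192
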
# =============================================================================
# Plotting helper module
# =============================================================================

def split_data(data, col_nme, wgt_nme, n_bins=10):
    return PlotUtils.split_data(data, col_nme, wgt_nme, n_bins)

# Lift-chart plotting wrapper


def plot_lift_list(pred_model, w_pred_list, w_act_list,
                   weight_list, tgt_nme, n_bins=10,
                   fig_nme='Lift Chart'):
    return PlotUtils.plot_lift_list(pred_model, w_pred_list, w_act_list,
                                    weight_list, tgt_nme, n_bins, fig_nme)

# Double-lift-chart plotting wrapper


def plot_dlift_list(pred_model_1, pred_model_2,
                    model_nme_1, model_nme_2,
                    tgt_nme,
                    w_list, w_act_list, n_bins=10,
                    fig_nme='Double Lift Chart'):
    return PlotUtils.plot_dlift_list(pred_model_1, pred_model_2,
                                     model_nme_1, model_nme_2,
                                     tgt_nme, w_list, w_act_list,
                                     n_bins, fig_nme)


# =============================================================================
# ResNet model and sklearn-style wrapper
# =============================================================================

# ResNet model structure.
# Residual block: two linear layers + ReLU + a residual connection.
# ResBlock subclasses nn.Module.
class ResBlock(nn.Module):
    def __init__(self, dim: int, dropout: float = 0.1,
                 use_layernorm: bool = False, residual_scale: float = 0.1
                 ):
        super().__init__()
        self.use_layernorm = use_layernorm

        if use_layernorm:
            Norm = nn.LayerNorm  # normalizes over the last dimension
        else:
            def Norm(d): return nn.BatchNorm1d(d)  # keep a switch so BatchNorm can be tried too

        self.norm1 = Norm(dim)
        self.fc1 = nn.Linear(dim, dim, bias=True)
        self.act = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
        # self.norm2 = Norm(dim)
        self.fc2 = nn.Linear(dim, dim, bias=True)

        # Residual scaling keeps the trunk from blowing up early in training.
        self.res_scale = nn.Parameter(
            torch.tensor(residual_scale, dtype=torch.float32)
        )

    def forward(self, x):
        # Pre-activation layout
        out = self.norm1(x)
        out = self.fc1(out)
        out = self.act(out)
        out = self.dropout(out)
        # out = self.norm2(out)
        out = self.fc2(out)
        # Scale the residual branch, then add it back.
        return x + self.res_scale * out

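
# Editor's note: a shape sketch, not part of the packaged file. A ResBlock
# preserves its input shape, so blocks stack freely; res_scale is a learnable
# scalar that starts small (0.1 by default).
def _demo_resblock():
    blk = ResBlock(dim=8, use_layernorm=True)
    x = torch.randn(4, 8)
    print(blk(x).shape)  # torch.Size([4, 8])
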
# ResNetSequential subclasses nn.Module and defines the full network.


class ResNetSequential(nn.Module):
    # Input tensor shape: (batch, input_dim)
    # Architecture: linear stem + normalization + ReLU, a stack of residual
    # blocks, and a Softplus output head.

    def __init__(self, input_dim: int, hidden_dim: int = 64, block_num: int = 2,
                 use_layernorm: bool = True, dropout: float = 0.1,
                 residual_scale: float = 0.1, task_type: str = 'regression'):
        super(ResNetSequential, self).__init__()

        self.net = nn.Sequential()
        self.net.add_module('fc1', nn.Linear(input_dim, hidden_dim))

        # if use_layernorm:
        #     self.net.add_module('norm1', nn.LayerNorm(hidden_dim))
        # else:
        #     self.net.add_module('norm1', nn.BatchNorm1d(hidden_dim))

        # self.net.add_module('relu1', nn.ReLU(inplace=True))

        # Stack several residual blocks.
        for i in range(block_num):
            self.net.add_module(
                f'ResBlk_{i+1}',
                ResBlock(
                    hidden_dim,
                    dropout=dropout,
                    use_layernorm=use_layernorm,
                    residual_scale=residual_scale)
            )

        self.net.add_module('fc_out', nn.Linear(hidden_dim, 1))

        if task_type == 'classification':
            self.net.add_module('softplus', nn.Identity())
        else:
            self.net.add_module('softplus', nn.Softplus())

    def forward(self, x):
        if self.training and not hasattr(self, '_printed_device'):
            print(f">>> ResNetSequential executing on device: {x.device}")
            self._printed_device = True
        return self.net(x)

# Scikit-learn style interface for the ResNet model.

class ResNetSklearn(TorchTrainerMixin, nn.Module):
    def __init__(self, model_nme: str, input_dim: int, hidden_dim: int = 64,
                 block_num: int = 2, batch_num: int = 100, epochs: int = 100,
                 task_type: str = 'regression',
                 tweedie_power: float = 1.5, learning_rate: float = 0.01, patience: int = 10,
                 use_layernorm: bool = True, dropout: float = 0.1,
                 residual_scale: float = 0.1,
                 use_data_parallel: bool = True,
                 use_ddp: bool = False):
        super(ResNetSklearn, self).__init__()

        self.use_ddp = use_ddp
        self.is_ddp_enabled, self.local_rank, self.rank, self.world_size = (
            False, 0, 0, 1)

        if self.use_ddp:
            self.is_ddp_enabled, self.local_rank, self.rank, self.world_size = DistributedUtils.setup_ddp()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.block_num = block_num
        self.batch_num = batch_num
        self.epochs = epochs
        self.task_type = task_type
        self.model_nme = model_nme
        self.learning_rate = learning_rate
        self.patience = patience
        self.use_layernorm = use_layernorm
        self.dropout = dropout
        self.residual_scale = residual_scale

        # Device selection priority: cuda > mps > cpu
        if self.is_ddp_enabled:
            self.device = torch.device(f'cuda:{self.local_rank}')
        elif torch.cuda.is_available():
            self.device = torch.device('cuda')
        elif torch.backends.mps.is_available():
            self.device = torch.device('mps')
        else:
            self.device = torch.device('cpu')

        # Tweedie power setting (unused for classification)
        if self.task_type == 'classification':
            self.tw_power = None
        elif 'f' in self.model_nme:
            self.tw_power = 1
        elif 's' in self.model_nme:
            self.tw_power = 2
        else:
            self.tw_power = tweedie_power

        # Build the network (on CPU first).
        core = ResNetSequential(
            self.input_dim,
            self.hidden_dim,
            self.block_num,
            use_layernorm=self.use_layernorm,
            dropout=self.dropout,
            residual_scale=self.residual_scale,
            task_type=self.task_type
        )

        # ===== Multi-GPU support: DataParallel vs DistributedDataParallel =====
        if self.is_ddp_enabled:
            core = core.to(self.device)
            core = DDP(core, device_ids=[
                       self.local_rank], output_device=self.local_rank)
        elif use_data_parallel and (self.device.type == 'cuda') and (torch.cuda.device_count() > 1):
            core = nn.DataParallel(core, device_ids=list(
                range(torch.cuda.device_count())))
            # DataParallel scatters inputs across GPUs, but the primary device
            # is still cuda:0.
            self.device = torch.device('cuda')

        self.resnet = core.to(self.device)

    # ================ Internal helpers ================
    def _build_train_val_tensors(self, X_train, y_train, w_train, X_val, y_val, w_val):
        X_tensor = torch.tensor(X_train.values, dtype=torch.float32)
        y_tensor = torch.tensor(
            y_train.values, dtype=torch.float32).view(-1, 1)
        w_tensor = torch.tensor(w_train.values, dtype=torch.float32).view(
            -1, 1) if w_train is not None else torch.ones_like(y_tensor)

        has_val = X_val is not None and y_val is not None
        if has_val:
            X_val_tensor = torch.tensor(X_val.values, dtype=torch.float32)
            y_val_tensor = torch.tensor(
                y_val.values, dtype=torch.float32).view(-1, 1)
            w_val_tensor = torch.tensor(w_val.values, dtype=torch.float32).view(
                -1, 1) if w_val is not None else torch.ones_like(y_val_tensor)
        else:
            X_val_tensor = y_val_tensor = w_val_tensor = None
        return X_tensor, y_tensor, w_tensor, X_val_tensor, y_val_tensor, w_val_tensor, has_val

    def forward(self, x):
        # Accept NumPy input coming from SHAP.
        if isinstance(x, np.ndarray):
            x_tensor = torch.tensor(x, dtype=torch.float32)
        else:
            x_tensor = x

        x_tensor = x_tensor.to(self.device)
        y_pred = self.resnet(x_tensor)
        return y_pred

    # ---------------- Training ----------------

    def fit(self, X_train, y_train, w_train=None,
            X_val=None, y_val=None, w_val=None, trial=None):

        X_tensor, y_tensor, w_tensor, X_val_tensor, y_val_tensor, w_val_tensor, has_val = \
            self._build_train_val_tensors(
                X_train, y_train, w_train, X_val, y_val, w_val)

        dataset = TensorDataset(X_tensor, y_tensor, w_tensor)
        dataloader, accum_steps = self._build_dataloader(
            dataset,
            N=X_tensor.shape[0],
            base_bs_gpu=(16384, 8192, 4096),
            base_bs_cpu=(1024, 512),
            min_bs=64,
            target_effective_cuda=8192,
            target_effective_cpu=4096
        )

        # Record the sampler so its epoch can be set before every epoch,
        # keeping the shuffle genuinely random under DDP.
        if self.is_ddp_enabled and hasattr(dataloader.sampler, 'set_epoch'):
            self.dataloader_sampler = dataloader.sampler
        else:
            self.dataloader_sampler = None

        # === Optimizer and AMP ===
        self.optimizer = torch.optim.Adam(
            self.resnet.parameters(), lr=self.learning_rate)
        self.scaler = GradScaler(enabled=(self.device.type == 'cuda'))

        X_val_dev = y_val_dev = w_val_dev = None
        val_dataloader = None
        if has_val:
            # Build the validation DataLoader.
            val_dataset = TensorDataset(
                X_val_tensor, y_val_tensor, w_val_tensor)
            # Validation needs no backprop, so a larger batch raises throughput.
            val_bs = accum_steps * dataloader.batch_size

            # Validation workers follow the same allocation logic.
            if os.name == 'nt':
                val_workers = 0
            else:
                val_workers = min(4, os.cpu_count() or 1)

            val_dataloader = DataLoader(
                val_dataset,
                batch_size=val_bs,
                shuffle=False,
                num_workers=val_workers,
                pin_memory=(self.device.type == 'cuda'),
                persistent_workers=val_workers > 0,
            )
            # The validation set usually needs no DDP sampler, since results
            # are validated or aggregated on the main process only; for
            # simplicity this stays single-GPU / main-process validation.

        is_data_parallel = isinstance(self.resnet, nn.DataParallel)

        def forward_fn(batch):
            X_batch, y_batch, w_batch = batch

            if not is_data_parallel:
                X_batch = X_batch.to(self.device, non_blocking=True)
            # Targets and weights always stay on the primary device so the
            # loss can be computed there.
            y_batch = y_batch.to(self.device, non_blocking=True)
            w_batch = w_batch.to(self.device, non_blocking=True)

            y_pred = self.resnet(X_batch)
            return y_pred, y_batch, w_batch

        def val_forward_fn():
            total_loss = 0.0
            total_weight = 0.0
            for batch in val_dataloader:
                X_b, y_b, w_b = batch
                if not is_data_parallel:
                    X_b = X_b.to(self.device, non_blocking=True)
                y_b = y_b.to(self.device, non_blocking=True)
                w_b = w_b.to(self.device, non_blocking=True)

                y_pred = self.resnet(X_b)

                # Compute each batch's weighted loss by hand so the totals can
                # be aggregated exactly.
                task = getattr(self, "task_type", "regression")
                if task == 'classification':
                    loss_fn = nn.BCEWithLogitsLoss(reduction='none')
                    losses = loss_fn(y_pred, y_b).view(-1)
                else:
                    # No extra softplus needed here: training uses
                    # apply_softplus=False because the forward output is
                    # already positive.
                    y_pred_clamped = torch.clamp(y_pred, min=1e-6)
                    power = getattr(self, "tw_power", 1.5)
                    losses = tweedie_loss(
                        y_pred_clamped, y_b, p=power).view(-1)

                batch_weight_sum = torch.clamp(w_b.sum(), min=EPS)
                batch_weighted_loss_sum = (losses * w_b.view(-1)).sum()

                total_loss += batch_weighted_loss_sum.item()
                total_weight += batch_weight_sum.item()

            return total_loss / max(total_weight, EPS)

        clip_fn = None
        if self.device.type == 'cuda':
            def clip_fn(): return (self.scaler.unscale_(self.optimizer),
                                   clip_grad_norm_(self.resnet.parameters(), max_norm=1.0))

        # Under DDP, only the main process logs and saves the model.
        if self.is_ddp_enabled and not DistributedUtils.is_main_process():
            # Non-main ranks skip printing inside the validation callback
            # (control belongs inside _train_model; omitted here).
            pass

        best_state = self._train_model(
            self.resnet,
            dataloader,
            accum_steps,
            self.optimizer,
            self.scaler,
            forward_fn,
            val_forward_fn if has_val else None,
            apply_softplus=False,
            clip_fn=clip_fn,
            trial=trial
        )

        if has_val and best_state is not None:
            self.resnet.load_state_dict(best_state)

    # ---------------- Prediction ----------------

    def predict(self, X_test):
        self.resnet.eval()
        if isinstance(X_test, pd.DataFrame):
            X_np = X_test.values.astype(np.float32)
        else:
            X_np = X_test

        with torch.no_grad():
            y_pred = self(X_np).cpu().numpy()

        if self.task_type == 'classification':
            y_pred = 1 / (1 + np.exp(-y_pred))  # sigmoid: logits -> probabilities
        else:
            y_pred = np.clip(y_pred, 1e-6, None)
        return y_pred.flatten()

    # ---------------- Parameter setting ----------------

    def set_params(self, params):
        for key, value in params.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                raise ValueError(f"Parameter {key} not found in model.")
        return self

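
# Editor's note: a usage sketch with hypothetical column names and synthetic
# data, not part of the packaged file. The wrapper follows the sklearn
# fit/predict contract with optional sample weights and validation data.
def _demo_resnet_sklearn():
    X = pd.DataFrame(np.random.rand(256, 8),
                     columns=[f"x{i}" for i in range(8)])
    y = pd.Series(np.random.gamma(2.0, 1.0, 256))
    model = ResNetSklearn("demo_t", input_dim=8, hidden_dim=32,
                          block_num=2, epochs=3, use_data_parallel=False)
    model.fit(X, y)
    preds = model.predict(X)
    print(preds[:5])  # strictly positive Tweedie-scale predictions
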
# =============================================================================
# FT-Transformer model and sklearn-style wrapper
# =============================================================================
# FT-Transformer model structure.


class FeatureTokenizer(nn.Module):
    # Maps numeric and categorical features to a unified token representation
    # of shape (batch, token_num, d_model).
    # Conventions:
    # - X_num: numeric features, shape=(batch, num_numeric)
    # - X_cat: categorical features, shape=(batch, num_categorical); each
    #   column holds encoded integer labels in [0, card-1]

    def __init__(self, num_numeric: int, cat_cardinalities, d_model: int):
        super().__init__()

        self.num_numeric = num_numeric
        self.has_numeric = num_numeric > 0

        if self.has_numeric:
            self.num_linear = nn.Linear(num_numeric, d_model)

        self.embeddings = nn.ModuleList([
            nn.Embedding(card, d_model) for card in cat_cardinalities
        ])

    def forward(self, X_num, X_cat):
        tokens = []

        if self.has_numeric:
            # All numeric features are mapped to a single token.
            # shape = (batch, d_model)
            num_token = self.num_linear(X_num)
            tokens.append(num_token)

        # Each categorical feature contributes one embedding token.
        for i, emb in enumerate(self.embeddings):
            # shape = (batch, d_model)
            tok = emb(X_cat[:, i])
            tokens.append(tok)

        # Stack into (batch, token_num, d_model).
        x = torch.stack(tokens, dim=1)
        return x

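
# Editor's note: a shape sketch, not part of the packaged file. With three
# numeric features and two categorical columns, each row becomes one numeric
# token plus two embedding tokens.
def _demo_feature_tokenizer():
    tok = FeatureTokenizer(num_numeric=3, cat_cardinalities=[5, 7], d_model=16)
    X_num = torch.randn(4, 3)
    X_cat = torch.randint(0, 5, (4, 2))  # codes must stay below each cardinality
    print(tok(X_num, X_cat).shape)  # torch.Size([4, 3, 16])
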
# Encoder layer with residual scaling.


class ScaledTransformerEncoderLayer(nn.Module):
    def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048,
                 dropout: float = 0.1, residual_scale_attn: float = 1.0,
                 residual_scale_ffn: float = 1.0, norm_first: bool = True,
                 ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(
            embed_dim=d_model,
            num_heads=nhead,
            dropout=dropout,
            batch_first=True
        )

        # Feed-forward branch
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        # Normalization and dropout
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = nn.GELU()
        # self.activation = nn.ReLU()
        self.norm_first = norm_first

        # Residual scaling coefficients
        self.res_scale_attn = residual_scale_attn
        self.res_scale_ffn = residual_scale_ffn

    def forward(self, src, src_mask=None, src_key_padding_mask=None):
        # Input tensor shape: (batch, seq_len, d_model)
        x = src

        if self.norm_first:
            # Pre-norm: normalize before attention.
            x = x + self._sa_block(self.norm1(x), src_mask,
                                   src_key_padding_mask)
            x = x + self._ff_block(self.norm2(x))
        else:
            # Post-norm (normally disabled)
            x = self.norm1(
                x + self._sa_block(x, src_mask, src_key_padding_mask))
            x = self.norm2(x + self._ff_block(x))

        return x

    def _sa_block(self, x, attn_mask, key_padding_mask):
        # Self-attention with residual scaling
        attn_out, _ = self.self_attn(
            x, x, x,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask,
            need_weights=False
        )
        return self.res_scale_attn * self.dropout1(attn_out)

    def _ff_block(self, x):
        # Feed-forward with residual scaling
        x2 = self.linear2(self.dropout(self.activation(self.linear1(x))))
        return self.res_scale_ffn * self.dropout2(x2)

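
# Editor's note: a shape sketch, not part of the packaged file. The layer is
# shape-preserving, and the 1/sqrt(n_layers) residual scale used below keeps
# the activation variance of a deep pre-norm stack roughly flat at init.
def _demo_scaled_encoder_layer():
    layer = ScaledTransformerEncoderLayer(d_model=16, nhead=4,
                                          dim_feedforward=64,
                                          residual_scale_attn=0.5,
                                          residual_scale_ffn=0.5)
    x = torch.randn(2, 3, 16)  # (batch, tokens, d_model)
    print(layer(x).shape)  # torch.Size([2, 3, 16])
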
# FT-Transformer core model.


class FTTransformerCore(nn.Module):
    # A minimal working FT-Transformer in three parts:
    # 1) FeatureTokenizer: turns numeric/categorical features into tokens;
    # 2) TransformerEncoder: models interactions between features;
    # 3) pooling + MLP + Softplus: emits positive outputs suited to
    #    Tweedie/Gamma style tasks.

    def __init__(self, num_numeric: int, cat_cardinalities, d_model: int = 64,
                 n_heads: int = 8, n_layers: int = 4, dropout: float = 0.1,
                 task_type: str = 'regression'
                 ):
        super().__init__()

        self.tokenizer = FeatureTokenizer(
            num_numeric=num_numeric,
            cat_cardinalities=cat_cardinalities,
            d_model=d_model
        )
        scale = 1.0 / math.sqrt(n_layers)  # a recommended default
        encoder_layer = ScaledTransformerEncoderLayer(
            d_model=d_model,
            nhead=n_heads,
            dim_feedforward=d_model * 4,
            dropout=dropout,
            residual_scale_attn=scale,
            residual_scale_ffn=scale,
            norm_first=True,
        )
        self.encoder = nn.TransformerEncoder(
            encoder_layer,
            num_layers=n_layers
        )
        self.n_layers = n_layers

        layers = [
            # nn.LayerNorm(d_model),
            # nn.Linear(d_model, d_model),
            # nn.GELU(),
            nn.Linear(d_model, 1),
        ]

        if task_type == 'classification':
            # Classification emits logits, matching BCEWithLogitsLoss.
            layers.append(nn.Identity())
        else:
            # Regression must stay positive for Tweedie/Gamma.
            layers.append(nn.Softplus())

        self.head = nn.Sequential(*layers)

    def forward(self, X_num, X_cat):

        # Inputs:
        # X_num -> float32 tensor of shape (batch, num_numeric)
        # X_cat -> long tensor of shape (batch, num_categorical)

        if self.training and not hasattr(self, '_printed_device'):
            print(f">>> FTTransformerCore executing on device: {X_num.device}")
            self._printed_device = True

        tokens = self.tokenizer(X_num, X_cat)  # => (batch, token_num, d_model)
        x = self.encoder(tokens)  # => (batch, token_num, d_model)

        # Mean-pool the tokens, then apply the regression head.
        x = x.mean(dim=1)  # => (batch, d_model)

        out = self.head(x)  # => (batch, 1), kept positive by Softplus
        return out

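
# Editor's note: a forward-pass sketch, not part of the packaged file.
def _demo_ft_core():
    core = FTTransformerCore(num_numeric=3, cat_cardinalities=[5, 7],
                             d_model=16, n_heads=4, n_layers=2)
    out = core(torch.randn(4, 3), torch.randint(0, 5, (4, 2)))
    print(out.shape, bool((out > 0).all()))  # torch.Size([4, 1]) True
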
# TabularDataset


class TabularDataset(Dataset):
    def __init__(self, X_num, X_cat, y, w):

        # Input tensors:
        # X_num: torch.float32, shape=(N, num_numeric)
        # X_cat: torch.long,    shape=(N, num_categorical)
        # y:     torch.float32, shape=(N, 1)
        # w:     torch.float32, shape=(N, 1)

        self.X_num = X_num
        self.X_cat = X_cat
        self.y = y
        self.w = w

    def __len__(self):
        return self.y.shape[0]

    def __getitem__(self, idx):
        return (
            self.X_num[idx],
            self.X_cat[idx],
            self.y[idx],
            self.w[idx],
        )

# Scikit-learn style interface for the FT-Transformer.

class FTTransformerSklearn(TorchTrainerMixin, nn.Module):
|
|
1085
|
+
|
|
1086
|
+
# sklearn 风格包装:
|
|
1087
|
+
# - num_cols:数值特征列名列表
|
|
1088
|
+
# - cat_cols:类别特征列名列表(需事先做标签编码,取值 ∈ [0, n_classes-1])
|
|
1089
|
+
|
|
1090
|
+
def __init__(self, model_nme: str, num_cols, cat_cols, d_model: int = 64, n_heads: int = 8,
|
|
1091
|
+
n_layers: int = 4, dropout: float = 0.1, batch_num: int = 100, epochs: int = 100,
|
|
1092
|
+
task_type: str = 'regression',
|
|
1093
|
+
tweedie_power: float = 1.5, learning_rate: float = 1e-3, patience: int = 10,
|
|
1094
|
+
use_data_parallel: bool = True,
|
|
1095
|
+
use_ddp: bool = False
|
|
1096
|
+
):
|
|
1097
|
+
super().__init__()
|
|
1098
|
+
|
|
1099
|
+
self.use_ddp = use_ddp
|
|
1100
|
+
self.is_ddp_enabled, self.local_rank, self.rank, self.world_size = (
|
|
1101
|
+
False, 0, 0, 1)
|
|
1102
|
+
if self.use_ddp:
|
|
1103
|
+
self.is_ddp_enabled, self.local_rank, self.rank, self.world_size = DistributedUtils.setup_ddp()
|
|
1104
|
+
|
|
1105
|
+
self.model_nme = model_nme
|
|
1106
|
+
self.num_cols = list(num_cols)
|
|
1107
|
+
self.cat_cols = list(cat_cols)
|
|
1108
|
+
self.d_model = d_model
|
|
1109
|
+
self.n_heads = n_heads
|
|
1110
|
+
self.n_layers = n_layers
|
|
1111
|
+
self.dropout = dropout
|
|
1112
|
+
self.batch_num = batch_num
|
|
1113
|
+
self.epochs = epochs
|
|
1114
|
+
self.learning_rate = learning_rate
|
|
1115
|
+
self.task_type = task_type
|
|
1116
|
+
self.patience = patience
|
|
1117
|
+
if self.task_type == 'classification':
|
|
1118
|
+
self.tw_power = None # 分类时不使用 Tweedie 幂
|
|
1119
|
+
elif 'f' in self.model_nme:
|
|
1120
|
+
self.tw_power = 1.0
|
|
1121
|
+
elif 's' in self.model_nme:
|
|
1122
|
+
self.tw_power = 2.0
|
|
1123
|
+
else:
|
|
1124
|
+
self.tw_power = tweedie_power
|
|
1125
|
+
|
|
1126
|
+
if self.is_ddp_enabled:
|
|
1127
|
+
self.device = torch.device(f"cuda:{self.local_rank}")
|
|
1128
|
+
elif torch.cuda.is_available():
|
|
1129
|
+
self.device = torch.device("cuda")
|
|
1130
|
+
elif torch.backends.mps.is_available():
|
|
1131
|
+
self.device = torch.device("mps")
|
|
1132
|
+
else:
|
|
1133
|
+
self.device = torch.device("cpu")
|
|
1134
|
+
self.cat_cardinalities = None
|
|
1135
|
+
self.cat_categories = {}
|
|
1136
|
+
self.ft = None
|
|
1137
|
+
self.use_data_parallel = torch.cuda.device_count() > 1 and use_data_parallel
|
|
1138
|
+
|
|
1139
|
+
def _build_model(self, X_train):
|
|
1140
|
+
num_numeric = len(self.num_cols)
|
|
1141
|
+
cat_cardinalities = []
|
|
1142
|
+
|
|
1143
|
+
for col in self.cat_cols:
|
|
1144
|
+
cats = X_train[col].astype('category')
|
|
1145
|
+
categories = cats.cat.categories
|
|
1146
|
+
self.cat_categories[col] = categories # 保存训练集类别全集
|
|
1147
|
+
|
|
1148
|
+
card = len(categories) + 1 # 多预留 1 类给“未知/缺失”
|
|
1149
|
+
cat_cardinalities.append(card)
|
|
1150
|
+
|
|
1151
|
+
self.cat_cardinalities = cat_cardinalities
|
|
1152
|
+
|
|
1153
|
+
core = FTTransformerCore(
|
|
1154
|
+
num_numeric=num_numeric,
|
|
1155
|
+
cat_cardinalities=cat_cardinalities,
|
|
1156
|
+
d_model=self.d_model,
|
|
1157
|
+
n_heads=self.n_heads,
|
|
1158
|
+
n_layers=self.n_layers,
|
|
1159
|
+
dropout=self.dropout,
|
|
1160
|
+
task_type=self.task_type
|
|
1161
|
+
)
|
|
1162
|
+
if self.is_ddp_enabled:
|
|
1163
|
+
core = core.to(self.device)
|
|
1164
|
+
core = DDP(core, device_ids=[
|
|
1165
|
+
self.local_rank], output_device=self.local_rank)
|
|
1166
|
+
elif self.use_data_parallel:
|
|
1167
|
+
core = nn.DataParallel(core, device_ids=list(
|
|
1168
|
+
range(torch.cuda.device_count())))
|
|
1169
|
+
self.device = torch.device("cuda")
|
|
1170
|
+
self.ft = core.to(self.device)
|
|
1171
|
+
|
|
1172
|
+
def _encode_cats(self, X):
|
|
1173
|
+
# 输入 DataFrame 至少需要包含所有类别特征列
|
|
1174
|
+
# 返回形状 (N, 类别特征数) 的 int64 数组
|
|
1175
|
+
|
|
1176
|
+
if not self.cat_cols:
|
|
1177
|
+
return np.zeros((len(X), 0), dtype='int64')
|
|
1178
|
+
|
|
1179
|
+
X_cat_list = []
|
|
1180
|
+
for col in self.cat_cols:
|
|
1181
|
+
# 使用训练阶段记录的类别全集
|
|
1182
|
+
categories = self.cat_categories[col]
|
|
1183
|
+
# 按固定类别构造 Categorical
|
|
1184
|
+
cats = pd.Categorical(X[col], categories=categories)
|
|
1185
|
+
codes = cats.codes.astype('int64', copy=True) # -1 表示未知或缺失
|
|
1186
|
+
# 未知或缺失映射到额外的“未知”索引 len(categories)
|
|
1187
|
+
codes[codes < 0] = len(categories)
|
|
1188
|
+
X_cat_list.append(codes)
|
|
1189
|
+
|
|
1190
|
+
X_cat_np = np.stack(X_cat_list, axis=1) # 形状 (N, 类别特征数)
|
|
1191
|
+
return X_cat_np
|
|
1192
|
+
|
|
1193
|
+
    def _build_train_tensors(self, X_train, y_train, w_train):
        return self._tensorize_split(X_train, y_train, w_train)

    def _build_val_tensors(self, X_val, y_val, w_val):
        return self._tensorize_split(X_val, y_val, w_val, allow_none=True)

    def _tensorize_split(self, X, y, w, allow_none: bool = False):
        if X is None:
            if allow_none:
                return None, None, None, None, False
            raise ValueError("Input features X must not be None.")

        X_num = torch.tensor(
            X[self.num_cols].to_numpy(dtype=np.float32, copy=True),
            dtype=torch.float32
        )
        if self.cat_cols:
            X_cat = torch.tensor(self._encode_cats(X), dtype=torch.long)
        else:
            X_cat = torch.zeros((X_num.shape[0], 0), dtype=torch.long)

        y_tensor = torch.tensor(
            y.values, dtype=torch.float32).view(-1, 1) if y is not None else None
        if y_tensor is None:
            w_tensor = None
        elif w is not None:
            w_tensor = torch.tensor(
                w.values, dtype=torch.float32).view(-1, 1)
        else:
            w_tensor = torch.ones_like(y_tensor)
        return X_num, X_cat, y_tensor, w_tensor, y is not None
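    # Illustrative shape contract for _tensorize_split (example numbers, not
    # from the package): with 2 numeric and 3 categorical columns and N = 1000
    # rows, the returned values are
    #     X_num:    torch.Size([1000, 2])  float32
    #     X_cat:    torch.Size([1000, 3])  int64
    #     y_tensor: torch.Size([1000, 1])  float32 (None when y is None)
    #     w_tensor: torch.Size([1000, 1])  all ones when no weights are given
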
    def fit(self, X_train, y_train, w_train=None,
            X_val=None, y_val=None, w_val=None, trial=None):

        # The underlying model structure is built on the first fit.
        if self.ft is None:
            self._build_model(X_train)

        X_num_train, X_cat_train, y_tensor, w_tensor, _ = self._build_train_tensors(
            X_train, y_train, w_train)
        X_num_val, X_cat_val, y_val_tensor, w_val_tensor, has_val = self._build_val_tensors(
            X_val, y_val, w_val)

        # --- Build the DataLoader ---
        dataset = TabularDataset(
            X_num_train, X_cat_train, y_tensor, w_tensor
        )

        dataloader, accum_steps = self._build_dataloader(
            dataset,
            N=X_num_train.shape[0],
            base_bs_gpu=(16384, 8192, 4096),
            base_bs_cpu=(256, 128),
            min_bs=64,
            target_effective_cuda=4096,
            target_effective_cpu=2048
        )

        if self.is_ddp_enabled and hasattr(dataloader.sampler, 'set_epoch'):
            self.dataloader_sampler = dataloader.sampler
        else:
            self.dataloader_sampler = None

        optimizer = torch.optim.Adam(
            self.ft.parameters(), lr=self.learning_rate)
        scaler = GradScaler(enabled=(self.device.type == 'cuda'))

        X_num_val_dev = X_cat_val_dev = y_val_dev = w_val_dev = None
        val_dataloader = None
        if has_val:
            val_dataset = TabularDataset(
                X_num_val, X_cat_val, y_val_tensor, w_val_tensor
            )
            val_bs = accum_steps * dataloader.batch_size

            if os.name == 'nt':
                val_workers = 0
            else:
                val_workers = min(4, os.cpu_count() or 1)

            val_dataloader = DataLoader(
                val_dataset,
                batch_size=val_bs,
                shuffle=False,
                num_workers=val_workers,
                pin_memory=(self.device.type == 'cuda'),
                persistent_workers=val_workers > 0,
            )

        is_data_parallel = isinstance(self.ft, nn.DataParallel)

        def forward_fn(batch):
            X_num_b, X_cat_b, y_b, w_b = batch

            if not is_data_parallel:
                X_num_b = X_num_b.to(self.device, non_blocking=True)
                X_cat_b = X_cat_b.to(self.device, non_blocking=True)
                y_b = y_b.to(self.device, non_blocking=True)
                w_b = w_b.to(self.device, non_blocking=True)

            y_pred = self.ft(X_num_b, X_cat_b)
            return y_pred, y_b, w_b

        def val_forward_fn():
            total_loss = 0.0
            total_weight = 0.0
            for batch in val_dataloader:
                X_num_b, X_cat_b, y_b, w_b = batch
                if not is_data_parallel:
                    X_num_b = X_num_b.to(self.device, non_blocking=True)
                    X_cat_b = X_cat_b.to(self.device, non_blocking=True)
                    y_b = y_b.to(self.device, non_blocking=True)
                    w_b = w_b.to(self.device, non_blocking=True)

                y_pred = self.ft(X_num_b, X_cat_b)

                # Compute the validation loss manually.
                task = getattr(self, "task_type", "regression")
                if task == 'classification':
                    loss_fn = nn.BCEWithLogitsLoss(reduction='none')
                    losses = loss_fn(y_pred, y_b).view(-1)
                else:
                    # The model output has already passed through Softplus;
                    # do not apply it again here.
                    y_pred_clamped = torch.clamp(y_pred, min=1e-6)
                    power = getattr(self, "tw_power", 1.5)
                    losses = tweedie_loss(
                        y_pred_clamped, y_b, p=power).view(-1)

                batch_weight_sum = torch.clamp(w_b.sum(), min=EPS)
                batch_weighted_loss_sum = (losses * w_b.view(-1)).sum()

                total_loss += batch_weighted_loss_sum.item()
                total_weight += batch_weight_sum.item()

            return total_loss / max(total_weight, EPS)

        clip_fn = None
        if self.device.type == 'cuda':
            def clip_fn(): return (scaler.unscale_(optimizer),
                                   clip_grad_norm_(self.ft.parameters(), max_norm=1.0))

        best_state = self._train_model(
            self.ft,
            dataloader,
            accum_steps,
            optimizer,
            scaler,
            forward_fn,
            val_forward_fn if has_val else None,
            apply_softplus=False,
            clip_fn=clip_fn,
            trial=trial
        )

        if has_val and best_state is not None:
            self.ft.load_state_dict(best_state)
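    # Note on the validation batch size above: val_bs = accum_steps *
    # dataloader.batch_size mirrors the *effective* training batch under
    # gradient accumulation. E.g. (illustrative numbers) with batch_size=1024
    # and accum_steps=4, each optimizer step covers 4096 rows, so validation
    # also iterates in chunks of 4096.
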
    def predict(self, X_test):
        # X_test must contain all numeric and categorical columns.

        self.ft.eval()
        X_num, X_cat, _, _, _ = self._tensorize_split(
            X_test, None, None, allow_none=True)

        with torch.no_grad():
            X_num = X_num.to(self.device, non_blocking=True)
            X_cat = X_cat.to(self.device, non_blocking=True)
            y_pred = self.ft(X_num, X_cat).cpu().numpy()

        if self.task_type == 'classification':
            # Convert logits to probabilities.
            y_pred = 1 / (1 + np.exp(-y_pred))
        else:
            # The model already applies softplus; if needed, a log-exp smoothing
            # could be applied here: y_pred = log(1 + exp(y_pred)).
            y_pred = np.clip(y_pred, 1e-6, None)
        return y_pred.ravel()
    def set_params(self, params: dict):

        # Kept consistent with the sklearn-style API.
        # Note: changes to structural parameters (e.g. d_model/n_heads) only
        # take effect after re-fitting.

        for key, value in params.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                raise ValueError(f"Parameter {key} not found in model.")
        return self

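# A minimal usage sketch of the estimator above (column names and constructor
# defaults are assumptions for illustration; see BayesOpt_USAGE.md for the
# real flow):
#
#     model = FTTransformerSklearn(
#         model_nme='demo', num_cols=['age', 'veh_value'],
#         cat_cols=['region'], task_type='regression')
#     model.fit(X_train, y_train, w_train)
#     preds = model.predict(X_test)   # positive, clipped predictions
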
# ===== Core components and training wrappers =================================

# =============================================================================
# Configuration, preprocessing and trainer base classes
# =============================================================================
@dataclass
class BayesOptConfig:
    model_nme: str
    resp_nme: str
    weight_nme: str
    factor_nmes: List[str]
    task_type: str = 'regression'
    binary_resp_nme: Optional[str] = None
    cate_list: Optional[List[str]] = None
    prop_test: float = 0.25
    rand_seed: Optional[int] = None
    epochs: int = 100
    use_gpu: bool = True
    use_resn_data_parallel: bool = False
    use_ft_data_parallel: bool = False
    use_resn_ddp: bool = False
    use_ft_ddp: bool = False
    optuna_storage: Optional[str] = None
    optuna_study_prefix: Optional[str] = None

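# Illustrative construction of the config above (the column names here are
# hypothetical, not from the package):
#
#     cfg = BayesOptConfig(
#         model_nme='demo_f',
#         resp_nme='claim_freq',
#         weight_nme='exposure',
#         factor_nmes=['age', 'region', 'veh_value'],
#         cate_list=['region'],
#         task_type='regression',
#     )
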
class OutputManager:
    # Centralises the output paths for results, plots and models.

    def __init__(self, root: Optional[str] = None, model_name: str = "model") -> None:
        self.root = Path(root or os.getcwd())
        self.model_name = model_name
        self.plot_dir = self.root / 'plot'
        self.result_dir = self.root / 'Results'
        self.model_dir = self.root / 'model'

    def _prepare(self, path: Path) -> str:
        ensure_parent_dir(str(path))
        return str(path)

    def plot_path(self, filename: str) -> str:
        return self._prepare(self.plot_dir / filename)

    def result_path(self, filename: str) -> str:
        return self._prepare(self.result_dir / filename)

    def model_path(self, filename: str) -> str:
        return self._prepare(self.model_dir / filename)

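# Resulting directory layout relative to `root` (parent directories are
# created on demand through ensure_parent_dir):
#
#     plot/      figures saved via plot_path()
#     Results/   CSVs and metrics saved via result_path()
#     model/     serialized models saved via model_path()
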
class DatasetPreprocessor:
    # Prepares the shared train/test data views used by all trainers.

    def __init__(self, train_df: pd.DataFrame, test_df: pd.DataFrame,
                 config: BayesOptConfig) -> None:
        self.config = config
        self.train_data = train_df.copy(deep=True)
        self.test_data = test_df.copy(deep=True)
        self.num_features: List[str] = []
        self.train_oht_scl_data: Optional[pd.DataFrame] = None
        self.test_oht_scl_data: Optional[pd.DataFrame] = None
        self.var_nmes: List[str] = []
        self.cat_categories_for_shap: Dict[str, List[Any]] = {}

    def run(self) -> "DatasetPreprocessor":
        cfg = self.config
        # Pre-compute the weighted actuals; plotting and validation both rely
        # on this column.
        self.train_data.loc[:, 'w_act'] = self.train_data[cfg.resp_nme] * \
            self.train_data[cfg.weight_nme]
        self.test_data.loc[:, 'w_act'] = self.test_data[cfg.resp_nme] * \
            self.test_data[cfg.weight_nme]
        if cfg.binary_resp_nme:
            self.train_data.loc[:, 'w_binary_act'] = self.train_data[cfg.binary_resp_nme] * \
                self.train_data[cfg.weight_nme]
            self.test_data.loc[:, 'w_binary_act'] = self.test_data[cfg.binary_resp_nme] * \
                self.test_data[cfg.weight_nme]
        # Clip the response at a high quantile (99.9%) to absorb outliers;
        # without this, extreme points would dominate the loss.
        q99 = self.train_data[cfg.resp_nme].quantile(0.999)
        self.train_data[cfg.resp_nme] = self.train_data[cfg.resp_nme].clip(
            upper=q99)
        cate_list = list(cfg.cate_list or [])
        if cate_list:
            for cate in cate_list:
                self.train_data[cate] = self.train_data[cate].astype(
                    'category')
                self.test_data[cate] = self.test_data[cate].astype('category')
                cats = self.train_data[cate].cat.categories
                self.cat_categories_for_shap[cate] = list(cats)
        self.num_features = [
            nme for nme in cfg.factor_nmes if nme not in cate_list]
        train_oht = self.train_data[cfg.factor_nmes +
                                    [cfg.weight_nme] + [cfg.resp_nme]].copy()
        test_oht = self.test_data[cfg.factor_nmes +
                                  [cfg.weight_nme] + [cfg.resp_nme]].copy()
        train_oht = pd.get_dummies(
            train_oht,
            columns=cate_list,
            drop_first=True,
            dtype=np.int8
        )
        test_oht = pd.get_dummies(
            test_oht,
            columns=cate_list,
            drop_first=True,
            dtype=np.int8
        )
        for num_chr in self.num_features:
            # Standardise column by column so every feature sits on the same
            # scale; otherwise the neural networks struggle to converge.
            scaler = StandardScaler()
            train_oht[num_chr] = scaler.fit_transform(
                train_oht[num_chr].values.reshape(-1, 1))
            test_oht[num_chr] = scaler.transform(
                test_oht[num_chr].values.reshape(-1, 1))
        # When reindexing, fill dummy columns missing from the test set with
        # zeros so its column layout matches the training set.
        test_oht = test_oht.reindex(columns=train_oht.columns, fill_value=0)
        self.train_oht_scl_data = train_oht
        self.test_oht_scl_data = test_oht
        self.var_nmes = list(
            set(list(train_oht.columns)) - set([cfg.weight_nme, cfg.resp_nme])
        )
        return self

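# A minimal sketch (illustrative data) of the dummy-column alignment done in
# run(): reindex() guarantees the test design matrix carries exactly the
# training columns, zero-filling levels absent from the test set.
#
#     train = pd.get_dummies(pd.DataFrame({'r': ['a', 'b']}), columns=['r'], drop_first=True)
#     test = pd.get_dummies(pd.DataFrame({'r': ['a', 'a']}), columns=['r'], drop_first=True)
#     test = test.reindex(columns=train.columns, fill_value=0)  # adds r_b == 0
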
# =============================================================================
# Trainer hierarchy
# =============================================================================


class TrainerBase:
    def __init__(self, context: "BayesOptModel", label: str, model_name_prefix: str) -> None:
        self.ctx = context
        self.label = label
        self.model_name_prefix = model_name_prefix
        self.model = None
        self.best_params: Optional[Dict[str, Any]] = None
        self.best_trial = None

    @property
    def config(self) -> BayesOptConfig:
        return self.ctx.config

    @property
    def output(self) -> OutputManager:
        return self.ctx.output_manager

    def _get_model_filename(self) -> str:
        ext = 'pkl' if self.label in ['Xgboost', 'GLM'] else 'pth'
        return f'01_{self.ctx.model_nme}_{self.model_name_prefix}.{ext}'

    def tune(self, max_evals: int, objective_fn=None) -> None:
        # Generic Optuna tuning loop.
        if objective_fn is None:
            # If the subclass does not supply objective_fn explicitly, default
            # to cross_val as the optimisation target.
            objective_fn = self.cross_val

        total_trials = max(1, int(max_evals))
        progress_counter = {"count": 0}

        def objective_wrapper(trial: optuna.trial.Trial) -> float:
            should_log = DistributedUtils.is_main_process()
            if should_log:
                current_idx = progress_counter["count"] + 1
                print(
                    f"[Optuna][{self.label}] Trial {current_idx}/{total_trials} started "
                    f"(trial_id={trial.number})."
                )
            try:
                result = objective_fn(trial)
            except RuntimeError as exc:
                if "out of memory" in str(exc).lower():
                    print(
                        f"[Optuna][{self.label}] OOM detected. Pruning trial and clearing CUDA cache."
                    )
                    self._clean_gpu()
                    raise optuna.TrialPruned() from exc
                raise
            finally:
                self._clean_gpu()
                if should_log:
                    progress_counter["count"] = progress_counter["count"] + 1
                    trial_state = getattr(trial, "state", None)
                    state_repr = getattr(trial_state, "name", "OK")
                    print(
                        f"[Optuna][{self.label}] Trial {progress_counter['count']}/{total_trials} finished "
                        f"(status={state_repr})."
                    )
            return result

        study = optuna.create_study(
            direction='minimize',
            sampler=optuna.samplers.TPESampler(seed=self.ctx.rand_seed)
        )
        study.optimize(objective_wrapper, n_trials=max_evals)
        self.best_params = study.best_params
        self.best_trial = study.best_trial

        # Persist the best parameters as CSV so runs can be reproduced.
        params_path = self.output.result_path(
            f'{self.ctx.model_nme}_bestparams_{self.label.lower()}.csv'
        )
        pd.DataFrame(self.best_params, index=[0]).to_csv(params_path)

    def train(self) -> None:
        raise NotImplementedError

    def save(self) -> None:
        if self.model is None:
            print(f"[save] Warning: No model to save for {self.label}")
            return

        path = self.output.model_path(self._get_model_filename())
        if self.label in ['Xgboost', 'GLM']:
            joblib.dump(self.model, path)
        else:
            # Torch models can be saved either as a bare state_dict or as the
            # whole serialized object. For backward compatibility:
            # ResNetTrainer saves a state_dict, FTTrainer saves the full object.
            if hasattr(self.model, 'resnet'):  # ResNetSklearn
                torch.save(self.model.resnet.state_dict(), path)
            else:  # FTTransformerSklearn or others
                torch.save(self.model, path)

    def load(self) -> None:
        path = self.output.model_path(self._get_model_filename())
        if not os.path.exists(path):
            print(f"[load] Warning: Model file not found: {path}")
            return

        if self.label in ['Xgboost', 'GLM']:
            self.model = joblib.load(path)
        else:
            # Torch models are loaded differently depending on their structure.
            if self.label == 'ResNet' or self.label == 'ResNetClassifier':
                # ResNet needs its skeleton rebuilt first; the structural
                # parameters live on ctx, so the subclass handles loading.
                pass
            else:
                # The FT-Transformer serialized the whole object, so it can be
                # loaded directly and then moved to the target device.
                loaded = torch.load(path, map_location='cpu')
                self._move_to_device(loaded)
                self.model = loaded

    def _move_to_device(self, model_obj):
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        if hasattr(model_obj, 'device'):
            model_obj.device = device
        if hasattr(model_obj, 'to'):
            model_obj.to(device)
        # If the object carries ft/resnet submodules, move those as well.
        if hasattr(model_obj, 'ft'):
            model_obj.ft.to(device)
        if hasattr(model_obj, 'resnet'):
            model_obj.resnet.to(device)

    def _clean_gpu(self):
        gc.collect()
        if torch.cuda.is_available():
            device = None
            try:
                device = getattr(self, "device", None)
            except Exception:
                device = None
            if isinstance(device, torch.device):
                try:
                    torch.cuda.set_device(device)
                except Exception:
                    pass
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
            torch.cuda.synchronize()

    # Prediction + caching logic
    def _predict_and_cache(self,
                           model,
                           pred_prefix: str,
                           use_oht: bool = False,
                           design_fn=None) -> None:
        if design_fn:
            X_train = design_fn(train=True)
            X_test = design_fn(train=False)
        elif use_oht:
            X_train = self.ctx.train_oht_scl_data[self.ctx.var_nmes]
            X_test = self.ctx.test_oht_scl_data[self.ctx.var_nmes]
        else:
            X_train = self.ctx.train_data[self.ctx.factor_nmes]
            X_test = self.ctx.test_data[self.ctx.factor_nmes]

        preds_train = model.predict(X_train)
        preds_test = model.predict(X_test)

        self.ctx.train_data[f'pred_{pred_prefix}'] = preds_train
        self.ctx.test_data[f'pred_{pred_prefix}'] = preds_test
        self.ctx.train_data[f'w_pred_{pred_prefix}'] = (
            self.ctx.train_data[f'pred_{pred_prefix}'] *
            self.ctx.train_data[self.ctx.weight_nme]
        )
        self.ctx.test_data[f'w_pred_{pred_prefix}'] = (
            self.ctx.test_data[f'pred_{pred_prefix}'] *
            self.ctx.test_data[self.ctx.weight_nme]
        )

    def _fit_predict_cache(self,
                           model,
                           X_train,
                           y_train,
                           sample_weight,
                           pred_prefix: str,
                           use_oht: bool = False,
                           design_fn=None,
                           fit_kwargs: Optional[Dict[str, Any]] = None,
                           sample_weight_arg: Optional[str] = 'sample_weight') -> None:
        fit_kwargs = fit_kwargs.copy() if fit_kwargs else {}
        if sample_weight is not None and sample_weight_arg:
            fit_kwargs.setdefault(sample_weight_arg, sample_weight)
        model.fit(X_train, y_train, **fit_kwargs)
        self.ctx.model_label.append(self.label)
        self._predict_and_cache(
            model, pred_prefix, use_oht=use_oht, design_fn=design_fn)

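# Subclass contract for TrainerBase (informal summary, not enforced by code):
# each concrete trainer implements cross_val(trial) -> float as the Optuna
# objective, and train() to refit using self.best_params. A hypothetical
# sketch (helper names here are invented for illustration):
#
#     class MyTrainer(TrainerBase):
#         def cross_val(self, trial):
#             lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)
#             return holdout_loss(lr)                        # hypothetical helper
#         def train(self):
#             self.model = build_model(**self.best_params)   # hypothetical helper
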
class XGBTrainer(TrainerBase):
    def __init__(self, context: "BayesOptModel") -> None:
        super().__init__(context, 'Xgboost', 'Xgboost')
        self.model: Optional[xgb.XGBRegressor] = None

    def _build_estimator(self) -> xgb.XGBRegressor:
        params = dict(
            objective=self.ctx.obj,
            random_state=self.ctx.rand_seed,
            subsample=0.9,
            tree_method='gpu_hist' if self.ctx.use_gpu else 'hist',
            enable_categorical=True,
            predictor='gpu_predictor' if self.ctx.use_gpu else 'cpu_predictor'
        )
        if self.ctx.use_gpu:
            params['gpu_id'] = 0
            print(">>> XGBoost using GPU ID: 0 (Single GPU Mode)")
        return xgb.XGBRegressor(**params)

    def cross_val(self, trial: optuna.trial.Trial) -> float:
        learning_rate = trial.suggest_float(
            'learning_rate', 1e-5, 1e-1, log=True)
        gamma = trial.suggest_float('gamma', 0, 10000)
        max_depth = trial.suggest_int('max_depth', 3, 25)
        n_estimators = trial.suggest_int('n_estimators', 10, 500, step=10)
        min_child_weight = trial.suggest_int(
            'min_child_weight', 100, 10000, step=100)
        reg_alpha = trial.suggest_float('reg_alpha', 1e-10, 1, log=True)
        reg_lambda = trial.suggest_float('reg_lambda', 1e-10, 1, log=True)
        if self.ctx.obj == 'reg:tweedie':
            tweedie_variance_power = trial.suggest_float(
                'tweedie_variance_power', 1, 2)
        elif self.ctx.obj == 'count:poisson':
            tweedie_variance_power = 1
        elif self.ctx.obj == 'reg:gamma':
            tweedie_variance_power = 2
        else:
            tweedie_variance_power = 1.5
        clf = self._build_estimator()
        params = {
            'learning_rate': learning_rate,
            'gamma': gamma,
            'max_depth': max_depth,
            'n_estimators': n_estimators,
            'min_child_weight': min_child_weight,
            'reg_alpha': reg_alpha,
            'reg_lambda': reg_lambda
        }
        if self.ctx.obj == 'reg:tweedie':
            params['tweedie_variance_power'] = tweedie_variance_power
        clf.set_params(**params)
        n_jobs = 1 if self.ctx.use_gpu else int(1 / self.ctx.prop_test)
        acc = cross_val_score(
            clf,
            self.ctx.train_data[self.ctx.factor_nmes],
            self.ctx.train_data[self.ctx.resp_nme].values,
            fit_params=self.ctx.fit_params,
            cv=self.ctx.cv,
            scoring=make_scorer(
                mean_tweedie_deviance,
                power=tweedie_variance_power,
                greater_is_better=False),
            error_score='raise',
            n_jobs=n_jobs
        ).mean()
        return -acc

    def train(self) -> None:
        if not self.best_params:
            raise RuntimeError(
                'Run tune() first to obtain the best XGB parameters.')
        self.model = self._build_estimator()
        self.model.set_params(**self.best_params)
        self._fit_predict_cache(
            self.model,
            self.ctx.train_data[self.ctx.factor_nmes],
            self.ctx.train_data[self.ctx.resp_nme].values,
            sample_weight=None,
            pred_prefix='xgb',
            fit_kwargs=self.ctx.fit_params,
            sample_weight_arg=None  # sample weights are already passed via fit_kwargs
        )
        self.ctx.xgb_best = self.model

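# Background on the tweedie_variance_power branches above: the Tweedie family
# interpolates between Poisson (p = 1) and Gamma (p = 2), so those objectives
# pin the deviance power while 'reg:tweedie' searches p in (1, 2). Scoring
# uses mean_tweedie_deviance with the same power, so the CV loss stays
# consistent with the training objective.
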
class GLMTrainer(TrainerBase):
    def __init__(self, context: "BayesOptModel") -> None:
        super().__init__(context, 'GLM', 'GLM')
        self.model = None

    def _select_family(self, tweedie_power: Optional[float] = None):
        if self.ctx.task_type == 'classification':
            return sm.families.Binomial()
        if self.ctx.obj == 'count:poisson':
            return sm.families.Poisson()
        if self.ctx.obj == 'reg:gamma':
            return sm.families.Gamma()
        power = tweedie_power if tweedie_power is not None else 1.5
        return sm.families.Tweedie(var_power=power, link=sm.families.links.log())

    def _prepare_design(self, data: pd.DataFrame) -> pd.DataFrame:
        # Add an intercept to the statsmodels design matrix.
        X = data[self.ctx.var_nmes]
        return sm.add_constant(X, has_constant='add')

    def _metric_power(self, family, tweedie_power: Optional[float]) -> float:
        if isinstance(family, sm.families.Poisson):
            return 1.0
        if isinstance(family, sm.families.Gamma):
            return 2.0
        if isinstance(family, sm.families.Tweedie):
            return tweedie_power if tweedie_power is not None else getattr(family, 'var_power', 1.5)
        return 1.5

    def cross_val(self, trial: optuna.trial.Trial) -> float:
        alpha = trial.suggest_float('alpha', 1e-6, 1e2, log=True)
        l1_ratio = trial.suggest_float('l1_ratio', 0.0, 1.0)
        tweedie_power = None
        if self.ctx.task_type == 'regression' and self.ctx.obj == 'reg:tweedie':
            tweedie_power = trial.suggest_float('tweedie_power', 1.0, 2.0)

        X_all = self._prepare_design(self.ctx.train_oht_scl_data)
        y_all = self.ctx.train_oht_scl_data[self.ctx.resp_nme]
        w_all = self.ctx.train_oht_scl_data[self.ctx.weight_nme]

        scores = []
        for train_idx, val_idx in self.ctx.cv.split(X_all):
            X_train, X_val = X_all.iloc[train_idx], X_all.iloc[val_idx]
            y_train, y_val = y_all.iloc[train_idx], y_all.iloc[val_idx]
            w_train, w_val = w_all.iloc[train_idx], w_all.iloc[val_idx]

            family = self._select_family(tweedie_power)
            glm = sm.GLM(y_train, X_train, family=family,
                         freq_weights=w_train)
            result = glm.fit_regularized(
                alpha=alpha, L1_wt=l1_ratio, maxiter=200)

            y_pred = result.predict(X_val)
            if self.ctx.task_type == 'classification':
                y_pred = np.clip(y_pred, EPS, 1 - EPS)
                fold_score = log_loss(
                    y_val, y_pred, sample_weight=w_val)
            else:
                y_pred = np.maximum(y_pred, EPS)
                fold_score = mean_tweedie_deviance(
                    y_val,
                    y_pred,
                    sample_weight=w_val,
                    power=self._metric_power(family, tweedie_power)
                )
            scores.append(fold_score)

        return float(np.mean(scores))

    def train(self) -> None:
        if not self.best_params:
            raise RuntimeError(
                'Run tune() first to obtain the best GLM parameters.')
        tweedie_power = self.best_params.get('tweedie_power')
        family = self._select_family(tweedie_power)

        X_train = self._prepare_design(self.ctx.train_oht_scl_data)
        y_train = self.ctx.train_oht_scl_data[self.ctx.resp_nme]
        w_train = self.ctx.train_oht_scl_data[self.ctx.weight_nme]

        glm = sm.GLM(y_train, X_train, family=family,
                     freq_weights=w_train)
        self.model = glm.fit_regularized(
            alpha=self.best_params['alpha'],
            L1_wt=self.best_params['l1_ratio'],
            maxiter=300
        )

        self.ctx.glm_best = self.model
        self.ctx.model_label += [self.label]
        self._predict_and_cache(
            self.model,
            'glm',
            design_fn=lambda train: self._prepare_design(
                self.ctx.train_oht_scl_data if train else self.ctx.test_oht_scl_data
            )
        )

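# Note on fit_regularized above: statsmodels fits an elastic-net penalised
# GLM, where L1_wt mixes the penalty (1.0 = pure lasso, 0.0 = pure ridge) and
# alpha scales its overall strength. Optuna therefore searches alpha on a log
# scale together with the mixing weight l1_ratio.
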
class ResNetTrainer(TrainerBase):
    def __init__(self, context: "BayesOptModel") -> None:
        if context.task_type == 'classification':
            super().__init__(context, 'ResNetClassifier', 'ResNet')
        else:
            super().__init__(context, 'ResNet', 'ResNet')
        self.model: Optional[ResNetSklearn] = None

    # ========= Cross-validation (used by BayesOpt) =========
    def cross_val(self, trial: optuna.trial.Trial) -> float:
        # ResNet cross-validation with a focus on GPU memory control:
        # - each fold builds its own ResNetSklearn and releases it immediately;
        # - after a fold finishes, move the model to CPU, delete it and call
        #   gc/empty_cache;
        # - optionally sample only part of the training set during BayesOpt to
        #   reduce memory pressure.

        # 1. Hyperparameter space (largely the earlier settings)
        learning_rate = trial.suggest_float(
            'learning_rate', 1e-6, 1e-2, log=True
        )
        # hidden_dim = trial.suggest_int('hidden_dim', 32, 256, step=32)  # avoid very wide layers
        hidden_dim = trial.suggest_int('hidden_dim', 8, 32, step=2)
        block_num = trial.suggest_int('block_num', 2, 10)

        if self.ctx.task_type == 'regression':
            if self.ctx.obj == 'reg:tweedie':
                tw_power = trial.suggest_float('tw_power', 1.0, 2.0)
            elif self.ctx.obj == 'count:poisson':
                tw_power = 1.0
            elif self.ctx.obj == 'reg:gamma':
                tw_power = 2.0
            else:
                tw_power = 1.5
        else:  # classification
            tw_power = None  # not used

        fold_losses = []

        # 2. (Optional) run the BayesOpt CV on a subsample only, easing memory
        #    and time pressure.
        data_for_cv = self.ctx.train_oht_scl_data
        max_rows_for_resnet_bo = min(100000, int(
            len(data_for_cv)/5))  # tune down for your GPU (e.g. an A30), say 50_000
        if len(data_for_cv) > max_rows_for_resnet_bo:
            data_for_cv = data_for_cv.sample(
                max_rows_for_resnet_bo,
                random_state=self.ctx.rand_seed
            )

        X_all = data_for_cv[self.ctx.var_nmes]
        y_all = data_for_cv[self.ctx.resp_nme]
        w_all = data_for_cv[self.ctx.weight_nme]

        # Use a local ShuffleSplit so indices stay consistent on the subsample.
        cv_local = ShuffleSplit(
            n_splits=int(1 / self.ctx.prop_test),
            test_size=self.ctx.prop_test,
            random_state=self.ctx.rand_seed
        )

        # Use hold-out validation instead of K-fold CV for speed:
        # take a single split only.
        train_idx, val_idx = next(cv_local.split(X_all))

        X_train_fold = X_all.iloc[train_idx]
        y_train_fold = y_all.iloc[train_idx]
        w_train_fold = w_all.iloc[train_idx]

        X_val_fold = X_all.iloc[val_idx]
        y_val_fold = y_all.iloc[val_idx]
        w_val_fold = w_all.iloc[val_idx]

        # 3. Build the ResNet model
        cv_net = ResNetSklearn(
            model_nme=self.ctx.model_nme,
            input_dim=X_all.shape[1],
            hidden_dim=hidden_dim,
            block_num=block_num,
            task_type=self.ctx.task_type,
            epochs=self.ctx.epochs,
            tweedie_power=tw_power,
            learning_rate=learning_rate,
            patience=5,
            use_layernorm=True,
            dropout=0.1,
            residual_scale=0.1,
            use_data_parallel=self.ctx.config.use_resn_data_parallel,
            use_ddp=self.ctx.config.use_resn_ddp
        )

        try:
            # 4. Train
            cv_net.fit(
                X_train_fold,
                y_train_fold,
                w_train_fold,
                X_val_fold,
                y_val_fold,
                w_val_fold,
                trial=trial
            )

            # 5. Predict on the validation fold
            y_pred_fold = cv_net.predict(X_val_fold)

            # 6. Evaluate with Tweedie deviance (evaluation only; the training
            #    loss is left unchanged)
            if self.ctx.task_type == 'regression':
                loss = mean_tweedie_deviance(
                    y_val_fold,
                    y_pred_fold,
                    sample_weight=w_val_fold,
                    power=tw_power
                )
            else:  # classification
                from sklearn.metrics import log_loss
                loss = log_loss(
                    y_val_fold,
                    y_pred_fold,
                    sample_weight=w_val_fold,
                )
            fold_losses.append(loss)
        finally:
            # 7. Release GPU resources when done
            try:
                if hasattr(cv_net, "resnet"):
                    cv_net.resnet.to("cpu")
            except Exception:
                pass
            del cv_net
            self._clean_gpu()

        return np.mean(fold_losses)

    # ========= Train the final ResNet with the best hyperparameters =========
    def train(self) -> None:
        if not self.best_params:
            raise RuntimeError(
                'Run tune() first to obtain the best ResNet parameters.')

        self.model = ResNetSklearn(
            model_nme=self.ctx.model_nme,
            input_dim=self.ctx.train_oht_scl_data[self.ctx.var_nmes].shape[1],
            task_type=self.ctx.task_type,
            use_data_parallel=self.ctx.config.use_resn_data_parallel,
            use_ddp=self.ctx.config.use_resn_ddp
        )
        self.model.set_params(self.best_params)

        self._fit_predict_cache(
            self.model,
            self.ctx.train_oht_scl_data[self.ctx.var_nmes],
            self.ctx.train_oht_scl_data[self.ctx.resp_nme],
            sample_weight=self.ctx.train_oht_scl_data[self.ctx.weight_nme],
            pred_prefix='resn',
            use_oht=True,
            sample_weight_arg='w_train'
        )

        # Convenience handle for external callers
        self.ctx.resn_best = self.model

    # ========= Save / load =========
    # ResNet is saved as a state_dict and needs bespoke load logic, so load()
    # stays here; save() is already handled in TrainerBase (it checks for the
    # .resnet attribute).

    def load(self) -> None:
        # Load the ResNet weights from disk onto the current device, keeping
        # the model consistent with the context.
        path = self.output.model_path(self._get_model_filename())
        if os.path.exists(path):
            resn_loaded = ResNetSklearn(
                model_nme=self.ctx.model_nme,
                input_dim=self.ctx.train_oht_scl_data[self.ctx.var_nmes].shape[1],
                task_type=self.ctx.task_type,
                use_data_parallel=self.ctx.config.use_resn_data_parallel,
                use_ddp=self.ctx.config.use_resn_ddp
            )
            state_dict = torch.load(path, map_location='cpu')
            resn_loaded.resnet.load_state_dict(state_dict)

            self._move_to_device(resn_loaded)
            self.model = resn_loaded
            self.ctx.resn_best = self.model
        else:
            print(f"[ResNetTrainer.load] Model file not found: {path}")

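# Design note on cross_val above: although a ShuffleSplit with
# int(1 / prop_test) candidate splits is constructed, only the first split is
# consumed via next() (hold-out validation), trading CV variance for speed and
# GPU memory. With prop_test = 0.25 that is a single 75/25 split out of 4
# candidate splits.
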
class FTTrainer(TrainerBase):
    def __init__(self, context: "BayesOptModel") -> None:
        if context.task_type == 'classification':
            super().__init__(context, 'FTTransformerClassifier', 'FTTransformer')
        else:
            super().__init__(context, 'FTTransformer', 'FTTransformer')
        self.model: Optional[FTTransformerSklearn] = None

    def cross_val(self, trial: optuna.trial.Trial) -> float:
        # FT-Transformer cross-validation; the focus is again GPU memory:
        # - shrink the hyperparameter search space to avoid needlessly huge models;
        # - release GPU memory as soon as each fold finishes so the next trial
        #   can proceed.
        # The search space is kept slightly tighter to avoid very large models.
        learning_rate = trial.suggest_float(
            'learning_rate', 1e-5, 5e-4, log=True
        )
        d_model = trial.suggest_int('d_model', 32, 256, step=32)
        # n_heads = trial.suggest_categorical('n_heads', [2, 4])  # widened to avoid underfitting
        n_heads = trial.suggest_categorical('n_heads', [2, 4, 8])
        # n_layers = trial.suggest_int('n_layers', 2, 4)  # widened to avoid underfitting
        n_layers = trial.suggest_int('n_layers', 2, 8)
        dropout = trial.suggest_float('dropout', 0.0, 0.2)
        approx_units = d_model * n_layers * max(1, len(self.ctx.factor_nmes))
        if approx_units > 1_200_000:
            print(
                f"[FTTrainer] Trial pruned early: d_model={d_model}, n_layers={n_layers} -> approx_units={approx_units}")
            raise optuna.TrialPruned(
                "config exceeds safe memory budget; prune before training")

        if self.ctx.task_type == 'regression':
            if self.ctx.obj == 'reg:tweedie':
                tw_power = trial.suggest_float('tw_power', 1.0, 2.0)
            elif self.ctx.obj == 'count:poisson':
                tw_power = 1.0
            elif self.ctx.obj == 'reg:gamma':
                tw_power = 2.0
            else:
                tw_power = 1.5
        else:  # classification
            tw_power = None  # not used

        fold_losses = []

        # Optional: run BO on a subsample only, so large datasets do not
        # overwhelm GPU memory.
        data_for_cv = self.ctx.train_data
        max_rows_for_ft_bo = min(1000000, int(
            len(data_for_cv)/2))  # adjust up or down for your GPU memory
        if len(data_for_cv) > max_rows_for_ft_bo:
            data_for_cv = data_for_cv.sample(
                max_rows_for_ft_bo,
                random_state=self.ctx.rand_seed
            )

        # Use a local ShuffleSplit so indices stay consistent on the subsample.
        cv_local = ShuffleSplit(
            n_splits=int(1 / self.ctx.prop_test),
            test_size=self.ctx.prop_test,
            random_state=self.ctx.rand_seed
        )

        # Use hold-out validation instead of K-fold CV for speed:
        # take a single split only.
        train_idx, val_idx = next(cv_local.split(
            data_for_cv[self.ctx.factor_nmes]))

        X_train_fold = data_for_cv.iloc[train_idx][self.ctx.factor_nmes]
        y_train_fold = data_for_cv.iloc[train_idx][self.ctx.resp_nme]
        w_train_fold = data_for_cv.iloc[train_idx][self.ctx.weight_nme]
        X_val_fold = data_for_cv.iloc[val_idx][self.ctx.factor_nmes]
        y_val_fold = data_for_cv.iloc[val_idx][self.ctx.resp_nme]
        w_val_fold = data_for_cv.iloc[val_idx][self.ctx.weight_nme]

        cv_ft = FTTransformerSklearn(
            model_nme=self.ctx.model_nme,
            num_cols=self.ctx.num_features,
            cat_cols=self.ctx.cate_list,
            d_model=d_model,
            n_heads=n_heads,
            n_layers=n_layers,
            dropout=dropout,
            task_type=self.ctx.task_type,
            # batch_num=batch_num,
            epochs=self.ctx.epochs,
            tweedie_power=tw_power,
            learning_rate=learning_rate,
            patience=5,
            use_data_parallel=self.ctx.config.use_ft_data_parallel,
            use_ddp=self.ctx.config.use_ft_ddp
        )

        try:
            cv_ft.fit(
                X_train_fold, y_train_fold, w_train_fold,
                X_val_fold, y_val_fold, w_val_fold,
                trial=trial
            )
            y_pred_fold = cv_ft.predict(X_val_fold)
            if self.ctx.task_type == 'regression':
                loss = mean_tweedie_deviance(
                    y_val_fold,
                    y_pred_fold,
                    sample_weight=w_val_fold,
                    power=tw_power
                )
            else:  # classification
                from sklearn.metrics import log_loss
                loss = log_loss(
                    y_val_fold,
                    y_pred_fold,
                    sample_weight=w_val_fold,
                )
            fold_losses.append(loss)
        finally:
            # Release GPU resources immediately afterwards.
            try:
                # If the model lives on the GPU, move it back to CPU first.
                if hasattr(cv_ft, "ft"):
                    cv_ft.ft.to("cpu")
            except Exception:
                pass
            del cv_ft
            self._clean_gpu()

        return np.mean(fold_losses)

    def train(self) -> None:
        if not self.best_params:
            raise RuntimeError(
                'Run tune() first to obtain the best FT-Transformer parameters.')
        self.model = FTTransformerSklearn(
            model_nme=self.ctx.model_nme,
            num_cols=self.ctx.num_features,
            cat_cols=self.ctx.cate_list,
            task_type=self.ctx.task_type,
            use_data_parallel=self.ctx.config.use_ft_data_parallel,
            use_ddp=self.ctx.config.use_ft_ddp
        )
        self.model.set_params(self.best_params)
        self._fit_predict_cache(
            self.model,
            self.ctx.train_data[self.ctx.factor_nmes],
            self.ctx.train_data[self.ctx.resp_nme],
            sample_weight=self.ctx.train_data[self.ctx.weight_nme],
            pred_prefix='ft',
            sample_weight_arg='w_train'
        )
        self.ctx.ft_best = self.model

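# Arithmetic behind the pruning guard above: approx_units = d_model * n_layers
# * n_features is a crude proxy for model size. With (hypothetical) values
# d_model=256, n_layers=8 and 600 factors, 256 * 8 * 600 = 1,228,800 exceeds
# the 1_200_000 budget, so the trial is pruned before any GPU allocation.
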
# =============================================================================
# BayesOpt orchestration & SHAP utilities
# =============================================================================
class BayesOptModel:
    def __init__(self, train_data, test_data,
                 model_nme, resp_nme, weight_nme, factor_nmes, task_type='regression',
                 binary_resp_nme=None,
                 cate_list=None, prop_test=0.25, rand_seed=None,
                 epochs=100, use_gpu=True,
                 use_resn_data_parallel: bool = False, use_ft_data_parallel: bool = False,
                 use_resn_ddp: bool = False, use_ft_ddp: bool = False):
        cfg = BayesOptConfig(
            model_nme=model_nme,
            task_type=task_type,
            resp_nme=resp_nme,
            weight_nme=weight_nme,
            factor_nmes=list(factor_nmes),
            binary_resp_nme=binary_resp_nme,
            cate_list=list(cate_list) if cate_list else None,
            prop_test=prop_test,
            rand_seed=rand_seed,
            epochs=epochs,
            use_gpu=use_gpu,
            use_resn_data_parallel=use_resn_data_parallel,
            use_ft_data_parallel=use_ft_data_parallel,
            use_resn_ddp=use_resn_ddp,
            use_ft_ddp=use_ft_ddp
        )
        self.config = cfg
        self.model_nme = cfg.model_nme
        self.task_type = cfg.task_type
        self.resp_nme = cfg.resp_nme
        self.weight_nme = cfg.weight_nme
        self.factor_nmes = cfg.factor_nmes
        self.binary_resp_nme = cfg.binary_resp_nme
        self.cate_list = list(cfg.cate_list or [])
        self.prop_test = cfg.prop_test
        self.epochs = cfg.epochs
        self.rand_seed = cfg.rand_seed if cfg.rand_seed is not None else np.random.randint(
            1, 10000)
        self.use_gpu = bool(cfg.use_gpu and torch.cuda.is_available())
        self.output_manager = OutputManager(os.getcwd(), self.model_nme)

        preprocessor = DatasetPreprocessor(train_data, test_data, cfg).run()
        self.train_data = preprocessor.train_data
        self.test_data = preprocessor.test_data
        self.train_oht_scl_data = preprocessor.train_oht_scl_data
        self.test_oht_scl_data = preprocessor.test_oht_scl_data
        self.var_nmes = preprocessor.var_nmes
        self.num_features = preprocessor.num_features
        self.cat_categories_for_shap = preprocessor.cat_categories_for_shap

        self.cv = ShuffleSplit(n_splits=int(1/self.prop_test),
                               test_size=self.prop_test,
                               random_state=self.rand_seed)
        if self.task_type == 'classification':
            self.obj = 'binary:logistic'
        else:  # regression: the objective is inferred from the model name by
            # substring convention ('f' -> frequency/Poisson, 's' ->
            # severity/Gamma, otherwise Tweedie).
            if 'f' in self.model_nme:
                self.obj = 'count:poisson'
            elif 's' in self.model_nme:
                self.obj = 'reg:gamma'
            elif 'bc' in self.model_nme:
                self.obj = 'reg:tweedie'
            else:
                self.obj = 'reg:tweedie'
        self.fit_params = {
            'sample_weight': self.train_data[self.weight_nme].values
        }
        self.model_label: List[str] = []
        self.optuna_storage = cfg.optuna_storage
        self.optuna_study_prefix = cfg.optuna_study_prefix or "bayesopt"

        # Register the per-model trainers; they are accessed by label below,
        # which makes it easy to plug in new models.
        self.trainers: Dict[str, TrainerBase] = {
            'glm': GLMTrainer(self),
            'xgb': XGBTrainer(self),
            'resn': ResNetTrainer(self),
            'ft': FTTrainer(self)
        }
        self.xgb_best = None
        self.resn_best = None
        self.glm_best = None
        self.ft_best = None
        self.best_xgb_params = None
        self.best_resn_params = None
        self.best_ft_params = None
        self.best_xgb_trial = None
        self.best_resn_trial = None
        self.best_ft_trial = None
        self.best_glm_params = None
        self.best_glm_trial = None
        self.xgb_load = None
        self.resn_load = None
        self.ft_load = None
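    # Illustrative name-to-objective mapping implied above (substring match,
    # checked in order 'f', 's', 'bc'; example names are hypothetical):
    #     model_nme='freq_model' -> 'count:poisson'
    #     model_nme='loss_model' -> 'reg:gamma'      ('s' appears in 'loss')
    #     model_nme='bc_model'   -> 'reg:tweedie'
    #     anything else          -> 'reg:tweedie'
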
    # One-way analysis plotting
    def plot_oneway(self, n_bins=10):
        for c in self.factor_nmes:
            fig = plt.figure(figsize=(7, 5))
            if c in self.cate_list:
                group_col = c
                plot_source = self.train_data
            else:
                group_col = f'{c}_bins'
                bins = pd.qcut(
                    self.train_data[c],
                    n_bins,
                    duplicates='drop'  # duplicate quantiles drop bins instead of raising
                )
                plot_source = self.train_data.assign(**{group_col: bins})
            plot_data = plot_source.groupby(
                [group_col], observed=True).sum(numeric_only=True)
            plot_data.reset_index(inplace=True)
            plot_data['act_v'] = plot_data['w_act'] / \
                plot_data[self.weight_nme]
            ax = fig.add_subplot(111)
            ax.plot(plot_data.index, plot_data['act_v'],
                    label='Actual', color='red')
            ax.set_title(
                'Analysis of %s : Train Data' % group_col,
                fontsize=8)
            plt.xticks(plot_data.index,
                       list(plot_data[group_col].astype(str)),
                       rotation=90)
            if len(list(plot_data[group_col].astype(str))) > 50:
                plt.xticks(fontsize=3)
            else:
                plt.xticks(fontsize=6)
            plt.yticks(fontsize=6)
            ax2 = ax.twinx()
            ax2.bar(plot_data.index,
                    plot_data[self.weight_nme],
                    alpha=0.5, color='seagreen')
            plt.yticks(fontsize=6)
            plt.margins(0.05)
            plt.subplots_adjust(wspace=0.3)
            save_path = self.output_manager.plot_path(
                f'00_{self.model_nme}_{group_col}_oneway.png')
            plt.savefig(save_path, dpi=300)
            plt.close(fig)
    # Generic optimisation entry point
    def optimize_model(self, model_key: str, max_evals: int = 100):
        if model_key not in self.trainers:
            print(f"Warning: Unknown model key: {model_key}")
            return

        trainer = self.trainers[model_key]
        trainer.tune(max_evals)
        trainer.train()

        # Update context attributes for backward compatibility
        setattr(self, f"{model_key}_best", trainer.model)
        setattr(self, f"best_{model_key}_params", trainer.best_params)
        setattr(self, f"best_{model_key}_trial", trainer.best_trial)

    # GLM Bayesian optimisation
    def bayesopt_glm(self, max_evals=50):
        self.optimize_model('glm', max_evals)

    # XGBoost Bayesian optimisation
    def bayesopt_xgb(self, max_evals=100):
        self.optimize_model('xgb', max_evals)

    # ResNet Bayesian optimisation
    def bayesopt_resnet(self, max_evals=100):
        self.optimize_model('resn', max_evals)

    # FT-Transformer Bayesian optimisation
    def bayesopt_ft(self, max_evals=50):
        self.optimize_model('ft', max_evals)
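    # Typical driver flow (illustrative only; argument values are examples):
    #
    #     bo = BayesOptModel(train_df, test_df, model_nme='demo_f',
    #                        resp_nme='claim_freq', weight_nme='exposure',
    #                        factor_nmes=['age', 'region'], cate_list=['region'])
    #     bo.bayesopt_xgb(max_evals=50)   # tune + refit + cache predictions
    #     bo.plot_lift('Xgboost', 'pred_xgb')
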
    # Plot lift curves
    def plot_lift(self, model_label, pred_nme, n_bins=10):
        model_map = {
            'Xgboost': 'pred_xgb',
            'ResNet': 'pred_resn',
            'ResNetClassifier': 'pred_resn',
            'FTTransformer': 'pred_ft',
            'FTTransformerClassifier': 'pred_ft',
            'GLM': 'pred_glm'
        }
        for k, v in model_map.items():
            if model_label.startswith(k):
                pred_nme = v
                break

        fig = plt.figure(figsize=(11, 5))
        for pos, (title, data) in zip([121, 122],
                                      [('Lift Chart on Train Data', self.train_data),
                                       ('Lift Chart on Test Data', self.test_data)]):
            lift_df = pd.DataFrame({
                'pred': data[pred_nme].values,
                'w_pred': data[f'w_{pred_nme}'].values,
                'act': data['w_act'].values,
                'weight': data[self.weight_nme].values
            })
            plot_data = PlotUtils.split_data(lift_df, 'pred', 'weight', n_bins)
            denom = np.maximum(plot_data['weight'], EPS)
            plot_data['exp_v'] = plot_data['w_pred'] / denom
            plot_data['act_v'] = plot_data['act'] / denom
            plot_data = plot_data.reset_index()

            ax = fig.add_subplot(pos)
            PlotUtils.plot_lift_ax(ax, plot_data, title)

        plt.subplots_adjust(wspace=0.3)
        save_path = self.output_manager.plot_path(
            f'01_{self.model_nme}_{model_label}_lift.png')
        plt.savefig(save_path, dpi=300)
        plt.show()
        plt.close(fig)
    # Plot double lift curves
    def plot_dlift(self, model_comp: List[str] = ['xgb', 'resn'], n_bins: int = 10) -> None:
        # Draw a double lift chart comparing two models bin by bin.
        # Args:
        #     model_comp: short names of the two models to compare
        #                 (e.g. ['xgb', 'resn']; 'xgb'/'resn'/'ft' are supported).
        #     n_bins: number of bins controlling the lift-curve granularity.
        if len(model_comp) != 2:
            raise ValueError(
                "`model_comp` must contain exactly two models to compare.")

        model_name_map = {
            'xgb': 'Xgboost',
            'resn': 'ResNet',
            'ft': 'FTTransformer',
            'glm': 'GLM'
        }

        name1, name2 = model_comp
        if name1 not in model_name_map or name2 not in model_name_map:
            raise ValueError(
                f"Unsupported model alias. Choose from {list(model_name_map.keys())}.")

        fig, axes = plt.subplots(1, 2, figsize=(11, 5))
        datasets = {
            'Train Data': self.train_data,
            'Test Data': self.test_data
        }

        for ax, (data_name, data) in zip(axes, datasets.items()):
            pred1_col = f'w_pred_{name1}'
            pred2_col = f'w_pred_{name2}'

            if pred1_col not in data.columns or pred2_col not in data.columns:
                print(
                    f"Warning: prediction columns {pred1_col} or {pred2_col} "
                    f"not found in {data_name}. Skipping plot.")
                continue

            lift_data = pd.DataFrame({
                'pred1': data[pred1_col].values,
                'pred2': data[pred2_col].values,
                'diff_ly': data[pred1_col].values / np.maximum(data[pred2_col].values, EPS),
                'act': data['w_act'].values,
                'weight': data[self.weight_nme].values
            })
            plot_data = PlotUtils.split_data(
                lift_data, 'diff_ly', 'weight', n_bins)
            denom = np.maximum(plot_data['act'], EPS)
            plot_data['exp_v1'] = plot_data['pred1'] / denom
            plot_data['exp_v2'] = plot_data['pred2'] / denom
            plot_data['act_v'] = plot_data['act'] / denom
            plot_data.reset_index(inplace=True)

            label1 = model_name_map[name1]
            label2 = model_name_map[name2]

            PlotUtils.plot_dlift_ax(
                ax, plot_data, f'Double Lift Chart on {data_name}', label1, label2)

        plt.subplots_adjust(bottom=0.25, top=0.95, right=0.8, wspace=0.3)
        save_path = self.output_manager.plot_path(
            f'02_{self.model_nme}_dlift_{name1}_vs_{name2}.png')
        plt.savefig(save_path, dpi=300)
        plt.show()
        plt.close(fig)
    # Plot conversion-rate lift curves
    def plot_conversion_lift(self, model_pred_col: str, n_bins: int = 20):
        if not self.binary_resp_nme:
            print("Error: `binary_resp_nme` was not provided when BayesOptModel "
                  "was initialised; cannot plot the conversion-rate curve.")
            return

        fig, axes = plt.subplots(1, 2, figsize=(14, 6), sharey=True)
        datasets = {
            'Train Data': self.train_data,
            'Test Data': self.test_data
        }

        for ax, (data_name, data) in zip(axes, datasets.items()):
            if model_pred_col not in data.columns:
                print(f"Warning: prediction column '{model_pred_col}' not found "
                      f"in {data_name}. Skipping plot.")
                continue

            # Sort by model score and derive weight-balanced bins.
            plot_data = data.sort_values(by=model_pred_col).copy()
            plot_data['cum_weight'] = plot_data[self.weight_nme].cumsum()
            total_weight = plot_data[self.weight_nme].sum()

            if total_weight > EPS:
                plot_data['bin'] = pd.cut(
                    plot_data['cum_weight'],
                    bins=n_bins,
                    labels=False,
                    right=False
                )
            else:
                plot_data['bin'] = 0

            # Aggregate per bin.
            lift_agg = plot_data.groupby('bin').agg(
                total_weight=(self.weight_nme, 'sum'),
                actual_conversions=(self.binary_resp_nme, 'sum'),
                weighted_conversions=('w_binary_act', 'sum'),
                avg_pred=(model_pred_col, 'mean')
            ).reset_index()

            # Conversion rate per bin.
            lift_agg['conversion_rate'] = lift_agg['weighted_conversions'] / \
                lift_agg['total_weight']

            # Overall average conversion rate.
            overall_conversion_rate = data['w_binary_act'].sum(
            ) / data[self.weight_nme].sum()
            ax.axhline(y=overall_conversion_rate, color='gray', linestyle='--',
                       label=f'Overall Avg Rate ({overall_conversion_rate:.2%})')

            ax.plot(lift_agg['bin'], lift_agg['conversion_rate'],
                    marker='o', linestyle='-', label='Actual Conversion Rate')
            ax.set_title(f'Conversion Rate Lift Chart on {data_name}')
            ax.set_xlabel(f'Model Score Decile (based on {model_pred_col})')
            ax.set_ylabel('Conversion Rate')
            ax.grid(True, linestyle='--', alpha=0.6)
            ax.legend()

        plt.tight_layout()
        plt.show()
    # Save models
    def save_model(self, model_name=None):
        keys = [model_name] if model_name else self.trainers.keys()
        for key in keys:
            if key in self.trainers:
                self.trainers[key].save()
            else:
                if model_name:  # only warn when a specific model was requested
                    print(f"[save_model] Warning: Unknown model key {key}")

    def load_model(self, model_name=None):
        keys = [model_name] if model_name else self.trainers.keys()
        for key in keys:
            if key in self.trainers:
                self.trainers[key].load()
                # Update context attributes
                trainer = self.trainers[key]
                if trainer.model is not None:
                    setattr(self, f"{key}_best", trainer.model)
                    # Also update xxx_load for backward compatibility;
                    # the original code had xgb_load, resn_load and ft_load
                    # but no glm_load.
                    if key in ['xgb', 'resn', 'ft']:
                        setattr(self, f"{key}_load", trainer.model)
            else:
                if model_name:
                    print(f"[load_model] Warning: Unknown model key {key}")
def _sample_rows(self, data: pd.DataFrame, n: int) -> pd.DataFrame:
|
|
2581
|
+
if len(data) == 0:
|
|
2582
|
+
return data
|
|
2583
|
+
return data.sample(min(len(data), n), random_state=self.rand_seed)
|
|
2584
|
+
|
|
2585
|
+

    @staticmethod
    def _shap_nsamples(arr: np.ndarray, max_nsamples: int = 300) -> int:
        # Kernel SHAP fits a weighted regression with one coefficient per
        # feature plus an intercept, so it needs at least n_features + 2
        # evaluations; cap the budget at max_nsamples (or rows * cols when
        # the matrix is tiny).
        min_needed = arr.shape[1] + 2
        return max(min_needed, min(max_nsamples, arr.shape[0] * arr.shape[1]))
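
A quick check of that bound (illustrative values only; `Model` stands in for the enclosing class, whose real name appears earlier in this file):

import numpy as np
assert Model._shap_nsamples(np.zeros((200, 10))) == 300  # capped at max_nsamples
assert Model._shap_nsamples(np.zeros((2, 10))) == 20     # rows * cols wins
assert Model._shap_nsamples(np.zeros((1, 10))) == 12     # floor of F + 2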

    def _build_ft_shap_matrix(self, data: pd.DataFrame) -> np.ndarray:
        # Convert the raw feature DataFrame (covering self.factor_nmes) into
        # a purely numeric matrix: numeric columns as float64, categorical
        # columns as integer codes (stored as float64).
        # Column order follows self.factor_nmes.
        matrices = []

        for col in self.factor_nmes:
            s = data[col]

            if col in self.cate_list:
                # Categorical column: encode against the full category set
                # seen at training time
                cats = pd.Categorical(
                    s,
                    categories=self.cat_categories_for_shap[col]
                )
                # cats.codes is an Index / ndarray; wrap it in np.asarray
                # before reshaping
                codes = np.asarray(cats.codes, dtype=np.float64).reshape(-1, 1)
                matrices.append(codes)
            else:
                # Numeric column: Series -> numpy -> reshape
                vals = pd.to_numeric(s, errors="coerce")
                arr = vals.to_numpy(dtype=np.float64, copy=True).reshape(-1, 1)
                matrices.append(arr)

        X_mat = np.concatenate(matrices, axis=1)  # (N, F)
        return X_mat

    def _decode_ft_shap_matrix_to_df(self, X_mat: np.ndarray) -> pd.DataFrame:
        # Restore SHAP's numeric matrix (N, F) to a raw feature DataFrame:
        # numeric columns as float, categorical columns back to pandas
        # category dtype, so the result is accepted both by XGBoost with
        # enable_categorical=True and by the FT-Transformer input pipeline.
        # Column order = self.factor_nmes.
        data_dict = {}

        for j, col in enumerate(self.factor_nmes):
            col_vals = X_mat[:, j]

            if col in self.cate_list:
                cats = self.cat_categories_for_shap[col]

                # SHAP perturbs values into fractions; round back to
                # integer codes
                codes = np.round(col_vals).astype(int)
                # Clip into [-1, len(cats) - 1]
                codes = np.clip(codes, -1, len(cats) - 1)

                # pandas.Categorical.from_codes:
                # - code -1 is treated as missing (NaN)
                # - every other index maps to its category in cats
                cat_series = pd.Categorical.from_codes(
                    codes,
                    categories=cats
                )
                # Stored as Categorical dtype, not object
                data_dict[col] = cat_series
            else:
                # Numeric column: plain float
                data_dict[col] = col_vals.astype(float)

        df = pd.DataFrame(data_dict, columns=self.factor_nmes)

        # Belt and braces: make sure every categorical column's dtype
        # really is category
        for col in self.cate_list:
            if col in df.columns:
                df[col] = df[col].astype("category")
        return df
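
The encode/decode pair is designed to round-trip: a category becomes its integer code on the way into SHAP and is mapped back (with -1 treated as missing) on the way out. A self-contained illustration of that invariant, with a hypothetical category set:

import numpy as np
import pandas as pd

cats = ["comp", "tpft", "tpo"]              # hypothetical category set
s = pd.Series(["tpo", "comp", "unseen"])    # "unseen" is out-of-vocabulary

codes = np.asarray(pd.Categorical(s, categories=cats).codes, dtype=np.float64)
print(codes)                                # [ 2.  0. -1.]

# Decode as _decode_ft_shap_matrix_to_df does: round, clip, from_codes
back = pd.Categorical.from_codes(
    np.clip(np.round(codes).astype(int), -1, len(cats) - 1),
    categories=cats
)
print(back)                                 # ['tpo', 'comp', NaN]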

    def _build_glm_design(self, data: pd.DataFrame) -> pd.DataFrame:
        # Match the GLM training stage: add an intercept on top of the
        # one-hot encoded, standardized features
        X = data[self.var_nmes]
        return sm.add_constant(X, has_constant='add')
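
For reference, `has_constant='add'` appends the intercept unconditionally, whereas the statsmodels default (`'skip'`) would silently not add one if some column already looked constant. A tiny check:

import pandas as pd
import statsmodels.api as sm

X = pd.DataFrame({"x1": [0.5, 1.0, 1.5]})
print(sm.add_constant(X, has_constant='add').columns.tolist())
# ['const', 'x1']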

    def _compute_shap_core(self,
                           model_key: str,
                           n_background: int,
                           n_samples: int,
                           on_train: bool,
                           X_df: pd.DataFrame,
                           prep_fn,
                           predict_fn,
                           cleanup_fn=None):
        # Shared SHAP core: sample background data, build the explainer and
        # return the results. Note that on_train is accepted only for
        # signature symmetry; the callers have already selected the data.
        if model_key not in self.trainers or self.trainers[model_key].model is None:
            raise RuntimeError(f"Model {model_key} not trained.")

        if cleanup_fn:
            cleanup_fn()

        # Background
        bg_df = self._sample_rows(X_df, n_background)
        bg_mat = prep_fn(bg_df)

        # Explainer
        explainer = shap.KernelExplainer(predict_fn, bg_mat)

        # Explain data
        ex_df = self._sample_rows(X_df, n_samples)
        ex_mat = prep_fn(ex_df)

        nsample_eff = self._shap_nsamples(ex_mat)
        shap_values = explainer.shap_values(ex_mat, nsamples=nsample_eff)

        # Base value
        bg_pred = predict_fn(bg_mat)
        base_value = float(np.asarray(bg_pred).mean())

        return {
            "explainer": explainer,
            "X_explain": ex_df,
            "shap_values": shap_values,
            "base_value": base_value
        }
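
The helper above is essentially the canonical KernelExplainer recipe. A minimal standalone sketch of the same flow, with a toy sklearn model standing in for the trainers:

import numpy as np
import shap
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 4))
y = X @ np.array([1.0, -2.0, 0.5, 0.0]) + rng.normal(scale=0.1, size=500)
model = LinearRegression().fit(X, y)

bg = X[rng.choice(len(X), 50, replace=False)]        # background sample
explainer = shap.KernelExplainer(model.predict, bg)  # same call as above
sv = explainer.shap_values(X[:20], nsamples=100)     # (20, 4) attributions
base = float(model.predict(bg).mean())               # base value, as above
# Kernel SHAP's local accuracy: base + row-sum of attributions ~ prediction
print(np.allclose(base + sv.sum(axis=1), model.predict(X[:20]), atol=1e-2))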

    # ========= XGBoost SHAP =========
    def compute_shap_xgb(self, n_background: int = 500,
                         n_samples: int = 200,
                         on_train: bool = True):
        data = self.train_data if on_train else self.test_data
        X_raw = data[self.factor_nmes]

        def predict_wrapper(x_mat):
            df_input = self._decode_ft_shap_matrix_to_df(x_mat)
            return self.xgb_best.predict(df_input)

        self.shap_xgb = self._compute_shap_core(
            'xgb', n_background, n_samples, on_train,
            X_df=X_raw,
            prep_fn=lambda df: self._build_ft_shap_matrix(df).astype(np.float64),
            predict_fn=predict_wrapper
        )
        return self.shap_xgb

    # ========= ResNet SHAP =========
    def _resn_predict_wrapper(self, X_np):
        # Force CPU execution
        model = self.resn_best.resnet.to("cpu")
        with torch.no_grad():
            X_tensor = torch.tensor(X_np, dtype=torch.float32)
            y_pred = model(X_tensor).cpu().numpy()
            y_pred = np.clip(y_pred, 1e-6, None)
        return y_pred.reshape(-1)

    def compute_shap_resn(self, n_background: int = 500,
                          n_samples: int = 200,
                          on_train: bool = True):
        data = self.train_oht_scl_data if on_train else self.test_oht_scl_data
        X = data[self.var_nmes]

        def cleanup():
            self.resn_best.device = torch.device("cpu")
            self.resn_best.resnet.to("cpu")
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

        self.shap_resn = self._compute_shap_core(
            'resn', n_background, n_samples, on_train,
            X_df=X,
            prep_fn=lambda df: df.to_numpy(dtype=np.float64),
            predict_fn=self._resn_predict_wrapper,
            cleanup_fn=cleanup
        )
        return self.shap_resn

    # ========= FT-Transformer SHAP =========
    def _ft_shap_predict_wrapper(self, X_mat: np.ndarray) -> np.ndarray:
        df_input = self._decode_ft_shap_matrix_to_df(X_mat)
        y_pred = self.ft_best.predict(df_input)
        return np.asarray(y_pred, dtype=np.float64).reshape(-1)

    def compute_shap_ft(self, n_background: int = 500,
                        n_samples: int = 200,
                        on_train: bool = True):
        data = self.train_data if on_train else self.test_data
        X_raw = data[self.factor_nmes]

        def cleanup():
            self.ft_best.device = torch.device("cpu")
            self.ft_best.ft.to("cpu")
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

        self.shap_ft = self._compute_shap_core(
            'ft', n_background, n_samples, on_train,
            X_df=X_raw,
            prep_fn=lambda df: self._build_ft_shap_matrix(df).astype(np.float64),
            predict_fn=self._ft_shap_predict_wrapper,
            cleanup_fn=cleanup
        )
        return self.shap_ft

    # ========= GLM SHAP =========
    def compute_shap_glm(self, n_background: int = 500,
                         n_samples: int = 200,
                         on_train: bool = True):
        data = self.train_oht_scl_data if on_train else self.test_oht_scl_data
        design_all = self._build_glm_design(data)
        design_cols = list(design_all.columns)

        def predict_wrapper(x_np):
            x_df = pd.DataFrame(x_np, columns=design_cols)
            y_pred = self.glm_best.predict(x_df)
            return np.asarray(y_pred, dtype=np.float64).reshape(-1)

        res = self._compute_shap_core(
            'glm', n_background, n_samples, on_train,
            X_df=design_all,
            prep_fn=lambda df: df.to_numpy(dtype=np.float64),
            predict_fn=predict_wrapper
        )
        return res
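
A usage sketch for these entry points, assuming a fitted context `ctx` (hypothetical name); each returns the dict assembled by `_compute_shap_core`:

res = ctx.compute_shap_xgb(n_background=200, n_samples=100, on_train=False)
shap.summary_plot(res["shap_values"], res["X_explain"], plot_type="bar")

# compute_shap_resn / compute_shap_ft / compute_shap_glm follow the same
# pattern, differing only in the feature space handed to the explainer.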