ins-pricing 0.3.3__tar.gz → 0.4.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/PKG-INFO +162 -162
- ins_pricing-0.4.0/ins_pricing/docs/LOSS_FUNCTIONS.md +78 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/docs/modelling/BayesOpt_USAGE.md +3 -3
- ins_pricing-0.4.0/ins_pricing/frontend/QUICKSTART.md +152 -0
- ins_pricing-0.4.0/ins_pricing/frontend/README.md +388 -0
- ins_pricing-0.4.0/ins_pricing/frontend/__init__.py +10 -0
- ins_pricing-0.4.0/ins_pricing/frontend/app.py +903 -0
- ins_pricing-0.4.0/ins_pricing/frontend/config_builder.py +352 -0
- ins_pricing-0.4.0/ins_pricing/frontend/example_config.json +36 -0
- ins_pricing-0.4.0/ins_pricing/frontend/example_workflows.py +979 -0
- ins_pricing-0.4.0/ins_pricing/frontend/ft_workflow.py +316 -0
- ins_pricing-0.4.0/ins_pricing/frontend/runner.py +388 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/config_preprocess.py +12 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/core.py +21 -8
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/models/model_ft_trainer.py +16 -6
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/models/model_gnn.py +16 -6
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/models/model_resn.py +16 -7
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/trainers/trainer_base.py +2 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/trainers/trainer_ft.py +25 -8
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/trainers/trainer_glm.py +14 -11
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/trainers/trainer_gnn.py +29 -10
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/trainers/trainer_resn.py +28 -12
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/trainers/trainer_xgb.py +13 -14
- ins_pricing-0.4.0/ins_pricing/modelling/core/bayesopt/utils/losses.py +129 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/utils/metrics_and_devices.py +18 -3
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/utils/torch_trainer_mixin.py +24 -3
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/production/predict.py +693 -635
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/setup.py +1 -1
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/utils/metrics.py +27 -3
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing.egg-info/PKG-INFO +162 -162
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing.egg-info/SOURCES.txt +11 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/pyproject.toml +1 -1
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/setup.cfg +4 -4
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/MANIFEST.in +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/README.md +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/CHANGELOG.md +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/README.md +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/RELEASE_NOTES_0.2.8.md +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/BayesOpt_entry.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/BayesOpt_incremental.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/Explain_Run.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/Explain_entry.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/Pricing_Run.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/bayesopt_entry_runner.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/utils/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/utils/cli_common.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/utils/cli_config.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/utils/evaluation_context.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/utils/import_resolver.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/utils/notebook_utils.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/utils/run_logging.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/cli/watchdog_run.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/docs/modelling/README.md +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/exceptions.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/governance/README.md +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/governance/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/governance/approval.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/governance/audit.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/governance/registry.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/governance/release.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/BayesOpt.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/PHASE2_REFACTORING_SUMMARY.md +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/PHASE3_REFACTORING_SUMMARY.md +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/REFACTORING_SUMMARY.md +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/config_components.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/model_explain_mixin.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/model_plotting_mixin.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/models/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/models/model_ft_components.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/trainers/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/utils/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/utils/constants.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/utils/distributed_utils.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/utils/io_utils.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/utils.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/bayesopt/utils_backup.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/core/evaluation.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/explain/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/explain/gradients.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/explain/metrics.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/explain/permutation.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/explain/shap_utils.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/plotting/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/plotting/common.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/plotting/curves.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/plotting/diagnostics.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/plotting/geo.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/modelling/plotting/importance.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/pricing/README.md +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/pricing/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/pricing/calibration.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/pricing/data_quality.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/pricing/exposure.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/pricing/factors.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/pricing/monitoring.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/pricing/rate_table.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/production/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/production/drift.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/production/monitoring.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/production/preprocess.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/production/scoring.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/reporting/README.md +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/reporting/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/reporting/report_builder.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/reporting/scheduler.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/governance/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/governance/test_audit.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/governance/test_registry.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/governance/test_release.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/modelling/conftest.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/modelling/test_cross_val_generic.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/modelling/test_distributed_utils.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/modelling/test_explain.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/modelling/test_geo_tokens_split.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/modelling/test_graph_cache.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/modelling/test_plotting.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/modelling/test_plotting_library.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/modelling/test_preprocessor.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/pricing/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/pricing/test_calibration.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/pricing/test_exposure.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/pricing/test_factors.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/pricing/test_rate_table.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/production/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/production/test_monitoring.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/production/test_predict.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/production/test_preprocess.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/tests/production/test_scoring.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/utils/__init__.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/utils/device.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/utils/logging.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/utils/paths.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/utils/profiling.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/utils/torch_compat.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing/utils/validation.py +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing.egg-info/dependency_links.txt +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing.egg-info/requires.txt +0 -0
- {ins_pricing-0.3.3 → ins_pricing-0.4.0}/ins_pricing.egg-info/top_level.txt +0 -0
|
@@ -1,162 +1,162 @@
|
|
|
1
|
-
Metadata-Version: 2.4
|
|
2
|
-
Name: ins_pricing
|
|
3
|
-
Version: 0.3.3
|
|
4
|
-
Summary: Reusable modelling, pricing, governance, and reporting utilities.
|
|
5
|
-
Author: meishi125478
|
|
6
|
-
License: Proprietary
|
|
7
|
-
Keywords: pricing,insurance,bayesopt,ml
|
|
8
|
-
Classifier: Programming Language :: Python :: 3
|
|
9
|
-
Classifier: Programming Language :: Python :: 3 :: Only
|
|
10
|
-
Classifier: Programming Language :: Python :: 3.9
|
|
11
|
-
Classifier: License :: Other/Proprietary License
|
|
12
|
-
Classifier: Operating System :: OS Independent
|
|
13
|
-
Classifier: Intended Audience :: Developers
|
|
14
|
-
Requires-Python: >=3.9
|
|
15
|
-
Description-Content-Type: text/markdown
|
|
16
|
-
Requires-Dist: numpy>=1.20
|
|
17
|
-
Requires-Dist: pandas>=1.4
|
|
18
|
-
Provides-Extra: bayesopt
|
|
19
|
-
Requires-Dist: torch>=1.13; extra == "bayesopt"
|
|
20
|
-
Requires-Dist: optuna>=3.0; extra == "bayesopt"
|
|
21
|
-
Requires-Dist: xgboost>=1.6; extra == "bayesopt"
|
|
22
|
-
Requires-Dist: scikit-learn>=1.1; extra == "bayesopt"
|
|
23
|
-
Requires-Dist: statsmodels>=0.13; extra == "bayesopt"
|
|
24
|
-
Requires-Dist: joblib>=1.2; extra == "bayesopt"
|
|
25
|
-
Requires-Dist: matplotlib>=3.5; extra == "bayesopt"
|
|
26
|
-
Provides-Extra: plotting
|
|
27
|
-
Requires-Dist: matplotlib>=3.5; extra == "plotting"
|
|
28
|
-
Requires-Dist: scikit-learn>=1.1; extra == "plotting"
|
|
29
|
-
Provides-Extra: explain
|
|
30
|
-
Requires-Dist: torch>=1.13; extra == "explain"
|
|
31
|
-
Requires-Dist: shap>=0.41; extra == "explain"
|
|
32
|
-
Requires-Dist: scikit-learn>=1.1; extra == "explain"
|
|
33
|
-
Provides-Extra: geo
|
|
34
|
-
Requires-Dist: contextily>=1.3; extra == "geo"
|
|
35
|
-
Requires-Dist: matplotlib>=3.5; extra == "geo"
|
|
36
|
-
Provides-Extra: gnn
|
|
37
|
-
Requires-Dist: torch>=1.13; extra == "gnn"
|
|
38
|
-
Requires-Dist: pynndescent>=0.5; extra == "gnn"
|
|
39
|
-
Requires-Dist: torch-geometric>=2.3; extra == "gnn"
|
|
40
|
-
Provides-Extra: all
|
|
41
|
-
Requires-Dist: torch>=1.13; extra == "all"
|
|
42
|
-
Requires-Dist: optuna>=3.0; extra == "all"
|
|
43
|
-
Requires-Dist: xgboost>=1.6; extra == "all"
|
|
44
|
-
Requires-Dist: scikit-learn>=1.1; extra == "all"
|
|
45
|
-
Requires-Dist: statsmodels>=0.13; extra == "all"
|
|
46
|
-
Requires-Dist: joblib>=1.2; extra == "all"
|
|
47
|
-
Requires-Dist: matplotlib>=3.5; extra == "all"
|
|
48
|
-
Requires-Dist: shap>=0.41; extra == "all"
|
|
49
|
-
Requires-Dist: contextily>=1.3; extra == "all"
|
|
50
|
-
Requires-Dist: pynndescent>=0.5; extra == "all"
|
|
51
|
-
Requires-Dist: torch-geometric>=2.3; extra == "all"
|
|
52
|
-
|
|
53
|
-
# Insurance-Pricing
|
|
54
|
-
|
|
55
|
-
A reusable toolkit for insurance modeling, pricing, governance, and reporting.
|
|
56
|
-
|
|
57
|
-
## Overview
|
|
58
|
-
|
|
59
|
-
Insurance-Pricing (ins_pricing) is an enterprise-grade Python library designed for machine learning model training, pricing calculations, and model governance workflows in the insurance industry.
|
|
60
|
-
|
|
61
|
-
### Core Modules
|
|
62
|
-
|
|
63
|
-
| Module | Description |
|
|
64
|
-
|--------|-------------|
|
|
65
|
-
| **modelling** | ML model training (GLM, XGBoost, ResNet, FT-Transformer, GNN) and model interpretability (SHAP, permutation importance) |
|
|
66
|
-
| **pricing** | Factor table construction, numeric binning, premium calibration, exposure calculation, PSI monitoring |
|
|
67
|
-
| **production** | Model prediction, batch scoring, data drift detection, production metrics monitoring |
|
|
68
|
-
| **governance** | Model registry, version management, approval workflows, audit logging |
|
|
69
|
-
| **reporting** | Report generation (Markdown format), report scheduling |
|
|
70
|
-
| **utils** | Data validation, performance profiling, device management, logging configuration |
|
|
71
|
-
|
|
72
|
-
### Quick Start
|
|
73
|
-
|
|
74
|
-
```python
|
|
75
|
-
# Model training with Bayesian optimization
|
|
76
|
-
from ins_pricing import bayesopt as ropt
|
|
77
|
-
|
|
78
|
-
model = ropt.BayesOptModel(
|
|
79
|
-
train_data, test_data,
|
|
80
|
-
model_name='my_model',
|
|
81
|
-
resp_nme='target',
|
|
82
|
-
weight_nme='weight',
|
|
83
|
-
factor_nmes=feature_list,
|
|
84
|
-
cate_list=categorical_features,
|
|
85
|
-
)
|
|
86
|
-
model.bayesopt_xgb(max_evals=100) # Train XGBoost
|
|
87
|
-
model.bayesopt_resnet(max_evals=50) # Train ResNet
|
|
88
|
-
model.bayesopt_ft(max_evals=50) # Train FT-Transformer
|
|
89
|
-
|
|
90
|
-
# Pricing: build factor table
|
|
91
|
-
from ins_pricing.pricing import build_factor_table
|
|
92
|
-
factors = build_factor_table(
|
|
93
|
-
df,
|
|
94
|
-
factor_col='age_band',
|
|
95
|
-
loss_col='claim_amount',
|
|
96
|
-
exposure_col='exposure',
|
|
97
|
-
)
|
|
98
|
-
|
|
99
|
-
# Production: batch scoring
|
|
100
|
-
from ins_pricing.production import batch_score
|
|
101
|
-
scores = batch_score(model.trainers['xgb'].predict, df)
|
|
102
|
-
|
|
103
|
-
# Model governance
|
|
104
|
-
from ins_pricing.governance import ModelRegistry
|
|
105
|
-
registry = ModelRegistry('models.json')
|
|
106
|
-
registry.register(model_name, version, metrics=metrics)
|
|
107
|
-
```
|
|
108
|
-
|
|
109
|
-
### Project Structure
|
|
110
|
-
|
|
111
|
-
```
|
|
112
|
-
ins_pricing/
|
|
113
|
-
├── cli/ # Command-line entry points
|
|
114
|
-
├── modelling/
|
|
115
|
-
│ ├── core/bayesopt/ # ML model training core
|
|
116
|
-
│ ├── explain/ # Model interpretability
|
|
117
|
-
│ └── plotting/ # Model visualization
|
|
118
|
-
├── pricing/ # Insurance pricing module
|
|
119
|
-
├── production/ # Production deployment module
|
|
120
|
-
├── governance/ # Model governance
|
|
121
|
-
├── reporting/ # Report generation
|
|
122
|
-
├── utils/ # Utilities
|
|
123
|
-
└── tests/ # Test suite
|
|
124
|
-
```
|
|
125
|
-
|
|
126
|
-
### Installation
|
|
127
|
-
|
|
128
|
-
```bash
|
|
129
|
-
# Basic installation
|
|
130
|
-
pip install ins_pricing
|
|
131
|
-
|
|
132
|
-
# Full installation (all optional dependencies)
|
|
133
|
-
pip install ins_pricing[all]
|
|
134
|
-
|
|
135
|
-
# Install specific extras
|
|
136
|
-
pip install ins_pricing[bayesopt] # Model training
|
|
137
|
-
pip install ins_pricing[explain] # Model explanation
|
|
138
|
-
pip install ins_pricing[plotting] # Visualization
|
|
139
|
-
pip install ins_pricing[gnn] # Graph neural networks
|
|
140
|
-
```
|
|
141
|
-
|
|
142
|
-
#### Multi-platform & GPU installation notes
|
|
143
|
-
|
|
144
|
-
- **PyTorch (CPU/GPU/MPS)**: Install the correct PyTorch build for your platform/GPU first (CUDA on
|
|
145
|
-
Linux/Windows, ROCm on supported AMD platforms, or MPS on Apple Silicon). Then install the
|
|
146
|
-
optional extras you need (e.g., `bayesopt`, `explain`, or `gnn`). This avoids pip pulling a
|
|
147
|
-
mismatched wheel.
|
|
148
|
-
- **Torch Geometric (GNN)**: `torch-geometric` often requires platform-specific wheels (e.g.,
|
|
149
|
-
`torch-scatter`, `torch-sparse`). Follow the official PyG installation instructions for your
|
|
150
|
-
CUDA/ROCm/CPU environment, then install `ins_pricing[gnn]`.
|
|
151
|
-
- **Multi-GPU**: Training code will use CUDA when available and can enable multi-GPU via
|
|
152
|
-
`torch.distributed`/`DataParallel` where supported. On Windows, CUDA DDP is not supported and will
|
|
153
|
-
fall back to single-GPU or DataParallel where possible.
|
|
154
|
-
|
|
155
|
-
### Requirements
|
|
156
|
-
|
|
157
|
-
- Python >= 3.9
|
|
158
|
-
- Core dependencies: numpy >= 1.20, pandas >= 1.4
|
|
159
|
-
|
|
160
|
-
### License
|
|
161
|
-
|
|
162
|
-
Proprietary
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: ins_pricing
|
|
3
|
+
Version: 0.4.0
|
|
4
|
+
Summary: Reusable modelling, pricing, governance, and reporting utilities.
|
|
5
|
+
Author: meishi125478
|
|
6
|
+
License: Proprietary
|
|
7
|
+
Keywords: pricing,insurance,bayesopt,ml
|
|
8
|
+
Classifier: Programming Language :: Python :: 3
|
|
9
|
+
Classifier: Programming Language :: Python :: 3 :: Only
|
|
10
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
11
|
+
Classifier: License :: Other/Proprietary License
|
|
12
|
+
Classifier: Operating System :: OS Independent
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Requires-Python: >=3.9
|
|
15
|
+
Description-Content-Type: text/markdown
|
|
16
|
+
Requires-Dist: numpy>=1.20
|
|
17
|
+
Requires-Dist: pandas>=1.4
|
|
18
|
+
Provides-Extra: bayesopt
|
|
19
|
+
Requires-Dist: torch>=1.13; extra == "bayesopt"
|
|
20
|
+
Requires-Dist: optuna>=3.0; extra == "bayesopt"
|
|
21
|
+
Requires-Dist: xgboost>=1.6; extra == "bayesopt"
|
|
22
|
+
Requires-Dist: scikit-learn>=1.1; extra == "bayesopt"
|
|
23
|
+
Requires-Dist: statsmodels>=0.13; extra == "bayesopt"
|
|
24
|
+
Requires-Dist: joblib>=1.2; extra == "bayesopt"
|
|
25
|
+
Requires-Dist: matplotlib>=3.5; extra == "bayesopt"
|
|
26
|
+
Provides-Extra: plotting
|
|
27
|
+
Requires-Dist: matplotlib>=3.5; extra == "plotting"
|
|
28
|
+
Requires-Dist: scikit-learn>=1.1; extra == "plotting"
|
|
29
|
+
Provides-Extra: explain
|
|
30
|
+
Requires-Dist: torch>=1.13; extra == "explain"
|
|
31
|
+
Requires-Dist: shap>=0.41; extra == "explain"
|
|
32
|
+
Requires-Dist: scikit-learn>=1.1; extra == "explain"
|
|
33
|
+
Provides-Extra: geo
|
|
34
|
+
Requires-Dist: contextily>=1.3; extra == "geo"
|
|
35
|
+
Requires-Dist: matplotlib>=3.5; extra == "geo"
|
|
36
|
+
Provides-Extra: gnn
|
|
37
|
+
Requires-Dist: torch>=1.13; extra == "gnn"
|
|
38
|
+
Requires-Dist: pynndescent>=0.5; extra == "gnn"
|
|
39
|
+
Requires-Dist: torch-geometric>=2.3; extra == "gnn"
|
|
40
|
+
Provides-Extra: all
|
|
41
|
+
Requires-Dist: torch>=1.13; extra == "all"
|
|
42
|
+
Requires-Dist: optuna>=3.0; extra == "all"
|
|
43
|
+
Requires-Dist: xgboost>=1.6; extra == "all"
|
|
44
|
+
Requires-Dist: scikit-learn>=1.1; extra == "all"
|
|
45
|
+
Requires-Dist: statsmodels>=0.13; extra == "all"
|
|
46
|
+
Requires-Dist: joblib>=1.2; extra == "all"
|
|
47
|
+
Requires-Dist: matplotlib>=3.5; extra == "all"
|
|
48
|
+
Requires-Dist: shap>=0.41; extra == "all"
|
|
49
|
+
Requires-Dist: contextily>=1.3; extra == "all"
|
|
50
|
+
Requires-Dist: pynndescent>=0.5; extra == "all"
|
|
51
|
+
Requires-Dist: torch-geometric>=2.3; extra == "all"
|
|
52
|
+
|
|
53
|
+
# Insurance-Pricing
|
|
54
|
+
|
|
55
|
+
A reusable toolkit for insurance modeling, pricing, governance, and reporting.
|
|
56
|
+
|
|
57
|
+
## Overview
|
|
58
|
+
|
|
59
|
+
Insurance-Pricing (ins_pricing) is an enterprise-grade Python library designed for machine learning model training, pricing calculations, and model governance workflows in the insurance industry.
|
|
60
|
+
|
|
61
|
+
### Core Modules
|
|
62
|
+
|
|
63
|
+
| Module | Description |
|
|
64
|
+
|--------|-------------|
|
|
65
|
+
| **modelling** | ML model training (GLM, XGBoost, ResNet, FT-Transformer, GNN) and model interpretability (SHAP, permutation importance) |
|
|
66
|
+
| **pricing** | Factor table construction, numeric binning, premium calibration, exposure calculation, PSI monitoring |
|
|
67
|
+
| **production** | Model prediction, batch scoring, data drift detection, production metrics monitoring |
|
|
68
|
+
| **governance** | Model registry, version management, approval workflows, audit logging |
|
|
69
|
+
| **reporting** | Report generation (Markdown format), report scheduling |
|
|
70
|
+
| **utils** | Data validation, performance profiling, device management, logging configuration |
|
|
71
|
+
|
|
72
|
+
### Quick Start
|
|
73
|
+
|
|
74
|
+
```python
|
|
75
|
+
# Model training with Bayesian optimization
|
|
76
|
+
from ins_pricing import bayesopt as ropt
|
|
77
|
+
|
|
78
|
+
model = ropt.BayesOptModel(
|
|
79
|
+
train_data, test_data,
|
|
80
|
+
model_name='my_model',
|
|
81
|
+
resp_nme='target',
|
|
82
|
+
weight_nme='weight',
|
|
83
|
+
factor_nmes=feature_list,
|
|
84
|
+
cate_list=categorical_features,
|
|
85
|
+
)
|
|
86
|
+
model.bayesopt_xgb(max_evals=100) # Train XGBoost
|
|
87
|
+
model.bayesopt_resnet(max_evals=50) # Train ResNet
|
|
88
|
+
model.bayesopt_ft(max_evals=50) # Train FT-Transformer
|
|
89
|
+
|
|
90
|
+
# Pricing: build factor table
|
|
91
|
+
from ins_pricing.pricing import build_factor_table
|
|
92
|
+
factors = build_factor_table(
|
|
93
|
+
df,
|
|
94
|
+
factor_col='age_band',
|
|
95
|
+
loss_col='claim_amount',
|
|
96
|
+
exposure_col='exposure',
|
|
97
|
+
)
|
|
98
|
+
|
|
99
|
+
# Production: batch scoring
|
|
100
|
+
from ins_pricing.production import batch_score
|
|
101
|
+
scores = batch_score(model.trainers['xgb'].predict, df)
|
|
102
|
+
|
|
103
|
+
# Model governance
|
|
104
|
+
from ins_pricing.governance import ModelRegistry
|
|
105
|
+
registry = ModelRegistry('models.json')
|
|
106
|
+
registry.register(model_name, version, metrics=metrics)
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
### Project Structure
|
|
110
|
+
|
|
111
|
+
```
|
|
112
|
+
ins_pricing/
|
|
113
|
+
├── cli/ # Command-line entry points
|
|
114
|
+
├── modelling/
|
|
115
|
+
│ ├── core/bayesopt/ # ML model training core
|
|
116
|
+
│ ├── explain/ # Model interpretability
|
|
117
|
+
│ └── plotting/ # Model visualization
|
|
118
|
+
├── pricing/ # Insurance pricing module
|
|
119
|
+
├── production/ # Production deployment module
|
|
120
|
+
├── governance/ # Model governance
|
|
121
|
+
├── reporting/ # Report generation
|
|
122
|
+
├── utils/ # Utilities
|
|
123
|
+
└── tests/ # Test suite
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
### Installation
|
|
127
|
+
|
|
128
|
+
```bash
|
|
129
|
+
# Basic installation
|
|
130
|
+
pip install ins_pricing
|
|
131
|
+
|
|
132
|
+
# Full installation (all optional dependencies)
|
|
133
|
+
pip install ins_pricing[all]
|
|
134
|
+
|
|
135
|
+
# Install specific extras
|
|
136
|
+
pip install ins_pricing[bayesopt] # Model training
|
|
137
|
+
pip install ins_pricing[explain] # Model explanation
|
|
138
|
+
pip install ins_pricing[plotting] # Visualization
|
|
139
|
+
pip install ins_pricing[gnn] # Graph neural networks
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
#### Multi-platform & GPU installation notes
|
|
143
|
+
|
|
144
|
+
- **PyTorch (CPU/GPU/MPS)**: Install the correct PyTorch build for your platform/GPU first (CUDA on
|
|
145
|
+
Linux/Windows, ROCm on supported AMD platforms, or MPS on Apple Silicon). Then install the
|
|
146
|
+
optional extras you need (e.g., `bayesopt`, `explain`, or `gnn`). This avoids pip pulling a
|
|
147
|
+
mismatched wheel.
|
|
148
|
+
- **Torch Geometric (GNN)**: `torch-geometric` often requires platform-specific wheels (e.g.,
|
|
149
|
+
`torch-scatter`, `torch-sparse`). Follow the official PyG installation instructions for your
|
|
150
|
+
CUDA/ROCm/CPU environment, then install `ins_pricing[gnn]`.
|
|
151
|
+
- **Multi-GPU**: Training code will use CUDA when available and can enable multi-GPU via
|
|
152
|
+
`torch.distributed`/`DataParallel` where supported. On Windows, CUDA DDP is not supported and will
|
|
153
|
+
fall back to single-GPU or DataParallel where possible.
|
|
154
|
+
|
|
155
|
+
### Requirements
|
|
156
|
+
|
|
157
|
+
- Python >= 3.9
|
|
158
|
+
- Core dependencies: numpy >= 1.20, pandas >= 1.4
|
|
159
|
+
|
|
160
|
+
### License
|
|
161
|
+
|
|
162
|
+
Proprietary
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
LOSS FUNCTIONS
|
|
2
|
+
|
|
3
|
+
Overview
|
|
4
|
+
This document describes the loss-function changes in ins_pricing. The training
|
|
5
|
+
stack now supports multiple regression losses (not just Tweedie deviance) and
|
|
6
|
+
propagates the selected loss into tuning, training, and inference.
|
|
7
|
+
|
|
8
|
+
Supported loss_name values
|
|
9
|
+
- auto (default): keep legacy behavior based on model name
|
|
10
|
+
- tweedie: Tweedie deviance (uses tw_power / tweedie_variance_power when tuning)
|
|
11
|
+
- poisson: Poisson deviance (power=1)
|
|
12
|
+
- gamma: Gamma deviance (power=2)
|
|
13
|
+
- mse: mean squared error
|
|
14
|
+
- mae: mean absolute error
|
|
15
|
+
|
|
16
|
+
Loss name mapping (all options)
|
|
17
|
+
- Tweedie deviance -> tweedie
|
|
18
|
+
- Poisson deviance -> poisson
|
|
19
|
+
- Gamma deviance -> gamma
|
|
20
|
+
- Mean squared error -> mse
|
|
21
|
+
- Mean absolute error -> mae
|
|
22
|
+
- Classification log loss -> logloss (classification only)
|
|
23
|
+
- Classification BCE -> bce (classification only)
|
|
24
|
+
|
|
25
|
+
Classification tasks
|
|
26
|
+
- loss_name can be auto, logloss, or bce
|
|
27
|
+
- training continues to use BCEWithLogits for torch models; evaluation uses logloss
|
|
28
|
+
|
|
29
|
+
Where to set loss_name
|
|
30
|
+
Add to any BayesOpt config JSON:
|
|
31
|
+
|
|
32
|
+
{
|
|
33
|
+
"task_type": "regression",
|
|
34
|
+
"loss_name": "mse"
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
Behavior changes
|
|
38
|
+
1) Tuning and metrics
|
|
39
|
+
- When loss_name is mse/mae, tuning does not sample Tweedie power.
|
|
40
|
+
- When loss_name is poisson/gamma, power is fixed (1.0/2.0).
|
|
41
|
+
- When loss_name is tweedie, power is sampled as before.
|
|
42
|
+
|
|
43
|
+
2) Torch training (ResNet/FT/GNN)
|
|
44
|
+
- Loss computation is routed by loss_name.
|
|
45
|
+
- For tweedie/poisson/gamma, predictions are clamped positive.
|
|
46
|
+
- For mse/mae, no Tweedie power is used.
|
|
47
|
+
|
|
48
|
+
3) XGBoost objective
|
|
49
|
+
- loss_name controls XGB objective:
|
|
50
|
+
- tweedie -> reg:tweedie
|
|
51
|
+
- poisson -> count:poisson
|
|
52
|
+
- gamma -> reg:gamma
|
|
53
|
+
- mse -> reg:squarederror
|
|
54
|
+
- mae -> reg:absoluteerror
|
|
55
|
+
|
|
56
|
+
4) Inference
|
|
57
|
+
- ResNet/GNN constructors now receive loss_name.
|
|
58
|
+
- When loss_name is not tweedie, tw_power is not applied at inference.
|
|
59
|
+
|
|
60
|
+
Legacy defaults (auto)
|
|
61
|
+
- If loss_name is omitted, behavior is unchanged:
|
|
62
|
+
- model name contains "f" -> poisson
|
|
63
|
+
- model name contains "s" -> gamma
|
|
64
|
+
- otherwise -> tweedie
|
|
65
|
+
|
|
66
|
+
Examples
|
|
67
|
+
- ResNet direct training (MSE):
|
|
68
|
+
"loss_name": "mse"
|
|
69
|
+
|
|
70
|
+
- FT embed -> ResNet (MSE):
|
|
71
|
+
"loss_name": "mse"
|
|
72
|
+
|
|
73
|
+
- XGB direct training (unchanged):
|
|
74
|
+
omit loss_name or set "loss_name": "auto"
|
|
75
|
+
|
|
76
|
+
Notes
|
|
77
|
+
- loss_name is global per config. If you need different losses for different
|
|
78
|
+
models, split into separate configs and run them independently.
|
|
@@ -75,13 +75,13 @@ Under `ins_pricing/modelling/core/bayesopt/`:
|
|
|
75
75
|
|
|
76
76
|
1) **Tools and utilities**
|
|
77
77
|
|
|
78
|
-
- `IOUtils / TrainingUtils / PlotUtils`: I/O, training utilities (batch size,
|
|
78
|
+
- `IOUtils / TrainingUtils / PlotUtils`: I/O, training utilities (batch size, loss functions, free_cuda), plotting helpers
|
|
79
79
|
- `DistributedUtils`: DDP init, rank/world_size helpers
|
|
80
80
|
|
|
81
81
|
2) **TorchTrainerMixin (common components for torch tabular training)**
|
|
82
82
|
|
|
83
83
|
- DataLoader: `_build_dataloader()` / `_build_val_dataloader()` (prints batch/accum/workers)
|
|
84
|
-
- Loss: `_compute_losses()` / `_compute_weighted_loss()` (regression
|
|
84
|
+
- Loss: `_compute_losses()` / `_compute_weighted_loss()` (regression supports tweedie/poisson/gamma/mse/mae; classification uses BCEWithLogits)
|
|
85
85
|
- Early stop: `_early_stop_update()`
|
|
86
86
|
|
|
87
87
|
3) **Sklearn-style model classes (core training objects)**
|
|
@@ -292,7 +292,7 @@ FT role is controlled by `ft_role` (from config or CLI `--ft-role`):
|
|
|
292
292
|
### 4.1 Supervised models (GLM/XGB/ResNet/FT-as-model)
|
|
293
293
|
|
|
294
294
|
- `TrainerBase.tune()` calls each trainer's `cross_val()` and minimizes the validation metric (default direction `minimize`)
|
|
295
|
-
- Regression
|
|
295
|
+
- Regression loss is configurable (tweedie/poisson/gamma/mse/mae); classification uses logloss
|
|
296
296
|
|
|
297
297
|
### 4.2 FT self-supervised (`unsupervised_embedding`)
|
|
298
298
|
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
# Quick Start Guide
|
|
2
|
+
|
|
3
|
+
Get started with the Insurance Pricing Model Training Frontend in 3 easy steps.
|
|
4
|
+
|
|
5
|
+
## Prerequisites
|
|
6
|
+
|
|
7
|
+
1. Install the `ins_pricing` package
|
|
8
|
+
2. Install Gradio:
|
|
9
|
+
```bash
|
|
10
|
+
pip install "gradio>=4.0.0"
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
## Step 1: Launch the Application
|
|
14
|
+
|
|
15
|
+
### On Windows:
|
|
16
|
+
Double-click `start_app.bat` or run:
|
|
17
|
+
```bash
|
|
18
|
+
python -m ins_pricing.frontend.app
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
### On Linux/Mac:
|
|
22
|
+
Run the shell script:
|
|
23
|
+
```bash
|
|
24
|
+
./start_app.sh
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
Or use Python directly:
|
|
28
|
+
```bash
|
|
29
|
+
python -m ins_pricing.frontend.app
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
The web interface will automatically open at `http://localhost:7860`
|
|
33
|
+
|
|
34
|
+
## Step 2: Configure Your Model
|
|
35
|
+
|
|
36
|
+
### Option A: Upload Existing Config (Recommended)
|
|
37
|
+
1. Go to the **Configuration** tab
|
|
38
|
+
2. Click **"Upload JSON Config File"**
|
|
39
|
+
3. Select a config file (e.g., `config_xgb_direct.json` from `examples/`)
|
|
40
|
+
4. Click **"Load Config"**
|
|
41
|
+
|
|
42
|
+
### Option B: Manual Configuration
|
|
43
|
+
1. Go to the **Configuration** tab
|
|
44
|
+
2. Scroll to **"Manual Configuration"**
|
|
45
|
+
3. Fill in the required fields:
|
|
46
|
+
- **Data Directory**: Path to your data folder
|
|
47
|
+
- **Model List**: Model name(s)
|
|
48
|
+
- **Target Column**: Your target variable
|
|
49
|
+
- **Weight Column**: Your weight variable
|
|
50
|
+
- **Feature List**: Comma-separated features
|
|
51
|
+
- **Categorical Features**: Comma-separated categorical features
|
|
52
|
+
4. Adjust other settings as needed
|
|
53
|
+
5. Click **"Build Configuration"**
|
|
54
|
+
|
|
55
|
+
## Step 3: Run Training
|
|
56
|
+
|
|
57
|
+
1. Switch to the **Run Task** tab
|
|
58
|
+
2. Click **"Run Task"**
|
|
59
|
+
3. Watch real-time logs appear below
|
|
60
|
+
|
|
61
|
+
Training will start automatically and logs will update in real-time!
|
|
62
|
+
|
|
63
|
+
## New Features
|
|
64
|
+
|
|
65
|
+
### FT Two-Step Workflow
|
|
66
|
+
|
|
67
|
+
For advanced FT-Transformer → XGB/ResN training:
|
|
68
|
+
|
|
69
|
+
1. **Prepare Base Config**: Create or load a base configuration
|
|
70
|
+
2. **Go to FT Two-Step Workflow tab**
|
|
71
|
+
3. **Step 1 - FT Embedding Generation**:
|
|
72
|
+
- Configure DDP settings
|
|
73
|
+
- Click "Prepare Step 1 Config"
|
|
74
|
+
- Copy the config to Configuration tab
|
|
75
|
+
- Run it in "Run Task" tab
|
|
76
|
+
4. **Step 2 - Train XGB/ResN**:
|
|
77
|
+
- After Step 1 completes, click "Prepare Step 2 Configs"
|
|
78
|
+
- Choose which models to train (XGB, ResN, or both)
|
|
79
|
+
- Copy the generated configs and run them
|
|
80
|
+
|
|
81
|
+
### Open Results Folder
|
|
82
|
+
|
|
83
|
+
- In the **Run Task** tab, click **"📁 Open Results Folder"**
|
|
84
|
+
- Automatically opens the output directory in your file explorer
|
|
85
|
+
- Works on Windows, macOS, and Linux
|
|
86
|
+
|
|
87
|
+
## Example Configuration
|
|
88
|
+
|
|
89
|
+
Here's a minimal example to get started:
|
|
90
|
+
|
|
91
|
+
```json
|
|
92
|
+
{
|
|
93
|
+
"data_dir": "./Data",
|
|
94
|
+
"model_list": ["od"],
|
|
95
|
+
"model_categories": ["bc"],
|
|
96
|
+
"target": "response",
|
|
97
|
+
"weight": "weights",
|
|
98
|
+
"feature_list": ["age", "gender", "region"],
|
|
99
|
+
"categorical_features": ["gender", "region"],
|
|
100
|
+
"runner": {
|
|
101
|
+
"mode": "entry",
|
|
102
|
+
"model_keys": ["xgb"],
|
|
103
|
+
"max_evals": 50
|
|
104
|
+
}
|
|
105
|
+
}
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
Save this as `my_first_config.json` and upload it!
|
|
109
|
+
|
|
110
|
+
## Tips
|
|
111
|
+
|
|
112
|
+
- **Save Your Config**: After building a configuration, save it using the "Save Configuration" button for reuse
|
|
113
|
+
- **Check Logs**: Training logs update in real-time - watch for errors or progress indicators
|
|
114
|
+
- **GPU Usage**: Toggle "Use GPU" checkbox in Training Settings to enable/disable GPU acceleration
|
|
115
|
+
- **Model Selection**: Specify which models to train in "Model Keys" (xgb, resn, ft, gnn)
|
|
116
|
+
- **Open Results**: Use the "📁 Open Results Folder" button to quickly access output files
|
|
117
|
+
- **FT Workflow**: Use the dedicated FT tab for automated two-step FT → XGB/ResN training
|
|
118
|
+
|
|
119
|
+
## Troubleshooting
|
|
120
|
+
|
|
121
|
+
**Problem**: Interface doesn't load
|
|
122
|
+
- **Solution**: Check that port 7860 is not in use, or specify a different port
|
|
123
|
+
|
|
124
|
+
**Problem**: Configuration validation fails
|
|
125
|
+
- **Solution**: Ensure all required fields are filled and feature lists are properly formatted
|
|
126
|
+
|
|
127
|
+
**Problem**: Training doesn't start
|
|
128
|
+
- **Solution**: Verify data paths exist and configuration is valid
|
|
129
|
+
|
|
130
|
+
**Problem**: Results folder won't open
|
|
131
|
+
- **Solution**: Make sure the task has run at least once to create the output directory
|
|
132
|
+
|
|
133
|
+
**Problem**: Step 2 configs fail to generate
|
|
134
|
+
- **Solution**: Ensure Step 1 completed successfully and embedding files exist
|
|
135
|
+
|
|
136
|
+
## Next Steps
|
|
137
|
+
|
|
138
|
+
- Explore advanced options in the Configuration tab
|
|
139
|
+
- Try the FT Two-Step Workflow for better model performance
|
|
140
|
+
- Experiment with different model combinations (xgb, resn, ft)
|
|
141
|
+
- Try different split strategies
|
|
142
|
+
- Use the Explain mode for model interpretability
|
|
143
|
+
- Check the full [README.md](README.md) for detailed documentation
|
|
144
|
+
|
|
145
|
+
## Support
|
|
146
|
+
|
|
147
|
+
For issues or questions, refer to:
|
|
148
|
+
- Full documentation: [README.md](README.md)
|
|
149
|
+
- Example configs: `ins_pricing/examples/`
|
|
150
|
+
- Package documentation: `ins_pricing/docs/`
|
|
151
|
+
|
|
152
|
+
Happy modeling!
|