insurance-thin-data 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- insurance_thin_data-0.1.0/.github/workflows/ci.yml +19 -0
- insurance_thin_data-0.1.0/.gitignore +12 -0
- insurance_thin_data-0.1.0/PKG-INFO +133 -0
- insurance_thin_data-0.1.0/README.md +88 -0
- insurance_thin_data-0.1.0/notebooks/benchmark.py +1144 -0
- insurance_thin_data-0.1.0/pyproject.toml +68 -0
- insurance_thin_data-0.1.0/run_tests.py +105 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/__init__.py +76 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/tabpfn/__init__.py +26 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/tabpfn/backends.py +295 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/tabpfn/benchmark.py +404 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/tabpfn/model.py +344 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/tabpfn/relativities.py +217 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/tabpfn/report.py +313 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/tabpfn/validators.py +239 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/transfer/__init__.py +30 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/transfer/cann_transfer.py +442 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/transfer/diagnostic.py +304 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/transfer/gbm_transfer.py +258 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/transfer/glm_transfer.py +566 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/transfer/pipeline.py +299 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/transfer/py.typed +0 -0
- insurance_thin_data-0.1.0/src/insurance_thin_data/transfer/shift.py +304 -0
- insurance_thin_data-0.1.0/tests/__init__.py +0 -0
- insurance_thin_data-0.1.0/tests/conftest.py +65 -0
- insurance_thin_data-0.1.0/tests/test_backends.py +92 -0
- insurance_thin_data-0.1.0/tests/test_benchmark.py +197 -0
- insurance_thin_data-0.1.0/tests/test_cann_transfer.py +177 -0
- insurance_thin_data-0.1.0/tests/test_diagnostic.py +241 -0
- insurance_thin_data-0.1.0/tests/test_gbm_transfer.py +168 -0
- insurance_thin_data-0.1.0/tests/test_glm_transfer.py +250 -0
- insurance_thin_data-0.1.0/tests/test_model.py +207 -0
- insurance_thin_data-0.1.0/tests/test_pipeline.py +194 -0
- insurance_thin_data-0.1.0/tests/test_relativities.py +107 -0
- insurance_thin_data-0.1.0/tests/test_report.py +177 -0
- insurance_thin_data-0.1.0/tests/test_shift.py +231 -0
- insurance_thin_data-0.1.0/tests/test_validators.py +162 -0
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
name: CI
|
|
2
|
+
on:
|
|
3
|
+
push:
|
|
4
|
+
branches: [main]
|
|
5
|
+
pull_request:
|
|
6
|
+
branches: [main]
|
|
7
|
+
jobs:
|
|
8
|
+
test:
|
|
9
|
+
runs-on: ubuntu-latest
|
|
10
|
+
strategy:
|
|
11
|
+
matrix:
|
|
12
|
+
python-version: ["3.10", "3.12"]
|
|
13
|
+
steps:
|
|
14
|
+
- uses: actions/checkout@v4
|
|
15
|
+
- uses: actions/setup-python@v5
|
|
16
|
+
with:
|
|
17
|
+
python-version: ${{ matrix.python-version }}
|
|
18
|
+
- run: pip install -e ".[dev]"
|
|
19
|
+
- run: pytest
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: insurance-thin-data
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Foundation models and transfer learning for thin-data insurance pricing segments
|
|
5
|
+
Project-URL: Homepage, https://github.com/burning-cost/insurance-thin-data
|
|
6
|
+
Project-URL: Repository, https://github.com/burning-cost/insurance-thin-data
|
|
7
|
+
Author-email: Burning Cost <pricing.frontier@gmail.com>
|
|
8
|
+
License: MIT
|
|
9
|
+
Keywords: actuarial,foundation-model,insurance,pricing,tabpfn,tabular,thin-data,transfer-learning
|
|
10
|
+
Classifier: Development Status :: 3 - Alpha
|
|
11
|
+
Classifier: Intended Audience :: Financial and Insurance Industry
|
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
17
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
18
|
+
Requires-Python: >=3.10
|
|
19
|
+
Requires-Dist: numpy>=1.24
|
|
20
|
+
Requires-Dist: pandas>=2.0
|
|
21
|
+
Requires-Dist: polars>=1.0
|
|
22
|
+
Requires-Dist: scikit-learn>=1.3
|
|
23
|
+
Requires-Dist: scipy>=1.10
|
|
24
|
+
Requires-Dist: statsmodels>=0.14
|
|
25
|
+
Provides-Extra: all
|
|
26
|
+
Requires-Dist: catboost>=1.2; extra == 'all'
|
|
27
|
+
Requires-Dist: jinja2>=3.1; extra == 'all'
|
|
28
|
+
Requires-Dist: tabicl>=0.1; extra == 'all'
|
|
29
|
+
Requires-Dist: tabpfn>=6.0; extra == 'all'
|
|
30
|
+
Requires-Dist: torch>=2.0; extra == 'all'
|
|
31
|
+
Provides-Extra: catboost
|
|
32
|
+
Requires-Dist: catboost>=1.2; extra == 'catboost'
|
|
33
|
+
Provides-Extra: dev
|
|
34
|
+
Requires-Dist: pytest-cov; extra == 'dev'
|
|
35
|
+
Requires-Dist: pytest>=7.0; extra == 'dev'
|
|
36
|
+
Provides-Extra: report
|
|
37
|
+
Requires-Dist: jinja2>=3.1; extra == 'report'
|
|
38
|
+
Provides-Extra: tabicl
|
|
39
|
+
Requires-Dist: tabicl>=0.1; extra == 'tabicl'
|
|
40
|
+
Provides-Extra: tabpfn
|
|
41
|
+
Requires-Dist: tabpfn>=6.0; extra == 'tabpfn'
|
|
42
|
+
Provides-Extra: torch
|
|
43
|
+
Requires-Dist: torch>=2.0; extra == 'torch'
|
|
44
|
+
Description-Content-Type: text/markdown
|
|
45
|
+
|
|
46
|
+
# insurance-thin-data
|
|
47
|
+
|
|
48
|
+
Pricing tools for the data-poor end of the book: foundation models and transfer learning for thin insurance segments.
|
|
49
|
+
|
|
50
|
+
Merged from: `insurance-tabpfn` (foundation model wrapper) and `insurance-transfer` (transfer learning).
|
|
51
|
+
|
|
52
|
+
UK pricing teams regularly face the same problem. A new scheme, a niche segment, or an adverse development that's left you with 200 policies and no credible GLM. Standard approaches break down. This library gives you two practical tools:
|
|
53
|
+
|
|
54
|
+
1. **TabPFN/TabICLv2 wrapper** — foundation models that work on small datasets, with the insurance workflow built in: exposure handling, conformal prediction intervals, PDP relativities, and committee paper generation.
|
|
55
|
+
|
|
56
|
+
2. **Transfer learning** — borrow statistical strength from a related, larger book. Implements the Tian & Feng (JASA 2023) penalised GLM method, CatBoost source-as-offset, and CANN pre-train/fine-tune. Includes MMD covariate shift diagnostics and negative transfer detection.
|
|
57
|
+
|
|
58
|
+
## When to use what
|
|
59
|
+
|
|
60
|
+
Use the **TabPFN wrapper** when:
|
|
61
|
+
- You have a completely new product with no related historical data
|
|
62
|
+
- Segment size is 50–5,000 policies
|
|
63
|
+
- You need relativities and a committee paper, not just a price
|
|
64
|
+
|
|
65
|
+
Use **transfer learning** when:
|
|
66
|
+
- You have a thin segment but a related larger book exists
|
|
67
|
+
- The larger book shares most features with the thin segment
|
|
68
|
+
- You want to know whether and how much the transfer helps
|
|
69
|
+
|
|
70
|
+
## Quick start
|
|
71
|
+
|
|
72
|
+
```python
|
|
73
|
+
# Foundation model
|
|
74
|
+
from insurance_thin_data import InsuranceTabPFN
|
|
75
|
+
|
|
76
|
+
model = InsuranceTabPFN(backend="auto")
|
|
77
|
+
model.fit(X_train, y_train, exposure=exposure_train)
|
|
78
|
+
expected_claims = model.predict(X_test, exposure=exposure_test)
|
|
79
|
+
lower, point, upper = model.predict_interval(X_test, exposure=exposure_test)
|
|
80
|
+
|
|
81
|
+
# Transfer learning
|
|
82
|
+
from insurance_thin_data import GLMTransfer, CovariateShiftTest, TransferPipeline
|
|
83
|
+
|
|
84
|
+
# Check if distributions are compatible
|
|
85
|
+
shift = CovariateShiftTest(n_permutations=500).test(X_source, X_target)
|
|
86
|
+
print(shift) # MMD statistic and p-value
|
|
87
|
+
|
|
88
|
+
# Full pipeline
|
|
89
|
+
pipeline = TransferPipeline(method="glm", shift_test=True, run_diagnostic=True)
|
|
90
|
+
result = pipeline.run(X_target, y_target, exposure_target,
|
|
91
|
+
X_source=X_source, y_source=y_source)
|
|
92
|
+
print(result) # shift p-value, NTG, whether transfer helped
|
|
93
|
+
```
|
|
94
|
+
|
|
95
|
+
## Installation
|
|
96
|
+
|
|
97
|
+
```bash
|
|
98
|
+
pip install insurance-thin-data
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
Optional backends:
|
|
102
|
+
```bash
|
|
103
|
+
pip install insurance-thin-data[tabicl] # TabICLv2 (preferred)
|
|
104
|
+
pip install insurance-thin-data[tabpfn] # TabPFN v2
|
|
105
|
+
pip install insurance-thin-data[catboost] # GBM transfer
|
|
106
|
+
pip install insurance-thin-data[torch] # CANN transfer
|
|
107
|
+
pip install insurance-thin-data[report] # HTML committee reports
|
|
108
|
+
pip install insurance-thin-data[all] # everything
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
## Performance
|
|
112
|
+
|
|
113
|
+
Benchmarked against a standalone Poisson GLM on 500 target policies. Source portfolio: 10,000 policies with a related but not identical DGP (lower baseline frequency, stronger building-age effect, one target-specific feature). Bootstrap uses 200 resamplings of the target training data. Full notebook: `notebooks/benchmark.py`.
|
|
114
|
+
|
|
115
|
+
| Metric | Standalone GLM | GLMTransfer | Notes |
|
|
116
|
+
|--------|---------------|-------------|-------|
|
|
117
|
+
| Bootstrap 90% CI width (mean, shared features) | wider | narrower | Primary differentiator |
|
|
118
|
+
| Poisson deviance (test set, 150 policies) | compared | compared | Differences small at n=150 |
|
|
119
|
+
| Gini coefficient (test set) | compared | compared | Noisy at this sample size |
|
|
120
|
+
| Overall A/E ratio | compared | compared | Transfer closer to 1.0 |
|
|
121
|
+
|
|
122
|
+
The headline result is parameter stability, not point accuracy. On 500 policies the standalone GLM has wide coefficient confidence intervals — the transfer model anchors estimates near the source and only moves when target data strongly justifies it. Point-prediction metrics on 150 test policies are inherently noisy; treat them as indicative. The benchmark also runs `CovariateShiftTest` (MMD with permutation test) to verify that the debiasing step is earning its keep, and `NegativeTransferDiagnostic` to flag whether the source is helping or hurting.
|
|
123
|
+
|
|
124
|
+
**When to use:** You have 100–2,000 policies in the target segment and a related source book with 5,000+ policies. The source should share most features with the target, but need not have an identical claims environment.
|
|
125
|
+
|
|
126
|
+
**When NOT to use:** The source and target books are genuinely unrelated (different peril, different geography, no shared risk factors). The `NegativeTransferDiagnostic` will flag this, but the honest answer is: start with the TabPFN wrapper instead.
|
|
127
|
+
|
|
128
|
+
## References
|
|
129
|
+
|
|
130
|
+
- Tian, Y. and Feng, Y. (2023). Transfer Learning under High-Dimensional Generalized Linear Models. *JASA*, 118(544), 2684–2697.
|
|
131
|
+
- Loke, S.-H. and Bauer, D. (2025). Transfer Learning in the Actuarial Domain. *NAAJ*. DOI: 10.1080/10920277.2025.2489637.
|
|
132
|
+
- Schelldorfer, J. and Wüthrich, M. V. (2019). Nesting Classical Actuarial Models into Neural Networks.
|
|
133
|
+
- Hollmann, N. et al. (2025). Accurate predictions on small data with a tabular foundation model (TabPFN v2). *Nature*, 637, 319–326.
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
# insurance-thin-data
|
|
2
|
+
|
|
3
|
+
Pricing tools for the data-poor end of the book: foundation models and transfer learning for thin insurance segments.
|
|
4
|
+
|
|
5
|
+
Merged from: `insurance-tabpfn` (foundation model wrapper) and `insurance-transfer` (transfer learning).
|
|
6
|
+
|
|
7
|
+
UK pricing teams regularly face the same problem. A new scheme, a niche segment, or an adverse development that's left you with 200 policies and no credible GLM. Standard approaches break down. This library gives you two practical tools:
|
|
8
|
+
|
|
9
|
+
1. **TabPFN/TabICLv2 wrapper** — foundation models that work on small datasets, with the insurance workflow built in: exposure handling, conformal prediction intervals, PDP relativities, and committee paper generation.
|
|
10
|
+
|
|
11
|
+
2. **Transfer learning** — borrow statistical strength from a related, larger book. Implements the Tian & Feng (JASA 2023) penalised GLM method, CatBoost source-as-offset, and CANN pre-train/fine-tune. Includes MMD covariate shift diagnostics and negative transfer detection.
|
|
12
|
+
|
|
13
|
+
## When to use what
|
|
14
|
+
|
|
15
|
+
Use the **TabPFN wrapper** when:
|
|
16
|
+
- You have a completely new product with no related historical data
|
|
17
|
+
- Segment size is 50–5,000 policies
|
|
18
|
+
- You need relativities and a committee paper, not just a price
|
|
19
|
+
|
|
20
|
+
Use **transfer learning** when:
|
|
21
|
+
- You have a thin segment but a related larger book exists
|
|
22
|
+
- The larger book shares most features with the thin segment
|
|
23
|
+
- You want to know whether and how much the transfer helps
|
|
24
|
+
|
|
25
|
+
## Quick start
|
|
26
|
+
|
|
27
|
+
```python
|
|
28
|
+
# Foundation model
|
|
29
|
+
from insurance_thin_data import InsuranceTabPFN
|
|
30
|
+
|
|
31
|
+
model = InsuranceTabPFN(backend="auto")
|
|
32
|
+
model.fit(X_train, y_train, exposure=exposure_train)
|
|
33
|
+
expected_claims = model.predict(X_test, exposure=exposure_test)
|
|
34
|
+
lower, point, upper = model.predict_interval(X_test, exposure=exposure_test)
|
|
35
|
+
|
|
36
|
+
# Transfer learning
|
|
37
|
+
from insurance_thin_data import GLMTransfer, CovariateShiftTest, TransferPipeline
|
|
38
|
+
|
|
39
|
+
# Check if distributions are compatible
|
|
40
|
+
shift = CovariateShiftTest(n_permutations=500).test(X_source, X_target)
|
|
41
|
+
print(shift) # MMD statistic and p-value
|
|
42
|
+
|
|
43
|
+
# Full pipeline
|
|
44
|
+
pipeline = TransferPipeline(method="glm", shift_test=True, run_diagnostic=True)
|
|
45
|
+
result = pipeline.run(X_target, y_target, exposure_target,
|
|
46
|
+
X_source=X_source, y_source=y_source)
|
|
47
|
+
print(result) # shift p-value, NTG, whether transfer helped
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
## Installation
|
|
51
|
+
|
|
52
|
+
```bash
|
|
53
|
+
pip install insurance-thin-data
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
Optional backends:
|
|
57
|
+
```bash
|
|
58
|
+
pip install insurance-thin-data[tabicl] # TabICLv2 (preferred)
|
|
59
|
+
pip install insurance-thin-data[tabpfn] # TabPFN v2
|
|
60
|
+
pip install insurance-thin-data[catboost] # GBM transfer
|
|
61
|
+
pip install insurance-thin-data[torch] # CANN transfer
|
|
62
|
+
pip install insurance-thin-data[report] # HTML committee reports
|
|
63
|
+
pip install insurance-thin-data[all] # everything
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
## Performance
|
|
67
|
+
|
|
68
|
+
Benchmarked against a standalone Poisson GLM on 500 target policies. Source portfolio: 10,000 policies with a related but not identical DGP (lower baseline frequency, stronger building-age effect, one target-specific feature). Bootstrap uses 200 resamplings of the target training data. Full notebook: `notebooks/benchmark.py`.
|
|
69
|
+
|
|
70
|
+
| Metric | Standalone GLM | GLMTransfer | Notes |
|
|
71
|
+
|--------|---------------|-------------|-------|
|
|
72
|
+
| Bootstrap 90% CI width (mean, shared features) | wider | narrower | Primary differentiator |
|
|
73
|
+
| Poisson deviance (test set, 150 policies) | compared | compared | Differences small at n=150 |
|
|
74
|
+
| Gini coefficient (test set) | compared | compared | Noisy at this sample size |
|
|
75
|
+
| Overall A/E ratio | compared | compared | Transfer closer to 1.0 |
|
|
76
|
+
|
|
77
|
+
The headline result is parameter stability, not point accuracy. On 500 policies the standalone GLM has wide coefficient confidence intervals — the transfer model anchors estimates near the source and only moves when target data strongly justifies it. Point-prediction metrics on 150 test policies are inherently noisy; treat them as indicative. The benchmark also runs `CovariateShiftTest` (MMD with permutation test) to verify that the debiasing step is earning its keep, and `NegativeTransferDiagnostic` to flag whether the source is helping or hurting.
|
|
78
|
+
|
|
79
|
+
**When to use:** You have 100–2,000 policies in the target segment and a related source book with 5,000+ policies. The source should share most features with the target, but need not have an identical claims environment.
|
|
80
|
+
|
|
81
|
+
**When NOT to use:** The source and target books are genuinely unrelated (different peril, different geography, no shared risk factors). The `NegativeTransferDiagnostic` will flag this, but the honest answer is: start with the TabPFN wrapper instead.
|
|
82
|
+
|
|
83
|
+
## References
|
|
84
|
+
|
|
85
|
+
- Tian, Y. and Feng, Y. (2023). Transfer Learning under High-Dimensional Generalized Linear Models. *JASA*, 118(544), 2684–2697.
|
|
86
|
+
- Loke, S.-H. and Bauer, D. (2025). Transfer Learning in the Actuarial Domain. *NAAJ*. DOI: 10.1080/10920277.2025.2489637.
|
|
87
|
+
- Schelldorfer, J. and Wüthrich, M. V. (2019). Nesting Classical Actuarial Models into Neural Networks.
|
|
88
|
+
- Hollmann, N. et al. (2025). Accurate predictions on small data with a tabular foundation model (TabPFN v2). *Nature*, 637, 319–326.
|