lossmodels 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lossmodels-0.1.0/PKG-INFO +214 -0
- lossmodels-0.1.0/README.md +193 -0
- lossmodels-0.1.0/pyproject.toml +62 -0
- lossmodels-0.1.0/setup.cfg +4 -0
- lossmodels-0.1.0/src/lossmodels/__init__.py +1 -0
- lossmodels-0.1.0/src/lossmodels/aggregate/__init__.py +48 -0
- lossmodels-0.1.0/src/lossmodels/aggregate/base.py +89 -0
- lossmodels-0.1.0/src/lossmodels/aggregate/collective.py +85 -0
- lossmodels-0.1.0/src/lossmodels/aggregate/discretization.py +100 -0
- lossmodels-0.1.0/src/lossmodels/aggregate/fft.py +117 -0
- lossmodels-0.1.0/src/lossmodels/aggregate/panjer.py +185 -0
- lossmodels-0.1.0/src/lossmodels/aggregate/risk_measures.py +125 -0
- lossmodels-0.1.0/src/lossmodels/aggregate/risk_measures_pmf.py +77 -0
- lossmodels-0.1.0/src/lossmodels/coverage/__init__.py +5 -0
- lossmodels-0.1.0/src/lossmodels/coverage/deductible.py +77 -0
- lossmodels-0.1.0/src/lossmodels/coverage/layer.py +86 -0
- lossmodels-0.1.0/src/lossmodels/coverage/limit.py +77 -0
- lossmodels-0.1.0/src/lossmodels/credibility/__init__.py +7 -0
- lossmodels-0.1.0/src/lossmodels/credibility/buhlmann.py +136 -0
- lossmodels-0.1.0/src/lossmodels/credibility/buhlmann_straub.py +191 -0
- lossmodels-0.1.0/src/lossmodels/credibility/emirical_bayes.py +0 -0
- lossmodels-0.1.0/src/lossmodels/empirical/__init__.py +6 -0
- lossmodels-0.1.0/src/lossmodels/empirical/distribution.py +130 -0
- lossmodels-0.1.0/src/lossmodels/estimation/__init__.py +41 -0
- lossmodels-0.1.0/src/lossmodels/estimation/diagnostics.py +99 -0
- lossmodels-0.1.0/src/lossmodels/estimation/frequency_selection.py +82 -0
- lossmodels-0.1.0/src/lossmodels/estimation/mle.py +388 -0
- lossmodels-0.1.0/src/lossmodels/estimation/model_selection.py +115 -0
- lossmodels-0.1.0/src/lossmodels/estimation/moments.py +215 -0
- lossmodels-0.1.0/src/lossmodels/frequency/__init__.py +13 -0
- lossmodels-0.1.0/src/lossmodels/frequency/base.py +47 -0
- lossmodels-0.1.0/src/lossmodels/frequency/binomial.py +44 -0
- lossmodels-0.1.0/src/lossmodels/frequency/geometric.py +60 -0
- lossmodels-0.1.0/src/lossmodels/frequency/negbinomial.py +82 -0
- lossmodels-0.1.0/src/lossmodels/frequency/poisson.py +68 -0
- lossmodels-0.1.0/src/lossmodels/severity/__init__.py +15 -0
- lossmodels-0.1.0/src/lossmodels/severity/base.py +51 -0
- lossmodels-0.1.0/src/lossmodels/severity/exponential.py +77 -0
- lossmodels-0.1.0/src/lossmodels/severity/gamma.py +56 -0
- lossmodels-0.1.0/src/lossmodels/severity/lognormal.py +55 -0
- lossmodels-0.1.0/src/lossmodels/severity/pareto.py +63 -0
- lossmodels-0.1.0/src/lossmodels/severity/weibull.py +59 -0
- lossmodels-0.1.0/src/lossmodels/utils/__init__.py +0 -0
- lossmodels-0.1.0/src/lossmodels/utils/numeric.py +0 -0
- lossmodels-0.1.0/src/lossmodels/utils/random.py +0 -0
- lossmodels-0.1.0/src/lossmodels/utils/validation.py +0 -0
- lossmodels-0.1.0/src/lossmodels.egg-info/PKG-INFO +214 -0
- lossmodels-0.1.0/src/lossmodels.egg-info/SOURCES.txt +49 -0
- lossmodels-0.1.0/src/lossmodels.egg-info/dependency_links.txt +1 -0
- lossmodels-0.1.0/src/lossmodels.egg-info/requires.txt +6 -0
- lossmodels-0.1.0/src/lossmodels.egg-info/top_level.txt +1 -0
|
@@ -0,0 +1,214 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: lossmodels
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A Python library for actuarial loss modeling using frequency-severity methods.
|
|
5
|
+
Author: Michael Bryant
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/michaelabryant/lossmodels
|
|
8
|
+
Project-URL: Repository, https://github.com/michaelabryant/lossmodels
|
|
9
|
+
Project-URL: Issues, https://github.com/michaelabryant/lossmodels/issues
|
|
10
|
+
Keywords: actuarial,insurance,risk,statistics,simulation,loss modeling,frequency severity
|
|
11
|
+
Classifier: Programming Language :: Python :: 3
|
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
13
|
+
Classifier: Operating System :: OS Independent
|
|
14
|
+
Requires-Python: >=3.10
|
|
15
|
+
Description-Content-Type: text/markdown
|
|
16
|
+
Requires-Dist: numpy>=1.22
|
|
17
|
+
Requires-Dist: scipy>=1.8
|
|
18
|
+
Provides-Extra: dev
|
|
19
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
20
|
+
Requires-Dist: pytest-cov>=4.0; extra == "dev"
|
|
21
|
+
|
|
22
|
+
# lossmodels
|
|
23
|
+
|
|
24
|
+
[]()
|
|
25
|
+
[]()
|
|
26
|
+
|
|
27
|
+
A Python library for actuarial loss modeling using frequency–severity methods.
|
|
28
|
+
|
|
29
|
+
---
|
|
30
|
+
|
|
31
|
+
## Overview
|
|
32
|
+
|
|
33
|
+
`lossmodels` provides a clean, modular implementation of core actuarial techniques from *Loss Models: Data to Decisions* (Klugman, Panjer, Willmot), including:
|
|
34
|
+
|
|
35
|
+
- frequency–severity modeling
|
|
36
|
+
- aggregate loss modeling (simulation, Panjer recursion, FFT)
|
|
37
|
+
- parameter estimation (MLE, method of moments)
|
|
38
|
+
- credibility theory
|
|
39
|
+
- risk measurement (VaR, TVaR, stop-loss)
|
|
40
|
+
|
|
41
|
+
Designed for:
|
|
42
|
+
- actuaries and actuarial analysts
|
|
43
|
+
- quantitative developers
|
|
44
|
+
- data scientists in insurance
|
|
45
|
+
|
|
46
|
+
---
|
|
47
|
+
|
|
48
|
+
## Quick Example
|
|
49
|
+
|
|
50
|
+
```python
|
|
51
|
+
from lossmodels.frequency import Poisson
|
|
52
|
+
from lossmodels.severity import Lognormal
|
|
53
|
+
from lossmodels.aggregate import CollectiveRiskModel
|
|
54
|
+
|
|
55
|
+
freq = Poisson(lam=2.0)
|
|
56
|
+
sev = Lognormal(mu=10.0, sigma=0.8)
|
|
57
|
+
|
|
58
|
+
model = CollectiveRiskModel(freq, sev)
|
|
59
|
+
|
|
60
|
+
print("Mean:", model.mean())
|
|
61
|
+
print("VaR 95%:", model.var(0.95))
|
|
62
|
+
print("TVaR 95%:", model.tvar(0.95))
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
---
|
|
66
|
+
|
|
67
|
+
## Installation
|
|
68
|
+
|
|
69
|
+
```bash
|
|
70
|
+
pip install lossmodels
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
Or for development:
|
|
74
|
+
|
|
75
|
+
```bash
|
|
76
|
+
pip install -e .
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
---
|
|
80
|
+
|
|
81
|
+
## Core Features
|
|
82
|
+
|
|
83
|
+
### Frequency Models
|
|
84
|
+
- Poisson
|
|
85
|
+
- Negative Binomial
|
|
86
|
+
- Binomial
|
|
87
|
+
- Geometric
|
|
88
|
+
- Empirical frequency
|
|
89
|
+
|
|
90
|
+
### Severity Models
|
|
91
|
+
- Exponential
|
|
92
|
+
- Gamma
|
|
93
|
+
- Lognormal
|
|
94
|
+
- Pareto
|
|
95
|
+
- Weibull
|
|
96
|
+
- Empirical severity
|
|
97
|
+
|
|
98
|
+
### Aggregate Modeling
|
|
99
|
+
- Monte Carlo simulation
|
|
100
|
+
- Panjer recursion
|
|
101
|
+
- FFT (Fast Fourier Transform)
|
|
102
|
+
|
|
103
|
+
### Estimation
|
|
104
|
+
- Maximum Likelihood Estimation (MLE)
|
|
105
|
+
- Method of Moments
|
|
106
|
+
- Generic numerical MLE
|
|
107
|
+
|
|
108
|
+
### Model Selection
|
|
109
|
+
- Best severity selection (AIC / BIC)
|
|
110
|
+
- Best frequency selection (Poisson, Negative Binomial)
|
|
111
|
+
|
|
112
|
+
### Credibility
|
|
113
|
+
- Bühlmann
|
|
114
|
+
- Bühlmann–Straub
|
|
115
|
+
|
|
116
|
+
### Risk Measures
|
|
117
|
+
- VaR
|
|
118
|
+
- TVaR
|
|
119
|
+
- Stop-loss
|
|
120
|
+
- Limited Expected Value (LEV)
|
|
121
|
+
- PMF-based VaR / TVaR / stop-loss
|
|
122
|
+
|
|
123
|
+
---
|
|
124
|
+
|
|
125
|
+
## Aggregate Methods
|
|
126
|
+
|
|
127
|
+
### Simulation
|
|
128
|
+
|
|
129
|
+
```python
|
|
130
|
+
samples = model.sample(100_000)
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
### Panjer Recursion
|
|
134
|
+
|
|
135
|
+
```python
|
|
136
|
+
from lossmodels.aggregate import discretize_severity, panjer_recursion
|
|
137
|
+
|
|
138
|
+
pmf = discretize_severity(sev, h=0.01, max_loss=20.0)
|
|
139
|
+
agg = panjer_recursion(freq, pmf, n_steps=5000)
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
### FFT
|
|
143
|
+
|
|
144
|
+
```python
|
|
145
|
+
from lossmodels.aggregate import fft_aggregate_poisson
|
|
146
|
+
|
|
147
|
+
agg = fft_aggregate_poisson(freq, pmf, n_steps=5000)
|
|
148
|
+
```
|
|
149
|
+
|
|
150
|
+
---
|
|
151
|
+
|
|
152
|
+
## Risk Measures from PMF
|
|
153
|
+
|
|
154
|
+
```python
|
|
155
|
+
from lossmodels.aggregate import var_from_pmf, tvar_from_pmf
|
|
156
|
+
|
|
157
|
+
var95 = var_from_pmf(agg, h=0.01, q=0.95)
|
|
158
|
+
tvar95 = tvar_from_pmf(agg, h=0.01, q=0.95)
|
|
159
|
+
```
|
|
160
|
+
|
|
161
|
+
---
|
|
162
|
+
|
|
163
|
+
## Parameter Estimation
|
|
164
|
+
|
|
165
|
+
```python
|
|
166
|
+
from lossmodels.estimation import fit_lognormal, fit_best_severity
|
|
167
|
+
|
|
168
|
+
model = fit_lognormal(data)
|
|
169
|
+
best = fit_best_severity(data)
|
|
170
|
+
```
|
|
171
|
+
|
|
172
|
+
---
|
|
173
|
+
|
|
174
|
+
## Examples
|
|
175
|
+
|
|
176
|
+
See the `examples/` directory:
|
|
177
|
+
|
|
178
|
+
- `fit_and_compare_models.py`
|
|
179
|
+
- `panjer_vs_simulation.py`
|
|
180
|
+
- `panjer_vs_fft_vs_simulation.py`
|
|
181
|
+
- `credibility_example.py`
|
|
182
|
+
|
|
183
|
+
---
|
|
184
|
+
|
|
185
|
+
## Testing
|
|
186
|
+
|
|
187
|
+
```bash
|
|
188
|
+
pytest -v
|
|
189
|
+
```
|
|
190
|
+
|
|
191
|
+
Fast tests only:
|
|
192
|
+
|
|
193
|
+
```bash
|
|
194
|
+
pytest -v -m "not slow"
|
|
195
|
+
```
|
|
196
|
+
|
|
197
|
+
---
|
|
198
|
+
|
|
199
|
+
## Project Status
|
|
200
|
+
|
|
201
|
+
Core *Loss Models* functionality is implemented.
|
|
202
|
+
|
|
203
|
+
Planned improvements:
|
|
204
|
+
- Extreme Value Theory (EVT)
|
|
205
|
+
- Bootstrap methods
|
|
206
|
+
- Performance optimization
|
|
207
|
+
- Additional distributions
|
|
208
|
+
- Documentation
|
|
209
|
+
|
|
210
|
+
---
|
|
211
|
+
|
|
212
|
+
## License
|
|
213
|
+
|
|
214
|
+
MIT License
|
|
@@ -0,0 +1,193 @@
|
|
|
1
|
+
# lossmodels
|
|
2
|
+
|
|
3
|
+
[]()
|
|
4
|
+
[]()
|
|
5
|
+
|
|
6
|
+
A Python library for actuarial loss modeling using frequency–severity methods.
|
|
7
|
+
|
|
8
|
+
---
|
|
9
|
+
|
|
10
|
+
## Overview
|
|
11
|
+
|
|
12
|
+
`lossmodels` provides a clean, modular implementation of core actuarial techniques from *Loss Models: Data to Decisions* (Klugman, Panjer, Willmot), including:
|
|
13
|
+
|
|
14
|
+
- frequency–severity modeling
|
|
15
|
+
- aggregate loss modeling (simulation, Panjer recursion, FFT)
|
|
16
|
+
- parameter estimation (MLE, method of moments)
|
|
17
|
+
- credibility theory
|
|
18
|
+
- risk measurement (VaR, TVaR, stop-loss)
|
|
19
|
+
|
|
20
|
+
Designed for:
|
|
21
|
+
- actuaries and actuarial analysts
|
|
22
|
+
- quantitative developers
|
|
23
|
+
- data scientists in insurance
|
|
24
|
+
|
|
25
|
+
---
|
|
26
|
+
|
|
27
|
+
## Quick Example
|
|
28
|
+
|
|
29
|
+
```python
|
|
30
|
+
from lossmodels.frequency import Poisson
|
|
31
|
+
from lossmodels.severity import Lognormal
|
|
32
|
+
from lossmodels.aggregate import CollectiveRiskModel
|
|
33
|
+
|
|
34
|
+
freq = Poisson(lam=2.0)
|
|
35
|
+
sev = Lognormal(mu=10.0, sigma=0.8)
|
|
36
|
+
|
|
37
|
+
model = CollectiveRiskModel(freq, sev)
|
|
38
|
+
|
|
39
|
+
print("Mean:", model.mean())
|
|
40
|
+
print("VaR 95%:", model.var(0.95))
|
|
41
|
+
print("TVaR 95%:", model.tvar(0.95))
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
---
|
|
45
|
+
|
|
46
|
+
## Installation
|
|
47
|
+
|
|
48
|
+
```bash
|
|
49
|
+
pip install lossmodels
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
Or for development:
|
|
53
|
+
|
|
54
|
+
```bash
|
|
55
|
+
pip install -e .
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
---
|
|
59
|
+
|
|
60
|
+
## Core Features
|
|
61
|
+
|
|
62
|
+
### Frequency Models
|
|
63
|
+
- Poisson
|
|
64
|
+
- Negative Binomial
|
|
65
|
+
- Binomial
|
|
66
|
+
- Geometric
|
|
67
|
+
- Empirical frequency
|
|
68
|
+
|
|
69
|
+
### Severity Models
|
|
70
|
+
- Exponential
|
|
71
|
+
- Gamma
|
|
72
|
+
- Lognormal
|
|
73
|
+
- Pareto
|
|
74
|
+
- Weibull
|
|
75
|
+
- Empirical severity
|
|
76
|
+
|
|
77
|
+
### Aggregate Modeling
|
|
78
|
+
- Monte Carlo simulation
|
|
79
|
+
- Panjer recursion
|
|
80
|
+
- FFT (Fast Fourier Transform)
|
|
81
|
+
|
|
82
|
+
### Estimation
|
|
83
|
+
- Maximum Likelihood Estimation (MLE)
|
|
84
|
+
- Method of Moments
|
|
85
|
+
- Generic numerical MLE
|
|
86
|
+
|
|
87
|
+
### Model Selection
|
|
88
|
+
- Best severity selection (AIC / BIC)
|
|
89
|
+
- Best frequency selection (Poisson, Negative Binomial)
|
|
90
|
+
|
|
91
|
+
### Credibility
|
|
92
|
+
- Bühlmann
|
|
93
|
+
- Bühlmann–Straub
|
|
94
|
+
|
|
95
|
+
### Risk Measures
|
|
96
|
+
- VaR
|
|
97
|
+
- TVaR
|
|
98
|
+
- Stop-loss
|
|
99
|
+
- Limited Expected Value (LEV)
|
|
100
|
+
- PMF-based VaR / TVaR / stop-loss
|
|
101
|
+
|
|
102
|
+
---
|
|
103
|
+
|
|
104
|
+
## Aggregate Methods
|
|
105
|
+
|
|
106
|
+
### Simulation
|
|
107
|
+
|
|
108
|
+
```python
|
|
109
|
+
samples = model.sample(100_000)
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
### Panjer Recursion
|
|
113
|
+
|
|
114
|
+
```python
|
|
115
|
+
from lossmodels.aggregate import discretize_severity, panjer_recursion
|
|
116
|
+
|
|
117
|
+
pmf = discretize_severity(sev, h=0.01, max_loss=20.0)
|
|
118
|
+
agg = panjer_recursion(freq, pmf, n_steps=5000)
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
### FFT
|
|
122
|
+
|
|
123
|
+
```python
|
|
124
|
+
from lossmodels.aggregate import fft_aggregate_poisson
|
|
125
|
+
|
|
126
|
+
agg = fft_aggregate_poisson(freq, pmf, n_steps=5000)
|
|
127
|
+
```
|
|
128
|
+
|
|
129
|
+
---
|
|
130
|
+
|
|
131
|
+
## Risk Measures from PMF
|
|
132
|
+
|
|
133
|
+
```python
|
|
134
|
+
from lossmodels.aggregate import var_from_pmf, tvar_from_pmf
|
|
135
|
+
|
|
136
|
+
var95 = var_from_pmf(agg, h=0.01, q=0.95)
|
|
137
|
+
tvar95 = tvar_from_pmf(agg, h=0.01, q=0.95)
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
---
|
|
141
|
+
|
|
142
|
+
## Parameter Estimation
|
|
143
|
+
|
|
144
|
+
```python
|
|
145
|
+
from lossmodels.estimation import fit_lognormal, fit_best_severity
|
|
146
|
+
|
|
147
|
+
model = fit_lognormal(data)
|
|
148
|
+
best = fit_best_severity(data)
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
---
|
|
152
|
+
|
|
153
|
+
## Examples
|
|
154
|
+
|
|
155
|
+
See the `examples/` directory:
|
|
156
|
+
|
|
157
|
+
- `fit_and_compare_models.py`
|
|
158
|
+
- `panjer_vs_simulation.py`
|
|
159
|
+
- `panjer_vs_fft_vs_simulation.py`
|
|
160
|
+
- `credibility_example.py`
|
|
161
|
+
|
|
162
|
+
---
|
|
163
|
+
|
|
164
|
+
## Testing
|
|
165
|
+
|
|
166
|
+
```bash
|
|
167
|
+
pytest -v
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
Fast tests only:
|
|
171
|
+
|
|
172
|
+
```bash
|
|
173
|
+
pytest -v -m "not slow"
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
---
|
|
177
|
+
|
|
178
|
+
## Project Status
|
|
179
|
+
|
|
180
|
+
Core *Loss Models* functionality is implemented.
|
|
181
|
+
|
|
182
|
+
Planned improvements:
|
|
183
|
+
- Extreme Value Theory (EVT)
|
|
184
|
+
- Bootstrap methods
|
|
185
|
+
- Performance optimization
|
|
186
|
+
- Additional distributions
|
|
187
|
+
- Documentation
|
|
188
|
+
|
|
189
|
+
---
|
|
190
|
+
|
|
191
|
+
## License
|
|
192
|
+
|
|
193
|
+
MIT License
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61.0"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "lossmodels"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "A Python library for actuarial loss modeling using frequency-severity methods."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.10"
|
|
11
|
+
license = { text = "MIT" }
|
|
12
|
+
authors = [
|
|
13
|
+
{ name = "Michael Bryant" }
|
|
14
|
+
]
|
|
15
|
+
keywords = [
|
|
16
|
+
"actuarial",
|
|
17
|
+
"insurance",
|
|
18
|
+
"risk",
|
|
19
|
+
"statistics",
|
|
20
|
+
"simulation",
|
|
21
|
+
"loss modeling",
|
|
22
|
+
"frequency severity"
|
|
23
|
+
]
|
|
24
|
+
|
|
25
|
+
dependencies = [
|
|
26
|
+
"numpy>=1.22",
|
|
27
|
+
"scipy>=1.8"
|
|
28
|
+
]
|
|
29
|
+
|
|
30
|
+
classifiers = [
|
|
31
|
+
"Programming Language :: Python :: 3",
|
|
32
|
+
"License :: OSI Approved :: MIT License",
|
|
33
|
+
"Operating System :: OS Independent",
|
|
34
|
+
]
|
|
35
|
+
|
|
36
|
+
[project.optional-dependencies]
|
|
37
|
+
dev = [
|
|
38
|
+
"pytest>=7.0",
|
|
39
|
+
"pytest-cov>=4.0"
|
|
40
|
+
]
|
|
41
|
+
|
|
42
|
+
[project.urls]
|
|
43
|
+
Homepage = "https://github.com/michaelabryant/lossmodels"
|
|
44
|
+
Repository = "https://github.com/michaelabryant/lossmodels"
|
|
45
|
+
Issues = "https://github.com/michaelabryant/lossmodels/issues"
|
|
46
|
+
|
|
47
|
+
[tool.setuptools]
|
|
48
|
+
package-dir = {"" = "src"}
|
|
49
|
+
license-files = []
|
|
50
|
+
|
|
51
|
+
[tool.setuptools.packages.find]
|
|
52
|
+
where = ["src"]
|
|
53
|
+
|
|
54
|
+
[tool.pytest.ini_options]
|
|
55
|
+
minversion = "7.0"
|
|
56
|
+
addopts = "-ra -q"
|
|
57
|
+
testpaths = [
|
|
58
|
+
"tests"
|
|
59
|
+
]
|
|
60
|
+
markers = [
|
|
61
|
+
"slow: marks tests as slow",
|
|
62
|
+
]
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Single source of the runtime package version; keep in sync with the
# ``version`` field in pyproject.toml when releasing.
__version__ = "0.1.0"
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
# Public API of the ``lossmodels.aggregate`` subpackage: aggregate-loss
# model classes plus the functional helpers for discretization, Panjer
# recursion, FFT convolution, and PMF-based risk measures.
from .base import AggregateModel
from .collective import CollectiveRiskModel
from .risk_measures import var, tvar, stop_loss, lev, exceedance_probability
from .discretization import (
    discretize_severity,
    bucket_representatives,
    mean_from_discretized_pmf,
)
from .panjer import (
    panjer_recursion,
    cdf_from_pmf,
    mean_from_aggregate_pmf,
)
from .fft import (
    fft_aggregate_poisson,
    cdf_from_pmf_fft,
    mean_from_aggregate_pmf_fft,
)

from .risk_measures_pmf import (
    var_from_pmf,
    tvar_from_pmf,
    stop_loss_from_pmf,
    mean_from_pmf,
)

# Names re-exported by ``from lossmodels.aggregate import *``.
__all__ = [
    "AggregateModel",
    "CollectiveRiskModel",
    "var",
    "tvar",
    "stop_loss",
    "lev",
    "exceedance_probability",
    "discretize_severity",
    "bucket_representatives",
    "mean_from_discretized_pmf",
    "panjer_recursion",
    "cdf_from_pmf",
    "mean_from_aggregate_pmf",
    "fft_aggregate_poisson",
    "cdf_from_pmf_fft",
    "mean_from_aggregate_pmf_fft",
    "var_from_pmf",
    "tvar_from_pmf",
    "stop_loss_from_pmf",
    "mean_from_pmf",
]
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from abc import ABC, abstractmethod
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class AggregateModel(ABC):
    """
    Abstract base class for aggregate loss models.

    Concrete subclasses supply ``sample``, ``mean`` and ``variance``.
    All remaining risk measures defined here (VaR, TVaR, stop-loss,
    limited expected value) are estimated via Monte Carlo simulation
    on top of ``sample``.
    """

    @abstractmethod
    def sample(self, size: int = 1) -> np.ndarray:
        """Draw ``size`` random realizations of the aggregate loss S."""

    @abstractmethod
    def mean(self) -> float:
        """Return E[S], the expected aggregate loss."""

    @abstractmethod
    def variance(self) -> float:
        """Return Var(S), the variance of the aggregate loss."""

    def std(self) -> float:
        """Return the standard deviation of the aggregate loss."""
        return np.sqrt(self.variance())

    def var(self, q: float, n_sim: int = 100_000) -> float:
        """
        Value-at-Risk at probability level ``q``, estimated by simulation.

        Raises ``ValueError`` when ``q`` is not strictly inside (0, 1).
        """
        if not 0 < q < 1:
            raise ValueError("q must be between 0 and 1")

        draws = self.sample(n_sim)
        return float(np.quantile(draws, q))

    def tvar(self, q: float, n_sim: int = 100_000) -> float:
        """
        Tail Value-at-Risk at probability level ``q``, estimated by
        simulation as the mean of losses strictly beyond the empirical
        ``q``-quantile.
        """
        if not 0 < q < 1:
            raise ValueError("q must be between 0 and 1")

        draws = self.sample(n_sim)
        threshold = np.quantile(draws, q)
        excess = draws[draws > threshold]

        # Degenerate tail (e.g. many draws equal to the quantile):
        # fall back to the quantile itself.
        if excess.size == 0:
            return float(threshold)

        return float(excess.mean())

    def stop_loss(self, d: float, n_sim: int = 100_000) -> float:
        """
        Expected stop-loss premium E[(S - d)+], estimated by simulation.

        Raises ``ValueError`` when the retention ``d`` is negative.
        """
        if d < 0:
            raise ValueError("d must be nonnegative")

        draws = self.sample(n_sim)
        payouts = np.maximum(draws - d, 0.0)
        return float(payouts.mean())

    def limited_expected_value(self, d: float, n_sim: int = 100_000) -> float:
        """
        Limited expected value E[min(S, d)], estimated by simulation.

        Raises ``ValueError`` when the limit ``d`` is negative.
        """
        if d < 0:
            raise ValueError("d must be nonnegative")

        draws = self.sample(n_sim)
        capped = np.minimum(draws, d)
        return float(capped.mean())

    def __repr__(self):
        return f"{type(self).__name__}()"
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
|
|
3
|
+
from .base import AggregateModel
|
|
4
|
+
from ..frequency.base import FrequencyModel
|
|
5
|
+
from ..severity.base import SeverityModel
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class CollectiveRiskModel(AggregateModel):
    """
    Collective risk model for the aggregate loss

        S = X1 + X2 + ... + XN

    where N is the claim count (frequency) random variable and the Xi
    are claim severities.

    Assumptions:
    - the severities Xi are iid,
    - N is independent of the severities.
    """

    def __init__(self, frequency: FrequencyModel, severity: SeverityModel):
        self.frequency = frequency
        self.severity = severity

    def sample(self, size: int = 1) -> np.ndarray:
        """
        Simulate ``size`` aggregate losses, one per period.

        Raises ``ValueError`` when ``size`` is not positive.
        """
        if size <= 0:
            raise ValueError("size must be positive")

        claim_counts = self.frequency.sample(size=size)
        totals = np.zeros(size, dtype=float)

        for idx, count in enumerate(claim_counts):
            # Periods with no claims contribute an aggregate loss of zero.
            if count <= 0:
                continue
            totals[idx] = self.severity.sample(size=int(count)).sum()

        return totals

    def mean(self) -> float:
        """E[S] = E[N] * E[X]."""
        return self.frequency.mean() * self.severity.mean()

    def variance(self) -> float:
        """Var(S) = E[N] Var(X) + Var(N) (E[X])^2 (compound variance formula)."""
        freq_mean = self.frequency.mean()
        freq_var = self.frequency.variance()
        sev_mean = self.severity.mean()
        sev_var = self.severity.variance()

        return freq_mean * sev_var + freq_var * sev_mean ** 2

    def frequency_mean(self) -> float:
        return self.frequency.mean()

    def severity_mean(self) -> float:
        return self.severity.mean()

    def summary(self) -> dict:
        """
        Return a small summary of the model components and moments.
        """
        return {
            "frequency_model": repr(self.frequency),
            "severity_model": repr(self.severity),
            "frequency_mean": self.frequency.mean(),
            "severity_mean": self.severity.mean(),
            "aggregate_mean": self.mean(),
            "aggregate_variance": self.variance(),
            "aggregate_std": self.std(),
        }

    def __repr__(self):
        return (
            f"CollectiveRiskModel("
            f"frequency={self.frequency!r}, "
            f"severity={self.severity!r})"
        )
|