sdevpy 1.0.2__tar.gz → 1.0.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sdevpy-1.0.2/src/sdevpy.egg-info → sdevpy-1.0.4}/PKG-INFO +2 -2
- {sdevpy-1.0.2 → sdevpy-1.0.4}/pyproject.toml +3 -3
- sdevpy-1.0.4/src/sdevpy/__init__.py +1 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/analytics/bachelier.py +15 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/analytics/black.py +2 -1
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/machinelearning/learningmodel.py +6 -2
- sdevpy-1.0.4/src/sdevpy/machinelearning/learningschedules.py +78 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/machinelearning/topology.py +1 -1
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/maths/optimization.py +8 -4
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/stovol/stovolgen.py +3 -3
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/stovol/stovolplot.py +4 -3
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/stovol/stovoltrain.py +9 -8
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/stovolinverse/stovolinvgen.py +12 -5
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/stovolinverse/stovolinvtrain.py +69 -84
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_lets_be_rational/__init__.py +48 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_lets_be_rational/constants.py +64 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_lets_be_rational/erf_cody.py +447 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_lets_be_rational/exceptions.py +67 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_lets_be_rational/lets_be_rational.py +800 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_lets_be_rational/normaldistribution.py +193 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_lets_be_rational/numba_helper.py +13 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_lets_be_rational/rationalcubic.py +271 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/__init__.py +31 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black/__init__.py +179 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black/greeks/__init__.py +0 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black/greeks/analytical.py +272 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black/greeks/numerical.py +250 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black/implied_volatility.py +291 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black_scholes/__init__.py +83 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black_scholes/greeks/__init__.py +0 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black_scholes/greeks/analytical.py +277 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black_scholes/greeks/numerical.py +317 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black_scholes/implied_volatility.py +101 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black_scholes_merton/__init__.py +89 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black_scholes_merton/greeks/__init__.py +0 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black_scholes_merton/greeks/analytical.py +308 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black_scholes_merton/greeks/numerical.py +265 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/black_scholes_merton/implied_volatility.py +115 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/helpers/__init__.py +113 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/helpers/constants.py +51 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/helpers/distributions.py +225 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/helpers/doctest_helper.py +61 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/helpers/exceptions.py +58 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/helpers/numerical_greeks.py +216 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/__init__.py +31 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black/__init__.py +235 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black/greeks/__init__.py +0 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black/greeks/analytical.py +276 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black/greeks/numerical.py +221 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black/implied_volatility.py +116 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black_scholes/__init__.py +156 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black_scholes/greeks/__init__.py +0 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black_scholes/greeks/analytical.py +278 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black_scholes/greeks/numerical.py +289 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black_scholes/implied_volatility.py +109 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black_scholes_merton/__init__.py +230 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black_scholes_merton/greeks/__init__.py +0 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black_scholes_merton/greeks/analytical.py +310 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black_scholes_merton/greeks/numerical.py +222 -0
- sdevpy-1.0.4/src/sdevpy/thirdparty/py_vollib/ref_python/black_scholes_merton/implied_volatility.py +103 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/tools/filemanager.py +1 -2
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/volsurfacegen/sabrgenerator.py +57 -38
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/volsurfacegen/smilegenerator.py +36 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4/src/sdevpy.egg-info}/PKG-INFO +2 -2
- sdevpy-1.0.4/src/sdevpy.egg-info/SOURCES.txt +97 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy.egg-info/requires.txt +1 -1
- sdevpy-1.0.2/src/sdevpy/__init__.py +0 -1
- sdevpy-1.0.2/src/sdevpy/machinelearning/learningschedules.py +0 -54
- sdevpy-1.0.2/src/sdevpy.egg-info/SOURCES.txt +0 -51
- {sdevpy-1.0.2 → sdevpy-1.0.4}/LICENSE +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/README.md +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/setup.cfg +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/analytics/fbsabr.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/analytics/mcheston.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/analytics/mcsabr.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/analytics/mczabr.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/analytics/sabr.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/machinelearning/callbacks.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/machinelearning/datasets.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/maths/interpolations.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/maths/metrics.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/maths/rand.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/montecarlo/smoothers.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/aad/aad_mc.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/aad/aad_mc_nd.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/datafiles.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/settings.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/test.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/tools/clipboard.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/tools/constants.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/tools/jsonmanager.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/tools/timegrids.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/tools/timer.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/tools/utils.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/volsurfacegen/fbsabrgenerator.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/volsurfacegen/mchestongenerator.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/volsurfacegen/mcsabrgenerator.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/volsurfacegen/mczabrgenerator.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/volsurfacegen/stovolfactory.py +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy.egg-info/dependency_links.txt +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy.egg-info/top_level.txt +0 -0
- {sdevpy-1.0.2 → sdevpy-1.0.4}/tests/test.py +0 -0
{sdevpy-1.0.2/src/sdevpy.egg-info → sdevpy-1.0.4}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sdevpy
-Version: 1.0.2
+Version: 1.0.4
 Summary: Python package for Machine Learning in Finance
 Author-email: Sebastien Gurrieri <sebgur@gmail.com>
 Project-URL: Git page, https://github.com/sebgur/SDev.Python
@@ -13,11 +13,11 @@ Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: pandas
 Requires-Dist: pyperclip
-Requires-Dist: py_vollib
 Requires-Dist: numpy
 Requires-Dist: tensorflow
 Requires-Dist: scikit-learn
 Requires-Dist: tensorflow_probability
+Requires-Dist: silence_tensorflow
 
 # SDev.Python
 
{sdevpy-1.0.2 → sdevpy-1.0.4}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "sdevpy"
-version = "1.0.2"
+version = "1.0.4"
 authors = [
   { name="Sebastien Gurrieri", email="sebgur@gmail.com" },
 ]
@@ -17,8 +17,8 @@ classifiers = [
     "Operating System :: OS Independent",
 ]
 dependencies = [
-    "pandas","pyperclip","py_vollib","numpy","tensorflow",
-    "scikit-learn", "tensorflow_probability"
+    "pandas","pyperclip","numpy","tensorflow",
+    "scikit-learn", "tensorflow_probability", "silence_tensorflow"
 ]
 
 [project.urls]
sdevpy-1.0.4/src/sdevpy/__init__.py
@@ -0,0 +1 @@
+__version__ = '1.0.4'
{sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/analytics/bachelier.py
@@ -12,6 +12,21 @@ def price(expiry, strike, is_call, fwd, vol):
     return stdev * (wd * norm.cdf(wd) + norm.pdf(d))
 
 
+def price_straddles(expiries, strikes, fwd, vols):
+    expiries_ = np.asarray(expiries).reshape(-1, 1)
+    prices = []
+    for i, expiry in enumerate(expiries_):
+        k_prices = []
+        for j, k in enumerate(strikes[i]):
+            iv = vols[i, j]
+            call_price = price(expiry, k, True, fwd, iv)
+            put_price = price(expiry, k, False, fwd, iv)
+            k_prices.append(call_price[0] + put_price[0])
+        prices.append(k_prices)
+
+    return np.asarray(prices)
+
+
 def implied_vol(expiry, strike, is_call, fwd, fwd_price):
     """ P. Jaeckel's method in "Implied Normal Volatility", 6th Jun. 2017 """
     m = fwd - strike
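The new bachelier.price_straddles builds one smile row per expiry, pricing a straddle (call plus put) at each strike. A minimal standalone sketch of the same loop, assuming the Bachelier price convention used by bachelier.price (price = stdev * (w*d*Phi(w*d) + phi(d)) with stdev = vol*sqrt(expiry), d = (fwd - strike)/stdev, w = +1 for calls and -1 for puts):

import numpy as np
from scipy.stats import norm

def bachelier_price(expiry, strike, is_call, fwd, vol):
    # Bachelier (normal-model) forward option price
    stdev = vol * np.sqrt(expiry)
    d = (fwd - strike) / stdev
    w = 1.0 if is_call else -1.0
    return stdev * (w * d * norm.cdf(w * d) + norm.pdf(d))

def price_straddles(expiries, strikes, fwd, vols):
    # One smile row per expiry: straddle = call + put at each strike
    prices = []
    for i, expiry in enumerate(np.asarray(expiries).ravel()):
        row = [bachelier_price(expiry, k, True, fwd, vols[i, j])
               + bachelier_price(expiry, k, False, fwd, vols[i, j])
               for j, k in enumerate(strikes[i])]
        prices.append(row)
    return np.asarray(prices)

expiries = [1.0, 5.0]
strikes = np.array([[0.04, 0.05, 0.06], [0.04, 0.05, 0.06]])
vols = np.full((2, 3), 0.01)  # 100bp normal vols
print(price_straddles(expiries, strikes, fwd=0.05, vols=vols))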
{sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/analytics/black.py
@@ -2,7 +2,8 @@
 import numpy as np
 import scipy.stats
 from scipy.optimize import minimize_scalar
-import py_vollib.black.implied_volatility as jaeckel
+# import py_vollib.black.implied_volatility as jaeckel
+from sdevpy.thirdparty.py_vollib.black import implied_volatility as jaeckel
 import tensorflow as tf
 import tensorflow_probability as tfp
 from sdevpy import settings
{sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/machinelearning/learningmodel.py
@@ -7,6 +7,7 @@ import tensorflow as tf
 import joblib
 import absl.logging
 from sdevpy.tools import jsonmanager
+from sdevpy.tools import filemanager
 
 class LearningModel:
     """ Wrapper class for machine learning models, including scalers, and simplifying
@@ -54,10 +55,12 @@ class LearningModel:
 
     def save(self, path):
         """ Save model and its scalers to files """
+        filemanager.check_directory(path)
         # Save keras model first. Turn dummy warning off temporarily.
         verbosity = absl.logging.get_verbosity()
         absl.logging.set_verbosity(absl.logging.ERROR)
-
+        model_file = os.path.join(path, "model.keras")
+        self.model.save(model_file)
         absl.logging.set_verbosity(verbosity)
 
         # Save scalers
@@ -132,7 +135,8 @@ def load_learning_model(path, compile_=False):
     if os.path.exists(path) is False:
         raise RuntimeError("Model folder does not exist: " + path)
 
-
+    model_file = os.path.join(path, "model.keras")
+    keras_model = tf.keras.models.load_model(model_file, compile=compile_)
 
     x_scaler_file, y_scaler_file = scaler_files(path)
     if os.path.exists(x_scaler_file) and os.path.exists(y_scaler_file):
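Both learningmodel.py hunks pin the Keras artifact to a fixed model.keras file inside the model folder: save() writes it after ensuring the directory exists, and load_learning_model() reads it back with the caller's compile flag. A minimal round-trip sketch of that convention outside the LearningModel wrapper; the folder name is hypothetical and os.makedirs stands in for filemanager.check_directory:

import os
import tensorflow as tf

path = "mymodel"                    # hypothetical model folder
os.makedirs(path, exist_ok=True)    # stand-in for filemanager.check_directory(path)

model = tf.keras.Sequential([tf.keras.Input(shape=(2,)),
                             tf.keras.layers.Dense(4, activation="softplus"),
                             tf.keras.layers.Dense(1)])

model_file = os.path.join(path, "model.keras")   # fixed file-name convention
model.save(model_file)                           # native .keras format
reloaded = tf.keras.models.load_model(model_file, compile=False)
print(reloaded.count_params())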
sdevpy-1.0.4/src/sdevpy/machinelearning/learningschedules.py
@@ -0,0 +1,78 @@
+""" Custom learning schedules """
+import tensorflow as tf
+import math
+from sdevpy.tools.constants import TWO_PI
+
+# Custom learning rate scheduler, exponentially decreases between given values
+class FlooredExponentialDecay(tf.keras.optimizers.schedules.LearningRateSchedule):
+    """ Custom learning rate scheduler, exponentially decreases between given values """
+    def __init__(self, num_samples, batch_size, target_epoch, initial_lr=1e-1, final_lr=1e-4):
+        self.initial_lr = initial_lr
+        self.final_lr = final_lr
+        # self.decay = decay
+        # self.decay_steps = decay_steps
+        # A step is the usage of one gradient, i.e. for one batch. As we go through the whole sample
+        # in 1 epoch, the number of steps per epoch is given by the number of batches per epoch
+        # i.e. the formula below.
+        steps_per_epoch = num_samples / batch_size
+        percent_reached = 0.10  # Percentage of the final LR reached by the chosen epoch
+        self.decay = final_lr * percent_reached / (initial_lr - final_lr)
+        self.steps_to_target = np.float32(steps_per_epoch * target_epoch)
+
+    def __call__(self, step):
+        ratio = tf.cast(step / self.steps_to_target, tf.float32)
+        coeff = tf.pow(self.decay, ratio)
+        ampl = self.initial_lr - self.final_lr
+        return self.final_lr + ampl * coeff
+
+    # def __call__(self, step):
+    #     ratio = tf.cast(step / self.decay_steps, tf.float32)
+    #     coeff = tf.pow(self.decay, ratio)
+    #     return self.initial_lr * coeff + self.final_lr * (1.0 - coeff)
+
+    def get_config(self):
+        config = { 'initial_lr': self.initial_lr,
+                   'final_lr': self.final_lr,
+                   'decay': self.decay,
+                   'decay_steps': self.steps_to_target }
+        return config
+
+import numpy as np
+
+# Custom learning rate scheduler, cyclically exponentially decreases between given values
+class CyclicalExponentialDecay(tf.keras.optimizers.schedules.LearningRateSchedule):
+    """ Custom learning rate scheduler, cyclically exponentially decreases between given values """
+    def __init__(self, num_samples, batch_size, target_epoch, initial_lr=1e-1, final_lr=1e-4,
+                 periods=10.0):
+        # Amplitude decay
+        self.initial_lr = initial_lr
+        self.final_lr = final_lr
+        # self.period = periods
+        # A step is the usage of one gradient, i.e. for one batch. As we go through the whole sample
+        # in 1 epoch, the number of steps per epoch is given by the number of batches per epoch
+        # i.e. the formula below.
+        steps_per_epoch = num_samples / batch_size
+        percent_reached = 0.10  # Percentage of the final LR reached by the chosen epoch
+        self.decay = final_lr * percent_reached / (initial_lr - final_lr)
+        self.steps_to_target = np.float32(steps_per_epoch * target_epoch)
+
+        # Oscillations
+        self.steps_per_period = np.float32(target_epoch * steps_per_epoch / periods)
+
+    def __call__(self, step):
+        ratio = tf.cast(step / self.steps_to_target, tf.float32)
+        coeff = tf.pow(self.decay, ratio)
+        ampl = self.initial_lr - self.final_lr
+        two_pi = tf.cast(TWO_PI, tf.float32)
+        arg = tf.cast(step / self.steps_per_period, tf.float32)
+        oscillation = (2.0 + tf.math.cos(arg * two_pi)) / 2.0  # Between 0.5 and 1.5
+        ampl = ampl * oscillation
+        return self.final_lr + ampl * coeff
+
+    def get_config(self):
+        config = { 'initial_lr': self.initial_lr,
+                   'final_lr': self.final_lr,
+                   'decay': self.decay,
+                   'steps_to_target': self.steps_to_target,
+                   'steps_per_period': self.steps_per_period }
+        return config
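A note on the constants in FlooredExponentialDecay: with percent_reached = 0.10 the decay base is decay = final_lr * 0.10 / (initial_lr - final_lr), so at step = steps_to_target the amplitude term (initial_lr - final_lr) * decay equals 10% of final_lr and the schedule sits at 1.1 * final_lr. That is what the training scripts' "down to 110% of final LR" comment refers to. A plain-NumPy re-statement of the formula to check this (a sketch, not the packaged class):

import numpy as np

def floored_exp_decay(step, num_samples, batch_size, target_epoch,
                      initial_lr=1e-1, final_lr=1e-4, percent_reached=0.10):
    # Same formula as FlooredExponentialDecay.__call__, without TensorFlow
    steps_per_epoch = num_samples / batch_size
    steps_to_target = steps_per_epoch * target_epoch
    decay = final_lr * percent_reached / (initial_lr - final_lr)
    coeff = decay ** (step / steps_to_target)
    return final_lr + (initial_lr - final_lr) * coeff

# At step 0 the schedule starts at initial_lr; at steps_to_target the
# amplitude has decayed to 10% of final_lr, i.e. lr = 1.1 * final_lr.
print(floored_exp_decay(0, 1_000_000, 1000, 90))       # 0.1
print(floored_exp_decay(90_000, 1_000_000, 1000, 90))  # ~1.1e-4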
{sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/machinelearning/topology.py
@@ -20,7 +20,7 @@ def compose_model(num_inputs, num_outputs, hidden_layers, neurons, dropout=0.2):
     model = tf.keras.Sequential()
 
     # Input layer
-    model.add(tf.keras.Input(num_inputs))
+    model.add(tf.keras.Input(shape=(num_inputs,)))
 
     # Hidden layers
     for layer in hidden_layers:
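The topology.py fix passes the input shape as a keyword tuple: tf.keras.Input expects the per-sample shape with the batch dimension excluded, and recent Keras versions expect a tuple rather than a bare positional integer here. A minimal check of the fixed call:

import tensorflow as tf

num_inputs = 6
model = tf.keras.Sequential()
model.add(tf.keras.Input(shape=(num_inputs,)))   # was: tf.keras.Input(num_inputs)
model.add(tf.keras.layers.Dense(16, activation="softplus"))
model.add(tf.keras.layers.Dense(1))
print(model.output_shape)  # (None, 1)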
{sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/maths/optimization.py
@@ -66,7 +66,7 @@ class SciPyOptimizer(Optimizer):
         popsize = self.kwargs.get('popsize', 15)
         strategy = self.kwargs.get('strategy', 'best1bin')
         recombination = self.kwargs.get('recombination', 0.7)
-        mutation = self.kwargs.get('mutation', (0.5, 1.0))
+        mutation = self.kwargs.get('mutation', (0.5, 1.0))  # ToDo: parameter not used
         result = opt.differential_evolution(f, x0=x0, args=args, bounds=bounds, atol=atol,
                                             popsize=popsize, strategy=strategy,
                                             recombination=recombination)
@@ -88,16 +88,20 @@ class MultiOptimizer(Optimizer):
 
     def minimize(self, f, x0=None, args=(), bounds=None):
         result = None
+        nfev = 0
         for i, optimizer in enumerate(self.optimizers_):
             print("Trying optimization using " + self.methods_[i] + ": ", end='')
             result = optimizer.minimize(f, x0, args, bounds)
+            nfev = nfev + result.nfev
             if result.fun < self.mtol_:
-                print("
+                print("Good enough!")
                 break
+            elif i < len(self.methods_) - 1:
+                print("Continuing")
             else:
-                print("
+                print("Stopping")
 
-        return result
+        return result, nfev
 
 
 if __name__ == "__main__":
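Note that MultiOptimizer.minimize now returns a (result, nfev) pair, with nfev accumulated across every optimizer tried, so existing call sites must unpack two values. A standalone sketch of the same chaining pattern built directly on scipy.optimize (a hypothetical stand-in with illustrative methods and tolerance, not the sdevpy class):

import numpy as np
from scipy import optimize as opt

def multi_minimize(f, x0, methods, mtol):
    # Try methods in order, accumulate nfev, stop once f beats the tolerance
    result, nfev = None, 0
    for i, method in enumerate(methods):
        print(f"Trying optimization using {method}: ", end='')
        result = opt.minimize(f, x0, method=method)
        nfev += result.nfev
        if result.fun < mtol:
            print("Good enough!")
            break
        elif i < len(methods) - 1:
            print("Continuing")
        else:
            print("Stopping")
    return result, nfev   # callers must unpack the pair

rosen = lambda x: (1 - x[0])**2 + 100 * (x[1] - x[0]**2)**2
res, total_nfev = multi_minimize(rosen, np.array([0.0, 0.0]),
                                 ["Nelder-Mead", "Powell"], mtol=1e-10)
print(res.fun, total_nfev)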
{sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/stovol/stovolgen.py
@@ -10,13 +10,13 @@ from sdevpy.tools.timer import Stopwatch
 
 
 # ################ Runtime configuration ##########################################################
-
-MODEL_TYPE = "McSABR"
+MODEL_TYPE = "SABR"
+# MODEL_TYPE = "McSABR"
 # MODEL_TYPE = "FbSABR"
 # MODEL_TYPE = "McZABR"
 # MODEL_TYPE = "McHeston"
 SHIFT = 0.03
-NUM_SAMPLES = 35 *
+NUM_SAMPLES = 35 * 100
 # The 4 parameters below are only relevant for models whose reference is calculated by MC
 NUM_EXPIRIES = 10
 NUM_STRIKES = 5
{sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/stovol/stovolplot.py
@@ -8,7 +8,8 @@ from sdevpy.tools import clipboard
 
 
 def plot_transform_surface(expiries, strikes, are_calls, fwd, ref_prices, mod_prices, title_,
-                           transform='ShiftedBlackScholes'):
+                           transform='ShiftedBlackScholes', ref_name='Reference',
+                           mod_name='Model'):
     """ Calculate quantities to display for the surface and display them in charts. Transformed
     quantities available are: Price, ShiftedBlackScholes (3%) and Bachelier (normal vols). """
     # Transform prices
@@ -27,8 +28,8 @@ def plot_transform_surface(expiries, strikes, are_calls, fwd, ref_prices, mod_pr
     for i in range(num_rows):
         for j in range(num_cols):
             k = num_cols * i + j
-            axs[i, j].plot(strikes[k], ref_disp[k], color='blue', label=
-            axs[i, j].plot(strikes[k], mod_disp[k], color='red', label=
+            axs[i, j].plot(strikes[k], ref_disp[k], color='blue', label=ref_name)
+            axs[i, j].plot(strikes[k], mod_disp[k], color='red', label=mod_name)
             axs[i, j].xaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=1))
             axs[i, j].yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=0))
             axs[i, j].set_xlabel('Strike')
{sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/stovol/stovoltrain.py
@@ -23,8 +23,8 @@ from sdevpy.projects.stovol import stovolplot as xplt
 
 
 # ################ Runtime configuration ##########################################################
-
-MODEL_TYPE = "McSABR"
+MODEL_TYPE = "SABR"
+# MODEL_TYPE = "McSABR"
 # MODEL_TYPE = "FbSABR"
 # MODEL_TYPE = "McZABR"
 # MODEL_TYPE = "McHeston"
@@ -38,9 +38,9 @@ TRAIN = True
 if USE_TRAINED is False and TRAIN is False:
     raise RuntimeError("When not using pre-trained models, a new model must be trained")
 
-NUM_SAMPLES =
+NUM_SAMPLES = 2 * 1000 * 1000  # Number of samples to read from sample files
 TRAIN_PERCENT = 0.90  # Proportion of dataset used for training (rest used for test)
-EPOCHS =
+EPOCHS = 100
 BATCH_SIZE = 1000
 SHOW_VOL_CHARTS = True  # Show smile section charts
 # For comparison to reference values (accuracy of reference)
@@ -141,11 +141,12 @@ print(f"> Drop-out rate: {DROP_OUT:.2f}")
 # ################ Train the model ################################################################
 if TRAIN:
     # Learning rate scheduler
-    INIT_LR = 1.0e-
+    INIT_LR = 1.0e-3
     FINAL_LR = 1.0e-4
-
-
-    lr_schedule = FlooredExponentialDecay(INIT_LR, FINAL_LR, DECAY, STEPS)
+    TARGET_EPOCH = EPOCHS * 0.90  # Epoch by which we plan to be down to 110% of final LR
+    PERIODS = 10  # Number of oscillation periods until target epoch
+    # lr_schedule = FlooredExponentialDecay(INIT_LR, FINAL_LR, DECAY, STEPS)
+    lr_schedule = FlooredExponentialDecay(NUM_SAMPLES, BATCH_SIZE, TARGET_EPOCH, INIT_LR, FINAL_LR)
 
     # Optimizer
     optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
{sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/stovolinverse/stovolinvgen.py
@@ -15,13 +15,15 @@ MODEL_TYPE = "SABR"
 # MODEL_TYPE = "McZABR"
 # MODEL_TYPE = "McHeston"
 SHIFT = 0.03
-NUM_SAMPLES =
+NUM_SAMPLES = 1000 * 1000
 # The 4 parameters below are only relevant for models whose reference is calculated by MC
 NUM_EXPIRIES = 15
 NUM_MC = 100 * 1000  # 100 * 1000
 POINTS_PER_YEAR = 25  # 25
-SEED =
+SEED = 8888  # [1357, 8642, 1000, 8888, 4444, 2222, 1111, 4321, 1234, 42]
 SPREADS = [-200, -100, -75, -50, -25, -10, 0, 10, 25, 50, 75, 100, 200]
+USE_NVOL = True
+NOISE = 0.01  # Relative size of noise
 
 print(">> Set up runtime configuration")
 project_folder = os.path.join(settings.WORKFOLDER, "stovolinv")
@@ -37,8 +39,13 @@ generator = stovolfactory.set_generator(MODEL_TYPE, SHIFT, NUM_EXPIRIES, num_mc=
 
 # ################ Select training ranges #########################################################
 # SABR
-RANGES = {'Ttm': [1.0 / 12.0,
-
+# RANGES = {'Ttm': [1.0 / 12.0, 6.0], 'F': [0.05, 0.06], 'LnVol': [0.20, 0.40],
+#           'Beta': [0.1, 0.9], 'Nu': [0.2, 1.0], 'Rho': [-0.5, 0.2]}  # 6y
+RANGES = {'Ttm': [1.0 / 12.0, 35.0], 'F': [0.05, 0.06], 'LnVol': [0.20, 0.40],
+          'Beta': [0.1, 0.9], 'Nu': [0.2, 1.0], 'Rho': [-0.5, 0.2]}  # All times
+
+# RANGES = {'Ttm': [1.0 / 12.0, 35.0], 'F': [-0.009, 0.041], 'LnVol': [0.05, 0.5],
+#           'Beta': [0.1, 0.9], 'Nu': [0.1, 1.0], 'Rho': [-0.6, 0.6]}
 # # FBSABR
 # RANGES = {'Ttm': [1.0 / 12.0, 5.0], 'F': [-0.009, 0.041], 'LnVol': [0.05, 0.5],
 #           'Beta': [0.25, 0.75], 'Nu': [0.1, 1.0], 'Rho': [-0.6, 0.6]}
@@ -57,7 +64,7 @@ print(">> Generate dataset")
 print(f"> Generate {NUM_SAMPLES:,} price samples")
 timer_gen = Stopwatch("Generating Samples")
 timer_gen.trigger()
-data_df = generator.generate_samples_inverse(NUM_SAMPLES, RANGES, SPREADS)
+data_df = generator.generate_samples_inverse(NUM_SAMPLES, RANGES, SPREADS, USE_NVOL, rel_noise=NOISE)
 timer_gen.stop()
 
 timer_out = Stopwatch("File Output")
{sdevpy-1.0.2 → sdevpy-1.0.4}/src/sdevpy/projects/stovolinverse/stovolinvtrain.py
@@ -17,25 +17,28 @@ from sdevpy.machinelearning.callbacks import RefCallback
 from sdevpy.machinelearning import datasets
 from sdevpy.tools import filemanager
 from sdevpy.tools.timer import Stopwatch
-# from sdevpy.tools import clipboard
 from sdevpy.maths.metrics import bps_rmse, tf_bps_rmse, tf_mse, mse, tf_rmse, rmse
 from sdevpy.volsurfacegen.stovolfactory import set_generator
 from sdevpy.projects.stovol import stovolplot as xplt
+from sdevpy.analytics import bachelier
 
 
 # ################ ToDo ###########################################################################
-#
-#
-#
-#
-#
-#
-
-
-#
-#
-
-#
+# At the comparison check between model and calibration, we tried adding random noise to the normal
+# vols used for comparison, on top of their values that come from a chosen SABR model. This way the
+# vols we apply the model/calibration to are no longer exactly SABR.
+# The issue was that the model was going off track very quickly with increasing size of noise.
+# Incidentally, the optimization says "FAILURE" but this is likely to be because the tolerance is
+# very small. We should try to make the tolerance larger and see how it goes.
+
+# So the idea would be to include non-SABR points in the training dataset. This could be done by the
+# same noise technique in reverse. Generate random SABR parameters, calculate the normal vols, then
+# add noise on the normal vols, then calculate prices. But then in principle we should calibrate
+# SABR to those prices again, to learn the truly optimum SABR parameters. However, isn't it possible
+# to simply consider the original SABR (before the noise) as the likely optimum? At least, we could
+# start the optimization there. It sounds likely that over a big range of data, these original SABRs
+# could overall be near-optimum enough. Which would then avoid us the trouble of going through the
+# calibration during the training.
 # ################ Module versions ################################################################
 print("TensorFlow version: " + tf.__version__)
 # print("Keras version: " + tf.keras.__version__)
@@ -50,7 +53,7 @@ MODEL_TYPE = "SABR"
 # MODEL_TYPE = "McZABR"
 # MODEL_TYPE = "McHeston"
 # MODEL_ID = "SABR_3L_64n"  # For pre-trained model ID (we can pre-train several versions)
-MODEL_ID = MODEL_TYPE  # For pre-trained model ID (we can pre-train several versions)
+MODEL_ID = "SABR"  # MODEL_TYPE # For pre-trained model ID (we can pre-train several versions)
 SHIFT = 0.03
 USE_TRAINED = True
 DOWNLOAD_MODELS = False  # Only used when USE_TRAINED is True
@@ -59,14 +62,15 @@ TRAIN = False
 if USE_TRAINED is False and TRAIN is False:
     raise RuntimeError("When not using pre-trained models, a new model must be trained")
 
-NUM_SAMPLES =
+NUM_SAMPLES = 1000 * 1000  # 2 * 1000 * 1000  # Number of samples to read from sample files
 TRAIN_PERCENT = 0.90  # Proportion of dataset used for training (rest used for test)
-EPOCHS =
+EPOCHS = 300
 BATCH_SIZE = 1000
 SHOW_VOL_CHARTS = True  # Show smile section charts
 # For comparison to reference values (accuracy of reference)
 NUM_MC = 100 * 1000  # 100 * 1000
 POINTS_PER_YEAR = 25  # 25
+USE_NVOL = True
 project_folder = os.path.join(settings.WORKFOLDER, "stovolinv")
 
 print(">> Set up runtime configuration")
@@ -146,7 +150,8 @@ else:
     print(">> Composing new model")
     # Initialize the model
    HIDDEN_LAYERS = ['softplus', 'softplus', 'softplus']
-    NUM_NEURONS =
+    # NUM_NEURONS = 128
+    NUM_NEURONS = 128
     DROP_OUT = 0.0
     keras_model = compose_model(input_dim, output_dim, HIDDEN_LAYERS, NUM_NEURONS, DROP_OUT)
     topology = { 'layers': HIDDEN_LAYERS, 'neurons': NUM_NEURONS, 'dropout': DROP_OUT}
@@ -162,13 +167,14 @@ print(f"> Drop-out rate: {DROP_OUT:.2f}")
 # ################ Train the model ################################################################
 if TRAIN:
     # Learning rate scheduler
-    INIT_LR = 1.0e-2
-    FINAL_LR = 1.0e-4
-
-
-
-    lr_schedule = CyclicalExponentialDecay(
-    #
+    INIT_LR = 1.0e-3  # 1.0e-2
+    FINAL_LR = 1.0e-4  # 1.0e-4
+    TARGET_EPOCH = EPOCHS * 0.90  # Epoch by which we plan to be down to 110% of final LR
+    PERIODS = 10  # Number of oscillation periods until target epoch
+
+    # lr_schedule = CyclicalExponentialDecay(NUM_SAMPLES, BATCH_SIZE, TARGET_EPOCH, INIT_LR, FINAL_LR,
+    #                                        PERIODS)
+    lr_schedule = FlooredExponentialDecay(NUM_SAMPLES, BATCH_SIZE, TARGET_EPOCH, INIT_LR, FINAL_LR)
 
     # Optimizer
     optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
@@ -183,7 +189,7 @@ if TRAIN:
     keras_model.compile(loss=tf_bps_rmse, optimizer=optimizer)
 
     # Callbacks
-    EPOCH_SAMPLING =
+    EPOCH_SAMPLING = 1
     callback = RefCallback(x_test, y_test, bps_rmse, optimizer=optimizer,
                            epoch_sampling=EPOCH_SAMPLING, x_train=x_train, y_train=y_train)
 
@@ -221,15 +227,16 @@ print(f"> RMSE on test set: {test_rmse:,.2f}")
 if SHOW_VOL_CHARTS:
     print("> Choosing a sample parameter set to display chart")
     NUM_STRIKES = 100
-    PARAMS = { 'LnVol': 0.
+    PARAMS = { 'LnVol': 0.30, 'Beta': 0.5, 'Nu': 0.50, 'Rho': -0.10, 'Gamma': 0.7, 'Kappa': 1.0,
               'Theta': 0.03, 'Xi': 0.35 }
-    FWD = 0.
+    FWD = 0.055
 
     # Any number of expiries can be calculated, but for optimum display choose no more than 6
     if MODEL_TYPE == "FbSABR":
         EXPIRIES = np.asarray([0.25, 0.50, 1.0, 2.0, 5.0, 10.0]).reshape(-1, 1)  # Only trained up to 5y
     else:
         EXPIRIES = np.asarray([0.25, 0.50, 1.0, 5.0, 10.0, 30.0]).reshape(-1, 1)
+        # EXPIRIES = np.asarray([0.25, 0.50, 1.0, 2.0, 3.0, 5.0]).reshape(-1, 1)
     NUM_EXPIRIES = EXPIRIES.shape[0]
 
     # Calculate market strikes and prices on the training spreads
@@ -239,20 +246,29 @@ if SHOW_VOL_CHARTS:
     TRAINING_SPREADS = np.asarray(TRAINING_SPREADS)
     TRAINING_SPREADS = np.tile(TRAINING_SPREADS, (NUM_EXPIRIES, 1))
     mkt_strikes = TRAINING_SPREADS / 10000.0 + FWD
-
-
-
+
+    # Calculate market prices and vols
+    rel_noise = 0.02
+    noise_thresh = 0.9
+    mkt_vols = generator.price_straddles_ref(EXPIRIES, mkt_strikes, FWD, PARAMS, True,
+                                             rel_noise=rel_noise, noise_thresh=noise_thresh)
+    mkt_prices = bachelier.price_straddles(EXPIRIES, mkt_strikes, FWD, mkt_vols)
+    # mkt_prices = generator.price_straddles_ref(EXPIRIES, mkt_strikes, FWD, PARAMS, False)
 
     # Use model to get parameters at each expiry, then calculate parameters and then prices
-    mod_params,
-
-
+    mod_params, mod_vols = generator.price_straddles_mod(model, EXPIRIES, mkt_strikes, FWD,
+                                                         mkt_vols, True)
+    # mod_vols = generator.price_straddles_ref(EXPIRIES, mkt_strikes, FWD, mod_params, True)
+
+    # mkt_prices = generator.price_straddles_ref(EXPIRIES, mkt_strikes, FWD, PARAMS, False)
+    rmse_mkt_mod = bps_rmse(mkt_vols, mod_vols)
     # print(mod_prices)
 
     # Calibrate prices by optimization
     weights = np.asarray([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
-    cal_params,
-
+    cal_params, cal_vols = generator.calibrate(EXPIRIES, mkt_strikes, FWD, mkt_prices, weights, True)
+    # cal_vols = generator.price_straddles_ref(EXPIRIES, mkt_strikes, FWD, cal_params, True)
+    rmse_mkt_cal = bps_rmse(mkt_vols, cal_vols)
     print(f"RMSE market-model: {rmse_mkt_mod:,.2f}")
     print(f"RMSE market-calibration: {rmse_mkt_cal:,.2f}")
     # print(cal_params)
@@ -288,81 +304,50 @@ if SHOW_VOL_CHARTS:
 
     # PV
     plot_spreads = TRAINING_SPREADS[0]
-    axs[0, 0].plot(plot_spreads,
-    axs[0, 0].plot(plot_spreads,
-    axs[0, 0].plot(plot_spreads,
+    axs[0, 0].plot(plot_spreads, mkt_vols[0], color='red', label='Target')
+    axs[0, 0].plot(plot_spreads, mod_vols[0], color='blue', label='Model')
+    axs[0, 0].plot(plot_spreads, cal_vols[0], 'g--', alpha=0.8, label='Calibration')
     axs[0, 0].set_xlabel('Spread')
     axs[0, 0].set_title(f"Fit vs Target at T={EXPIRIES[0]}")
     axs[0, 0].legend(loc='upper right')
 
-    axs[0, 1].plot(plot_spreads,
-    axs[0, 1].plot(plot_spreads,
-    axs[0, 1].plot(plot_spreads,
+    axs[0, 1].plot(plot_spreads, mkt_vols[1], color='red', label='Target')
+    axs[0, 1].plot(plot_spreads, mod_vols[1], color='blue', label='Model')
+    axs[0, 1].plot(plot_spreads, cal_vols[1], 'g--', alpha=0.8, label='Calibration')
     axs[0, 1].set_xlabel('Spread')
     axs[0, 1].set_title(f"Fit vs Target at T={EXPIRIES[1]}")
     axs[0, 1].legend(loc='upper right')
 
-    axs[1, 0].plot(plot_spreads,
-    axs[1, 0].plot(plot_spreads,
-    axs[1, 0].plot(plot_spreads,
+    axs[1, 0].plot(plot_spreads, mkt_vols[2], color='red', label='Target')
+    axs[1, 0].plot(plot_spreads, mod_vols[2], color='blue', label='Model')
+    axs[1, 0].plot(plot_spreads, cal_vols[2], 'g--', alpha=0.8, label='Calibration')
     axs[1, 0].set_xlabel('Spread')
     axs[1, 0].set_title(f"Fit vs Target at T={EXPIRIES[2]}")
     axs[1, 0].legend(loc='upper right')
 
-    axs[1, 1].plot(plot_spreads,
-    axs[1, 1].plot(plot_spreads,
-    axs[1, 1].plot(plot_spreads,
+    axs[1, 1].plot(plot_spreads, mkt_vols[3], color='red', label='Target')
+    axs[1, 1].plot(plot_spreads, mod_vols[3], color='blue', label='Model')
+    axs[1, 1].plot(plot_spreads, cal_vols[3], 'g--', alpha=0.8, label='Calibration')
     axs[1, 1].set_xlabel('Spread')
     axs[1, 1].set_title(f"Fit vs Target at T={EXPIRIES[3]}")
     axs[1, 1].legend(loc='upper right')
 
-    axs[2, 0].plot(plot_spreads,
-    axs[2, 0].plot(plot_spreads,
-    axs[2, 0].plot(plot_spreads,
+    axs[2, 0].plot(plot_spreads, mkt_vols[4], color='red', label='Target')
+    axs[2, 0].plot(plot_spreads, mod_vols[4], color='blue', label='Model')
+    axs[2, 0].plot(plot_spreads, cal_vols[4], 'g--', alpha=0.8, label='Calibration')
    axs[2, 0].set_xlabel('Spread')
     axs[2, 0].set_title(f"Fit vs Target at T={EXPIRIES[4]}")
     axs[2, 0].legend(loc='upper right')
 
-    axs[2, 1].plot(plot_spreads,
-    axs[2, 1].plot(plot_spreads,
-    axs[2, 1].plot(plot_spreads,
+    axs[2, 1].plot(plot_spreads, mkt_vols[5], color='red', label='Target')
+    axs[2, 1].plot(plot_spreads, mod_vols[5], color='blue', label='Model')
+    axs[2, 1].plot(plot_spreads, cal_vols[5], 'g--', alpha=0.8, label='Calibration')
     axs[2, 1].set_xlabel('Spread')
     axs[2, 1].set_title(f"Fit vs Target at T={EXPIRIES[5]}")
     axs[2, 1].legend(loc='upper right')
 
     plt.show()
 
-    # METHOD = 'Percentiles'
-    # PERCENTS = np.linspace(0.01, 0.99, num=NUM_STRIKES)
-    # PERCENTS = np.asarray([PERCENTS] * NUM_EXPIRIES)
-
-    # strikes = generator.convert_strikes(EXPIRIES, PERCENTS, FWD, PARAMS, METHOD)
-
-    # print("> Calculating chart surface with reference model")
-    # timer_ref = Stopwatch("Reference surface calculation")
-    # timer_ref.trigger()
-    # ref_prices = generator.price_straddle(EXPIRIES, strikes, FWD, PARAMS)
-    # timer_ref.stop()
-    # clipboard.export2d(ref_prices)
-    # print("> Calculating chart surface with trained model")
-    # timer_mod = Stopwatch("Model surface calculation")
-    # timer_mod.trigger()
-    # mod_prices = generator.price_surface_mod(model, EXPIRIES, strikes, ARE_CALLS, FWD, PARAMS)
-    # timer_mod.stop()
-    # print(f"> Ref-Mod RMSE(price): {bps_rmse(ref_prices, mod_prices):.2f}")
-
-    # Display timers
-    # timer_ref.print()
-    # timer_mod.print()
-
-    # Available tranforms: Price, ShiftedBlackScholes, Bachelier
-    # TITLE = f"{MODEL_TYPE} smile sections, forward={FWD*100:.2f}"#,%\n parameters={PARAMS}"
-    # TRANSFORM = "Bachelier"
-    # TRANSFORM = "Price"
-    # TRANSFORM = "ShiftedBlackScholes"
-    # xplt.plot_transform_surface(EXPIRIES, strikes, ARE_CALLS, FWD, ref_prices, mod_prices,
-    #                             TITLE, transform=TRANSFORM)
-
 # Show training history
 if TRAIN:
     hist_epochs = callback.epochs