sdevpy 0.0.1__py3-none-any.whl
- sdevpy/__init__.py +0 -0
- sdevpy/analytics/bachelier.py +66 -0
- sdevpy/analytics/black.py +81 -0
- sdevpy/analytics/fbsabr.py +183 -0
- sdevpy/analytics/mcheston.py +203 -0
- sdevpy/analytics/mcsabr.py +221 -0
- sdevpy/analytics/mczabr.py +220 -0
- sdevpy/analytics/sabr.py +72 -0
- sdevpy/example.py +2 -0
- sdevpy/machinelearning/callbacks.py +112 -0
- sdevpy/machinelearning/datasets.py +32 -0
- sdevpy/machinelearning/learningmodel.py +151 -0
- sdevpy/machinelearning/learningschedules.py +23 -0
- sdevpy/machinelearning/topology.py +65 -0
- sdevpy/maths/interpolations.py +28 -0
- sdevpy/maths/metrics.py +14 -0
- sdevpy/maths/optimization.py +1 -0
- sdevpy/maths/rand.py +99 -0
- sdevpy/projects/datafiles.py +28 -0
- sdevpy/projects/pinns/ernst_pinns.py +324 -0
- sdevpy/projects/pinns/pinns.py +345 -0
- sdevpy/projects/pinns/pinns_worst_of.py +635 -0
- sdevpy/projects/stovol/stovolgen.py +65 -0
- sdevpy/projects/stovol/stovolplot.py +110 -0
- sdevpy/projects/stovol/stovoltrain.py +247 -0
- sdevpy/projects/stovol/xsabrfit.py +255 -0
- sdevpy/settings.py +14 -0
- sdevpy/test.py +199 -0
- sdevpy/tools/clipboard.py +40 -0
- sdevpy/tools/constants.py +3 -0
- sdevpy/tools/filemanager.py +59 -0
- sdevpy/tools/jsonmanager.py +48 -0
- sdevpy/tools/timegrids.py +89 -0
- sdevpy/tools/timer.py +32 -0
- sdevpy/volsurfacegen/fbsabrgenerator.py +64 -0
- sdevpy/volsurfacegen/mchestongenerator.py +216 -0
- sdevpy/volsurfacegen/mcsabrgenerator.py +228 -0
- sdevpy/volsurfacegen/mczabrgenerator.py +227 -0
- sdevpy/volsurfacegen/sabrgenerator.py +282 -0
- sdevpy/volsurfacegen/smilegenerator.py +124 -0
- sdevpy/volsurfacegen/stovolfactory.py +44 -0
- sdevpy-0.0.1.dist-info/LICENSE +21 -0
- sdevpy-0.0.1.dist-info/METADATA +21 -0
- sdevpy-0.0.1.dist-info/RECORD +46 -0
- sdevpy-0.0.1.dist-info/WHEEL +5 -0
- sdevpy-0.0.1.dist-info/top_level.txt +1 -0
sdevpy/projects/pinns/ernst_pinns.py
@@ -0,0 +1,324 @@
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from time import time

# Strongly inspired by Blechschmidt and Ernst
# https://onlinelibrary.wiley.com/doi/full/10.1002/gamm.202100006

###################################################################################################
# ######## Runtime configuration
###################################################################################################
show_colocation = True
show_2d = False
show_1d = True
show_loss = True
n_epochs = 5000
DTYPE = 'float32'

# Boundary
tmin = 0.0
tmax = 1.0
xmin = -1.0
xmax = 1.0

###################################################################################################
# ######## Set problem specific data
###################################################################################################
# Set data type
tf.keras.backend.set_floatx(DTYPE)
print('TensorFlow version used: {}'.format(tf.__version__))

# Set constants
pi = tf.constant(np.pi, dtype=DTYPE)
viscosity = .01 / pi

# Define final payoff
def final_payoff(x):
    return -tf.sin(pi * x)


# Define boundary condition
def lw_boundary(t, x):
    n = x.shape[0]
    return tf.zeros((n, 1), dtype=DTYPE)


def up_boundary(t, x):
    n = x.shape[0]
    return tf.zeros((n, 1), dtype=DTYPE)


# PDE = 0
def burgers_pde(t, x, u, u_t, u_x, u_xx):
    return u_t + u * u_x - viscosity * u_xx


def pde(t, x, u, u_t, u_x, u_xx):
    return burgers_pde(t, x, u, u_t, u_x, u_xx)

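# In concrete terms, this script solves the standard viscous Burgers benchmark from the
# reference above: find u(t, x) on [0, 1] x [-1, 1] satisfying
#     u_t + u * u_x = viscosity * u_xx,    viscosity = 0.01 / pi,
# with initial condition u(0, x) = -sin(pi * x) (despite its name, final_payoff is
# imposed at t = tmin = 0 here) and homogeneous Dirichlet boundaries
# u(t, -1) = u(t, 1) = 0, as encoded by lw_boundary and up_boundary.
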
#################################################
# ######## Generate a set of collocation points
#################################################
# Set number of data points
n_final = 50
n_boundary = 50
n_pde = 10000

# Lower bounds
lb = tf.constant([tmin, xmin], dtype=DTYPE)

# Upper bounds
ub = tf.constant([tmax, xmax], dtype=DTYPE)

# Set random seed for reproducible results
tf.random.set_seed(0)

# Draw final payoff samples
t_0 = tf.ones((n_final, 1), dtype=DTYPE) * lb[0]
x_0 = tf.random.uniform((n_final, 1), lb[1], ub[1], dtype=DTYPE)
payoffPoints = tf.concat([t_0, x_0], axis=1)
# Evaluate payoff
payoffValues = final_payoff(x_0)

# Draw boundary samples
nb = 25
lw_t_b = tf.random.uniform((nb, 1), lb[0], ub[0], dtype=DTYPE)
lw_x_b = tf.ones((nb, 1), dtype=DTYPE) * lb[1]
# Reuse the same times
up_t_b = lw_t_b
up_x_b = tf.ones((nb, 1), dtype=DTYPE) * ub[1]
# Concatenate
t_b = tf.concat([lw_t_b, up_t_b], axis=0)
x_b = tf.concat([lw_x_b, up_x_b], axis=0)
boundary_points = tf.concat([t_b, x_b], axis=1)

# Evaluate boundary condition at (t_b, x_b)
lwBoundaryValues = lw_boundary(lw_t_b, lw_x_b)
upBoundaryValues = up_boundary(up_t_b, up_x_b)
boundary_values = tf.concat([lwBoundaryValues, upBoundaryValues], axis=0)

# Draw uniformly sampled collocation points
t_r = tf.random.uniform((n_pde, 1), lb[0], ub[0], dtype=DTYPE)
x_r = tf.random.uniform((n_pde, 1), lb[1], ub[1], dtype=DTYPE)
pde_points = tf.concat([t_r, x_r], axis=1)

# Collect boundary and initial data in lists
other_points = [payoffPoints, boundary_points]
other_values = [payoffValues, boundary_values]

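# Each point set generated above feeds one term of the loss defined below:
# (payoffPoints, payoffValues) pin down the initial condition, (boundary_points,
# boundary_values) pin down the Dirichlet boundaries, and pde_points carry the
# PDE residual, which needs no target values because the residual itself is
# driven towards zero.
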
#################################################
# ######## Illustrate collocation points
#################################################
if show_colocation:
    fig = plt.figure(figsize=(9, 6))
    plt.scatter(t_0, x_0, c=payoffValues, marker='X', vmin=-1, vmax=1)
    plt.scatter(t_b, x_b, c=boundary_values, marker='X', vmin=-1, vmax=1)
    plt.scatter(t_r, x_r, c='r', marker='.', alpha=0.1)
    plt.xlabel('$t$')
    plt.ylabel('$x$')
    plt.title('Positions of collocation points and boundary data')
    plt.show()

#################################################
# ######## Set up network architecture
#################################################
def init_model(num_hidden_layers=8, num_neurons_per_layer=20):
    # Initialize a feedforward neural network
    model_ = tf.keras.Sequential()

    # Input is two-dimensional (time + one spatial dimension)
    model_.add(tf.keras.Input(2))

    # Introduce a scaling layer to map the input from [lb, ub] to [-1, 1]
    scaling_layer = tf.keras.layers.Lambda(lambda x: 2.0 * (x - lb) / (ub - lb) - 1.0)
    model_.add(scaling_layer)

    # Append hidden layers
    for _ in range(num_hidden_layers):
        model_.add(tf.keras.layers.Dense(num_neurons_per_layer,
                                         activation=tf.keras.activations.get('tanh'),
                                         kernel_initializer='glorot_normal'))

    # Output is one-dimensional
    model_.add(tf.keras.layers.Dense(1))

    return model_

###########################################################
# ######## Define routines to determine loss and gradient
###########################################################
def get_r(model_, pde_points_):
    # A tf.GradientTape is used to compute derivatives in TensorFlow
    with tf.GradientTape(persistent=True) as tape:
        # Split t and x to compute partial derivatives
        t, x = pde_points_[:, 0:1], pde_points_[:, 1:2]

        # Variables t and x are watched during tape to compute derivatives u_t and u_x
        tape.watch(t)
        tape.watch(x)

        # Determine residual
        u = model_(tf.stack([t[:, 0], x[:, 0]], axis=1))

        # Compute gradient u_x within the GradientTape since we need the second derivatives
        u_x = tape.gradient(u, x)

    u_t = tape.gradient(u, t)
    u_xx = tape.gradient(u_x, x)

    del tape

    return pde(t, x, u, u_t, u_x, u_xx)

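# A note on the tape mechanics above: persistent=True allows tape.gradient to be
# called several times on the same tape, and u_x is computed while the tape is
# still recording, so the operations producing u_x are themselves traced and the
# later call tape.gradient(u_x, x) yields the second derivative u_xx. The tape
# is deleted explicitly once all derivatives have been extracted.
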
def compute_loss(model_, pde_points_, other_points_, other_values_):
    # Compute phi^r
    r = get_r(model_, pde_points_)
    phi_r = tf.reduce_mean(tf.square(r))

    # Initialize loss
    loss_ = phi_r

    # Add phi^0 and phi^b to the loss
    for i_ in range(len(other_points_)):
        u_pred = model_(other_points_[i_])
        loss_ += tf.reduce_mean(tf.square(other_values_[i_] - u_pred))

    return loss_


def get_grad(model_, pde_points_, other_points_, other_values_):
    with tf.GradientTape(persistent=True) as tape:
        # This tape is for derivatives with respect to trainable variables
        tape.watch(model_.trainable_variables)
        loss_ = compute_loss(model_, pde_points_, other_points_, other_values_)

    g = tape.gradient(loss_, model_.trainable_variables)
    del tape

    return loss_, g

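# Written out, the objective assembled above is the usual PINN loss
#     phi(theta) = mean_i r(t_i, x_i)^2                         (PDE residual, phi^r)
#                + mean_j (u_theta(0, x_j) - u_0(x_j))^2        (initial data, phi^0)
#                + mean_k (u_theta(t_k, x_k) - u_b(t_k, x_k))^2 (boundary data, phi^b)
# with the three terms weighted equally.
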
###########################################################
# ######## Set up optimizer and train model
###########################################################
# Initialize model aka u_\theta
model = init_model()

# We choose a piecewise decay of the learning rate, i.e. the step size in the
# gradient-descent-type algorithm: the first 1000 steps use a learning rate of 0.01,
# from 1000 - 3000: learning rate = 0.001
# from 3000 onwards: learning rate = 0.0005
lr = tf.keras.optimizers.schedules.PiecewiseConstantDecay([1000, 3000], [1e-2, 1e-3, 5e-4])

# Choose the optimizer
optim = tf.keras.optimizers.Adam(learning_rate=lr)

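# A quick way to inspect the schedule (not in the original script): the schedule
# object is callable on a step index, e.g.
#     print(lr(0).numpy(), lr(2000).numpy(), lr(4000).numpy())   # 0.01 0.001 0.0005
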

# Define one training step as a TensorFlow function to increase speed of training
@tf.function
def train_step():
    # Compute current loss and gradient w.r.t. parameters
    loss_, grad_theta = get_grad(model, pde_points, other_points, other_values)

    # Perform gradient descent step
    optim.apply_gradients(zip(grad_theta, model.trainable_variables))

    return loss_


# Start training
hist = []
t0 = time()

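# The @tf.function decorator compiles train_step into a static graph on first
# call; because the collocation tensors are captured by closure rather than
# passed as arguments, the function is traced once and then re-executed cheaply
# on every iteration.
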
for i in range(n_epochs + 1):
    loss = train_step()

    # Append current loss to hist
    hist.append(loss.numpy())

    # Output current loss every 50 iterations
    if i % 50 == 0:
        print('It {:05d}: loss = {:10.8e}'.format(i, loss))

# Print computation time
print('\nComputation time: {} seconds'.format(time() - t0))

###########################################################
# ######## Plot solution and evolution of loss
###########################################################
# #### Plot 1d solution
def plot_slice(n_points_, t_, x_min, x_max):
    t_space_ = t_
    x_space_ = np.linspace(x_min, x_max, n_points_)
    t_mat, x_mat = np.meshgrid(t_space_, x_space_)
    x_pred = np.vstack([t_mat.flatten(), x_mat.flatten()]).T
    y_pred = model(tf.cast(x_pred, DTYPE))
    plt.xlabel('$x$')
    plt.ylabel('$u$')
    # No closed form is available here, so only the network output is drawn
    plt.plot(x_space_, y_pred, color='red', label='NN')
    plt.legend(loc='upper right')

if show_1d:
    n_points = 100
    # Plot solution
    plt.ioff()
    plt.figure(figsize=(12, 12))
    plt.subplots_adjust(hspace=0.40)
    plt.subplot(3, 2, 1)
    plot_slice(n_points, 0.01, lb[1], ub[1])
    plt.subplot(3, 2, 2)
    plot_slice(n_points, 0.10, lb[1], ub[1])
    plt.subplot(3, 2, 3)
    plot_slice(n_points, 0.25, lb[1], ub[1])
    plt.subplot(3, 2, 4)
    plot_slice(n_points, 0.50, lb[1], ub[1])
    plt.subplot(3, 2, 5)
    plot_slice(n_points, 0.75, lb[1], ub[1])
    plt.subplot(3, 2, 6)
    plot_slice(n_points, 0.90, lb[1], ub[1])
    plt.show()

# #### Plot 2d solution
if show_2d:
    # Set up meshgrid
    N = 600
    tspace = np.linspace(lb[0], ub[0], N + 1)
    xspace = np.linspace(lb[1], ub[1], N + 1)
    T, X = np.meshgrid(tspace, xspace)
    Xgrid = np.vstack([T.flatten(), X.flatten()]).T

    # Determine predictions of u(t, x)
    upred = model(tf.cast(Xgrid, DTYPE))

    # Reshape upred
    U = upred.numpy().reshape(N + 1, N + 1)

    # Surface plot of solution u(t,x)
    fig = plt.figure(figsize=(9, 6))
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(T, X, U, cmap='viridis')
    ax.view_init(35, 35)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.set_zlabel('$u_\\theta(t,x)$')
    ax.set_title('Solution of Burgers equation')
    plt.show()

# Plot the evolution of loss
if show_loss:
    fig = plt.figure(figsize=(9, 6))
    ax = fig.add_subplot(111)
    ax.semilogy(range(len(hist)), hist, 'k-')
    ax.set_xlabel('$n_{epoch}$')
    ax.set_ylabel('$\\phi_{n_{epoch}}$')
    plt.show()

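A quick sanity check that could be appended to this script (a sketch, not part of the package) is to evaluate the trained network's PDE residual on a fresh batch of interior points, independent of the training collocation set:

    # Evaluate the PDE residual on points the optimizer never saw
    t_chk = tf.random.uniform((1000, 1), tmin, tmax, dtype=DTYPE)
    x_chk = tf.random.uniform((1000, 1), xmin, xmax, dtype=DTYPE)
    residual = get_r(model, tf.concat([t_chk, x_chk], axis=1))
    print('Mean |residual| on fresh points: {:.3e}'.format(tf.reduce_mean(tf.abs(residual)).numpy()))
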
sdevpy/projects/pinns/pinns.py
@@ -0,0 +1,345 @@
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sdevpy.analytics.black import black_formula
from time import time
from enum import Enum
import scipy.stats


class PayoffType(Enum):
    Forward = 1
    Call = 2
    Put = 3

###################################################################################################
# ######## Runtime configuration
###################################################################################################
show_colocation = False
show_pv = True
show_loss = True
n_epochs = 200
DTYPE = 'float32'

# Set number of data points
n_final = 250  # 50
nb = 25  # 25
n_boundary = 2 * nb  # 50
n_pde = 10000  # 10000

###################################################################################################
# ######## Set problem specific data
###################################################################################################
# Set data type
tf.keras.backend.set_floatx(DTYPE)

# Set constants
vol = 0.25
drift = 0.0
rate = 0.0
T = 10.0
S0 = 100.0
F = S0
k = 0.95 * S0
K = tf.constant(k, dtype=DTYPE)
scale = tf.constant(S0, dtype=DTYPE)
floor = tf.constant(0.0, dtype=DTYPE)
# payoff_type = PayoffType['Forward']
# payoff_type = PayoffType['Call']
payoff_type = PayoffType['Put']
is_call = (payoff_type == PayoffType.Call)

stdev = vol * np.sqrt(T)
conf = 0.99
p = scipy.stats.norm.ppf(conf)
xmin = F * np.exp(-0.5 * stdev * stdev - p * stdev)
xmax = F * np.exp(-0.5 * stdev * stdev + p * stdev)
print(xmin)
print(xmax)

# Boundaries
tmin = T
tmax = 0.0
# xmin = 0.8 * S0
# xmax = 1.2 * S0

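# The spatial domain is derived from the terminal distribution of the spot: with
# zero rates, S_T = F * exp(-0.5 * sigma^2 * T + sigma * sqrt(T) * Z) with
# Z ~ N(0, 1), so xmin and xmax above are the 1% and 99% quantiles of S_T
# (p = Phi^{-1}(0.99)): the network is only trained where the spot is likely to live.
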

# Define final payoff
def final_payoff(x):
    if payoff_type == PayoffType.Forward:
        return (x - K) / scale
    elif payoff_type == PayoffType.Call:
        return tf.math.maximum(x - K, floor) / scale
    elif payoff_type == PayoffType.Put:
        return tf.math.maximum(K - x, floor) / scale
    else:
        raise Exception("Unknown payoff type: " + str(payoff_type.value))


# Define boundary condition
def lw_boundary(t, x):
    n = x.shape[0]
    if payoff_type == PayoffType.Forward:
        return tf.ones((n, 1), dtype=DTYPE) * (x - K) / scale
    elif payoff_type == PayoffType.Call:
        return tf.zeros((n, 1), dtype=DTYPE)
    elif payoff_type == PayoffType.Put:
        return tf.ones((n, 1), dtype=DTYPE) * (K - x) / scale
    else:
        raise Exception("Unknown payoff type: " + str(payoff_type.value))


def up_boundary(t, x):
    n = x.shape[0]
    if payoff_type == PayoffType.Forward:
        return tf.ones((n, 1), dtype=DTYPE) * (x - K) / scale
    elif payoff_type == PayoffType.Call:
        return tf.ones((n, 1), dtype=DTYPE) * (x - K) / scale
    elif payoff_type == PayoffType.Put:
        return tf.zeros((n, 1), dtype=DTYPE)
    else:
        raise Exception("Unknown payoff type: " + str(payoff_type.value))


# PDE = 0
def pde(t, x, u, u_t, u_x, u_xx):
    return u_t + 0.5 * vol * vol * x * x * u_xx

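# With rate = drift = 0 this residual is the Black-Scholes backward equation
#     V_t + 0.5 * sigma^2 * S^2 * V_SS = 0
# without discounting or convection terms. The payoff is imposed at t = tmin = T
# and the PDE is solved back towards t = tmax = 0, which is why the time bounds
# above are deliberately reversed. The boundary conditions use intrinsic value:
# the side where the option is deep in the money returns (x - K) or (K - x),
# the other side returns zero.
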

###################################################################################################
# ######## Generate a set of collocation points
###################################################################################################
# Lower bounds
lb = tf.constant([tmin, xmin], dtype=DTYPE)

# Upper bounds
ub = tf.constant([tmax, xmax], dtype=DTYPE)

# Set random seed for reproducible results
tf.random.set_seed(0)

# Draw final payoff samples
t_0 = tf.ones((n_final, 1), dtype=DTYPE) * lb[0]
x_0 = tf.random.uniform((n_final, 1), lb[1], ub[1], dtype=DTYPE)
payoffPoints = tf.concat([t_0, x_0], axis=1)
# Evaluate payoff
payoffValues = final_payoff(x_0)

# Draw boundary samples
lw_t_b = tf.random.uniform((nb, 1), lb[0], ub[0], dtype=DTYPE)
lw_x_b = tf.ones((nb, 1), dtype=DTYPE) * lb[1]
# Reuse the same times
up_t_b = lw_t_b
up_x_b = tf.ones((nb, 1), dtype=DTYPE) * ub[1]
# Concatenate
t_b = tf.concat([lw_t_b, up_t_b], axis=0)
x_b = tf.concat([lw_x_b, up_x_b], axis=0)
boundary_points = tf.concat([t_b, x_b], axis=1)

# Evaluate boundary condition at (t_b, x_b)
lwBoundaryValues = lw_boundary(lw_t_b, lw_x_b)
upBoundaryValues = up_boundary(up_t_b, up_x_b)
boundary_values = tf.concat([lwBoundaryValues, upBoundaryValues], axis=0)

# Draw uniformly sampled collocation points
t_r = tf.random.uniform((n_pde, 1), lb[0], ub[0], dtype=DTYPE)
x_r = tf.random.uniform((n_pde, 1), lb[1], ub[1], dtype=DTYPE)
pde_points = tf.concat([t_r, x_r], axis=1)

# Collect boundary and initial data in lists
other_points = [payoffPoints, boundary_points]
other_values = [payoffValues, boundary_values]

###################################################################################################
# ######## Illustrate collocation points
###################################################################################################
if show_colocation:
    fig = plt.figure(figsize=(9, 6))
    plt.scatter(t_0, x_0, c=payoffValues, marker='X', vmin=-1, vmax=1)
    plt.scatter(t_b, x_b, c=boundary_values, marker='X', vmin=-1, vmax=1)
    plt.scatter(t_r, x_r, c='r', marker='.', alpha=0.1)
    plt.xlabel('$t$')
    plt.ylabel('$x$')
    plt.title('Positions of collocation points and boundary data')
    plt.show()

###################################################################################################
# ######## Set up network architecture
###################################################################################################
def init_model(num_hidden_layers=8, num_neurons_per_layer=20):
    # Initialize a feedforward neural network
    model_ = tf.keras.Sequential()

    # Input is two-dimensional (time + one spatial dimension)
    model_.add(tf.keras.Input(2))

    # Introduce a scaling layer to map the input from [lb, ub] to [-1, 1]
    scaling_layer = tf.keras.layers.Lambda(lambda x: 2.0 * (x - lb) / (ub - lb) - 1.0)
    model_.add(scaling_layer)

    # Append hidden layers
    for _ in range(num_hidden_layers):
        model_.add(tf.keras.layers.Dense(num_neurons_per_layer,
                                         activation=tf.keras.activations.get('tanh'),
                                         kernel_initializer='glorot_normal'))

    # Output is one-dimensional
    model_.add(tf.keras.layers.Dense(1))

    return model_


###################################################################################################
# ######## Define routines to determine loss and gradient
###################################################################################################
def get_r(model_, pde_points_):
    # A tf.GradientTape is used to compute derivatives in TensorFlow
    with tf.GradientTape(persistent=True) as tape:
        # Split t and x to compute partial derivatives
        t, x = pde_points_[:, 0:1], pde_points_[:, 1:2]

        # Variables t and x are watched during tape to compute derivatives u_t and u_x
        tape.watch(t)
        tape.watch(x)

        # Determine residual
        u = model_(tf.stack([t[:, 0], x[:, 0]], axis=1))

        # Compute gradient u_x within the GradientTape since we need the second derivatives
        u_x = tape.gradient(u, x)

    u_t = tape.gradient(u, t)
    u_xx = tape.gradient(u_x, x)

    del tape

    return pde(t, x, u, u_t, u_x, u_xx)

def compute_loss(model_, pde_points_, other_points_, other_values_):
    # Compute phi^r
    r = get_r(model_, pde_points_)
    phi_r = tf.reduce_mean(tf.square(r))

    # Initialize loss
    loss_ = phi_r

    # Add phi^0 and phi^b to the loss
    for i_ in range(len(other_points_)):
        u_pred = model_(other_points_[i_])
        loss_ += tf.reduce_mean(tf.square(other_values_[i_] - u_pred))

    return loss_


def get_grad(model_, pde_points_, other_points_, other_values_):
    with tf.GradientTape(persistent=True) as tape:
        # This tape is for derivatives with respect to trainable variables
        tape.watch(model_.trainable_variables)
        loss_ = compute_loss(model_, pde_points_, other_points_, other_values_)

    g = tape.gradient(loss_, model_.trainable_variables)
    del tape

    return loss_, g

###################################################################################################
# ######## Set up optimizer and define training steps
###################################################################################################
# Initialize model aka u_\theta
model = init_model()

# We choose a piecewise decay of the learning rate, i.e. the step size in the
# gradient-descent-type algorithm: the first 1000 steps use a learning rate of 0.01,
# from 1000 - 3000: learning rate = 0.001
# from 3000 onwards: learning rate = 0.0005
lr = tf.keras.optimizers.schedules.PiecewiseConstantDecay([1000, 3000], [1e-2, 1e-3, 5e-4])

# Choose the optimizer
optim = tf.keras.optimizers.Adam(learning_rate=lr)


# Define one training step as a TensorFlow function to increase speed of training
@tf.function
def train_step():
    # Compute current loss and gradient w.r.t. parameters
    loss_, grad_theta = get_grad(model, pde_points, other_points, other_values)

    # Perform gradient descent step
    optim.apply_gradients(zip(grad_theta, model.trainable_variables))

    return loss_


###################################################################################################
# ############# Training
###################################################################################################
hist = []
t0 = time()

for i in range(n_epochs + 1):
    loss = train_step()

    # Append current loss to hist
    hist.append(loss.numpy())

    # Output current loss every 50 iterations
    if i % 50 == 0:
        print('It {:05d}: loss = {:10.8e}'.format(i, loss))

# Print computation time
print('\nComputation time: {} seconds'.format(time() - t0))


###################################################################################################
# ######## Plot solution and evolution of loss
###################################################################################################
# #### Plot 1d solution
def plot_slice(n_points_, t_, x_min, x_max):
    t_space_ = t_
    x_space_ = np.linspace(x_min, x_max, n_points_)
    t_mat, x_mat = np.meshgrid(t_space_, x_space_)
    x_pred = np.vstack([t_mat.flatten(), x_mat.flatten()]).T
    y_pred = model(tf.cast(x_pred, DTYPE)) * scale
    closed_form = black_formula(x_space_, k, vol, T - t_, is_call)

    title = str(payoff_type.name) + " along spot direction at t = "
    plt.title(title + '%.2f' % t_ + ', K = %.2f' % k)
    plt.xlabel('Spot')
    plt.ylabel('PV')
    plt.plot(x_space_, closed_form, color='blue', label='Closed-Form')
    plt.plot(x_space_, y_pred, color='red', label='NN')
    plt.legend(loc='upper right')


if show_pv:
    n_points = 100
    # Plot solution
    plt.ioff()
    plt.figure(figsize=(12, 12))
    plt.subplots_adjust(hspace=0.40)
    plt.subplot(3, 2, 1)
    plot_slice(n_points, T / 100, lb[1], ub[1])
    plt.subplot(3, 2, 2)
    plot_slice(n_points, T / 10, lb[1], ub[1])
    plt.subplot(3, 2, 3)
    plot_slice(n_points, T / 4, lb[1], ub[1])
    plt.subplot(3, 2, 4)
    plot_slice(n_points, T / 2, lb[1], ub[1])
    plt.subplot(3, 2, 5)
    plot_slice(n_points, 3 * T / 4, lb[1], ub[1])
    plt.subplot(3, 2, 6)
    plot_slice(n_points, 9 * T / 10, lb[1], ub[1])
    plt.show()

# Plot the evolution of loss
if show_loss:
    fig = plt.figure(figsize=(9, 6))
    ax = fig.add_subplot(111)
    ax.semilogy(range(len(hist)), hist, 'k-')
    ax.set_xlabel('$n_{epoch}$')
    ax.set_ylabel('$\\phi_{n_{epoch}}$')
    plt.show()
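
sdevpy/analytics/black.py itself is not shown in this excerpt, so the exact signature of black_formula is an assumption. A minimal sketch consistent with the call site above, assuming black_formula(forward, strike, vol, maturity, is_call) returns the undiscounted Black price, could look like:

    import numpy as np
    import scipy.stats

    def black_formula(fwd, strike, vol, maturity, is_call):
        # Sketch only: the actual sdevpy implementation may differ.
        fwd = np.asarray(fwd, dtype=float)
        stdev = vol * np.sqrt(maturity)
        d1 = np.log(fwd / strike) / stdev + 0.5 * stdev
        d2 = d1 - stdev
        call = fwd * scipy.stats.norm.cdf(d1) - strike * scipy.stats.norm.cdf(d2)
        # Put via call-put parity (zero rates): put = call - (fwd - strike)
        return call if is_call else call - (fwd - strike)

With rate = 0 in pinns.py no discount factor is needed, which is consistent with the PDE residual used above.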