pyelq 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyelq/dlm.py ADDED
# SPDX-FileCopyrightText: 2024 Shell Global Solutions International B.V. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0

# -*- coding: utf-8 -*-
"""DLM module.

This module provides a class definition for Dynamic Linear Models following West and Harrison,
'Bayesian Forecasting and Dynamic Models' (2nd ed), Springer New York, NY, Chapter 4, https://doi.org/10.1007/b98971

"""
from dataclasses import dataclass, field
from typing import Tuple, Union

import numpy as np
from scipy.stats import chi2

@dataclass
class DLM:
    """Defines the DLM in line with West and Harrison (2nd edition) Chapter 4.

    Attributes:
        f_matrix (np.ndarray, optional): F matrix linking the state to the observables of
            size [nof_state_parameters x nof_observables]
        g_matrix (np.ndarray, optional): G matrix characterizing the state evolution of
            size [nof_state_parameters x nof_state_parameters]
        v_matrix (np.ndarray, optional): V matrix being the covariance matrix of the zero mean observation noise
            of size [nof_observables x nof_observables]
        w_matrix (np.ndarray, optional): W matrix being the covariance matrix of the zero mean system noise of
            size [nof_state_parameters x nof_state_parameters]
        g_power (np.ndarray): Attribute to store the powers G^k; set by calculate_g_power, not at initialization

    """

    f_matrix: np.ndarray = None
    g_matrix: np.ndarray = None
    v_matrix: np.ndarray = None
    w_matrix: np.ndarray = None
    g_power: np.ndarray = field(init=False)

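    # Editorial sketch (not part of the released file): a typical configuration, with
    # hypothetical values, is
    #     model = DLM()
    #     model.polynomial_f_g(nof_observables=1, order=1)
    #     model.v_matrix = np.array([[0.5]])
    #     model.w_matrix = 0.01 * np.eye(model.nof_state_parameters)
    #     model.calculate_g_power(max_power=10)
    # after which the properties below report nof_observables == 1 and
    # nof_state_parameters == 2.
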
    @property
    def nof_observables(self) -> int:
        """Int: Number of observables as derived from the associated F matrix."""
        if isinstance(self.f_matrix, np.ndarray):
            return self.f_matrix.shape[1]
        return 0

    @property
    def nof_state_parameters(self) -> int:
        """Int: Number of state parameters as derived from the associated G matrix."""
        if isinstance(self.g_matrix, np.ndarray):
            return self.g_matrix.shape[0]
        return 0

    def calculate_g_power(self, max_power: int) -> None:
        """Calculate the powers of the G matrix.

        Calculate the powers upfront, so we don't have to calculate them at every iteration. The result gets stored
        in the g_power attribute of the DLM class. We use an iterative way of calculating the powers
        (G^(k+1) = G^k @ G) to have the fewest matrix multiplications necessary, i.e. we are not calling
        numpy.linalg.matrix_power for each power separately, as that would lead to many repeated multiplications
        instead of the max_power multiplications we have now.

        Args:
            max_power (int): Maximum power to compute

        """
        if self.nof_state_parameters == 1:
            self.g_power = self.g_matrix ** np.arange(max_power + 1)[np.newaxis, np.newaxis, :]
        else:
            self.g_power = np.zeros((self.nof_state_parameters, self.nof_state_parameters, max_power + 1))
            self.g_power[:, :, 0] = np.identity(self.nof_state_parameters)
            for i in range(max_power):
                self.g_power[:, :, i + 1] = self.g_power[:, :, i] @ self.g_matrix

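    # Editorial sketch (not part of the released file): after calculate_g_power(3) on a
    # dense G, g_power[:, :, k] equals np.linalg.matrix_power(g_matrix, k) for k = 0..3,
    # but is built with one matrix multiplication per extra power, e.g.
    #     model.calculate_g_power(3)
    #     assert np.allclose(model.g_power[:, :, 2], model.g_matrix @ model.g_matrix)
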
    def polynomial_f_g(self, nof_observables: int, order: int) -> None:
        """Create F and G matrices associated with a polynomial DLM.

        Following West and Harrison (Chapter 7 on polynomial DLMs), with the exception that we use order==0 for a
        "constant" DLM, order==1 for a linear growth DLM, order==2 for quadratic growth, etc.
        Hence, the n-th order polynomial DLM as defined in West & Harrison is implemented here with order=n-1.
        We stack the observables in block diagonal form, so the first (order + 1) rows belong to the first
        observable, the next (order + 1) rows belong to the second observable, etc. (see the sketch after this
        method).
        Results are stored in the f_matrix and g_matrix attributes respectively.

        Args:
            nof_observables (int): Dimension of observation
            order (int): Polynomial order (0=constant, 1=linear, 2=quadratic etc.)

        """
        e_n = np.append(1, np.zeros(order))[:, None]
        self.f_matrix = np.kron(np.eye(nof_observables), e_n)

        l_n = np.triu(np.ones((order + 1, order + 1)))
        self.g_matrix = np.kron(np.eye(nof_observables), l_n)

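    # Editorial sketch (not part of the released file): for nof_observables=2 and
    # order=1 this produces the block diagonal linear growth structure
    #     f_matrix = [[1, 0],          g_matrix = [[1, 1, 0, 0],
    #                 [0, 0],                      [0, 1, 0, 0],
    #                 [0, 1],                      [0, 0, 1, 1],
    #                 [0, 0]]                      [0, 0, 0, 1]]
    # i.e. each observable carries its own (level, slope) pair in the state.
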
    def simulate_data(self, init_state: np.ndarray, nof_timesteps: int) -> Tuple[np.ndarray, np.ndarray]:
        """Simulate data from the DLM model.

        Function to simulate the state evolution and corresponding observations according to the model as specified
        through the DLM class attributes (F, G, V and W matrices).

        Args:
            init_state (np.ndarray): Initial state vector to start simulating from of size [nof_state_parameters x 1]
            nof_timesteps (int): Number of timesteps to simulate

        Returns:
            state (np.ndarray): Simulated state vectors of size [nof_state_parameters x nof_timesteps]
            obs (np.ndarray): Simulated observations of size [nof_observables x nof_timesteps]

        """
        if self.f_matrix is None or self.g_matrix is None or self.v_matrix is None or self.w_matrix is None:
            raise ValueError("Please specify all matrices (F, G, V and W)")

        obs = np.empty((self.nof_observables, nof_timesteps))
        state = np.empty((self.nof_state_parameters, nof_timesteps))

        mean_state_noise = np.zeros(self.nof_state_parameters)
        mean_observation_noise = np.zeros(self.nof_observables)

        for i in range(nof_timesteps):
            if i == 0:
                state[:, [i]] = (
                    self.g_matrix @ init_state
                    + np.random.multivariate_normal(mean_state_noise, self.w_matrix, size=1).T
                )
            else:
                state[:, [i]] = (
                    self.g_matrix @ state[:, [i - 1]]
                    + np.random.multivariate_normal(mean_state_noise, self.w_matrix, size=1).T
                )
            obs[:, [i]] = (
                self.f_matrix.T @ state[:, [i]]
                + np.random.multivariate_normal(mean_observation_noise, self.v_matrix, size=1).T
            )

        return state, obs

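    # Editorial note (not part of the released file): simulate_data draws from the
    # standard DLM observation and system equations of West & Harrison:
    #     Y_t     = F' theta_t + nu_t,        nu_t    ~ N(0, V)
    #     theta_t = G theta_{t-1} + omega_t,  omega_t ~ N(0, W)
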
    def forecast_mean(
        self, current_mean_state: np.ndarray, forecast_steps: Union[int, list, np.ndarray] = 1
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Perform forecasting of the state and observation mean parameters.

        Following West and Harrison (2nd ed) Chapter 4.4 (Forecast Distributions), corollary 4.1, assuming F and G
        are constant over time.
        Note that in the output arrays the second axis is the forecast dimension, consistent with the
        forecast_steps input; all forecast steps contained in the forecast_steps argument are returned.

        Args:
            current_mean_state (np.ndarray): Current mean parameter for the state of size [nof_state_parameters x 1]
            forecast_steps (Union[int, list, np.ndarray], optional): Steps ahead to forecast

        Returns:
            a_t_k (np.array): Forecast values of the state mean parameter of size
                [nof_state_parameters x size(forecast_steps)]
            f_t_k (np.array): Forecast values of the observation mean parameter of size
                [nof_observables x size(forecast_steps)]

        """
        min_forecast = np.amin(forecast_steps)

        if min_forecast < 1:
            raise ValueError(f"Minimum forecast should be >= 1, currently it is {min_forecast}")
        if isinstance(forecast_steps, int):
            forecast_steps = [forecast_steps]

        a_t_k = np.hstack([self.g_power[:, :, step] @ current_mean_state for step in forecast_steps])
        f_t_k = self.f_matrix.T @ a_t_k

        return a_t_k, f_t_k

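    # Editorial note (not part of the released file): this implements the k-step-ahead
    # mean forecasts
    #     a_t(k) = G^k m_t        (state mean)
    #     f_t(k) = F' a_t(k)      (observation mean)
    # using the precomputed powers in g_power, so calculate_g_power must have been
    # called with max_power >= max(forecast_steps).
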
    def forecast_covariance(
        self, c_matrix: np.ndarray, forecast_steps: Union[int, list, np.ndarray] = 1
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Perform forecasting of the state and observation covariance parameters.

        Following West and Harrison (2nd ed) Chapter 4.4 (Forecast Distributions), assuming F, G, V and W are
        constant over time.
        Note that in the output arrays the third axis is the forecast dimension, consistent with the forecast_steps
        input; all forecast steps contained in the forecast_steps argument are returned.
        sum_g_w_g[:, :, k - 1] accumulates the system noise contribution sum_{j=0}^{k-1} G^j @ W @ (G^j).T to the
        k-step-ahead forecast; for k == 1 this is just W, hence the initialization.
        Because of zero-based indexing, element i of sum_g_w_g corresponds to the (i + 1)-step-ahead forecast,
        while the third dimension of the g_power attribute serves as the actual power of the G matrix.

        Args:
            c_matrix (np.ndarray): Current posterior covariance estimate for the state of size
                [nof_state_parameters x nof_state_parameters]
            forecast_steps (Union[int, list, np.ndarray], optional): Steps ahead to forecast

        Returns:
            r_t_k (np.array): Forecast values of the estimated prior state covariance of size
                [nof_state_parameters x nof_state_parameters x size(forecast_steps)]
            q_t_k (np.array): Forecast values of the estimated observation covariance of size
                [nof_observables x nof_observables x size(forecast_steps)]

        """
        min_forecast = np.amin(forecast_steps)
        max_forecast = np.amax(forecast_steps)

        if min_forecast < 1:
            raise ValueError(f"Minimum forecast should be >= 1, currently it is {min_forecast}")
        if isinstance(forecast_steps, int):
            forecast_steps = [forecast_steps]

        sum_g_w_g = np.zeros((self.nof_state_parameters, self.nof_state_parameters, max_forecast))
        sum_g_w_g[:, :, 0] = self.w_matrix
        for i in np.arange(1, max_forecast, step=1):
            sum_g_w_g[:, :, i] = (
                sum_g_w_g[:, :, i - 1] + self.g_power[:, :, i] @ self.w_matrix @ self.g_power[:, :, i].T
            )

        r_t_k = np.dstack(
            [
                self.g_power[:, :, step] @ c_matrix @ self.g_power[:, :, step].T + sum_g_w_g[:, :, step - 1]
                for step in forecast_steps
            ]
        )
        q_t_k = np.dstack(
            [self.f_matrix.T @ r_t_k[:, :, idx] @ self.f_matrix + self.v_matrix for idx in range(r_t_k.shape[2])]
        )

        return r_t_k, q_t_k

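    # Editorial note (not part of the released file): the closed-form recursions
    # implemented above are
    #     R_t(k) = G^k C_t (G^k)' + sum_{j=0}^{k-1} G^j W (G^j)'   (state covariance)
    #     Q_t(k) = F' R_t(k) F + V                                 (observation covariance)
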
    def update_posterior(
        self, a_t: np.ndarray, r_matrix_t: np.ndarray, q_matrix_t: np.ndarray, error: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Update of the posterior mean and covariance of the state.

        Following West and Harrison (2nd ed) Chapter 4 (updating equations), assuming F, G, V and W are constant
        over time.
        We are using a solver instead of calculating the inverse of Q directly.
        Inf values in Q are set equal to 0 after the solve for computational reasons; otherwise we would get
        0 * inf = nan where we want the result to be 0.

        Args:
            a_t (np.ndarray): Current prior mean of the state of size [nof_state_parameters x 1]
            r_matrix_t (np.ndarray): Current prior covariance of the state of size
                [nof_state_parameters x nof_state_parameters]
            q_matrix_t (np.ndarray): Current one step ahead forecast covariance estimate of the observations of size
                [nof_observables x nof_observables]
            error (np.ndarray): Error associated with the one step ahead forecast (observation - forecast) of size
                [nof_observables x 1]

        Returns:
            m_t (np.array): Posterior mean estimate of the state of size [nof_state_parameters x 1]
            c_matrix (np.array): Posterior covariance estimate of the state of size
                [nof_state_parameters x nof_state_parameters]

        """
        if self.nof_state_parameters == 1:
            a_matrix_t = r_matrix_t @ self.f_matrix.T @ (1 / q_matrix_t)
        else:
            a_matrix_t = r_matrix_t @ np.linalg.solve(q_matrix_t.T, self.f_matrix.T).T
        m_t = a_t + a_matrix_t @ error
        q_matrix_t[np.isinf(q_matrix_t)] = 0
        c_matrix = r_matrix_t - a_matrix_t @ q_matrix_t @ a_matrix_t.T

        return m_t, c_matrix

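    # Editorial note (not part of the released file): written out, the update is the
    # standard (Kalman-type) DLM recursion
    #     A_t = R_t F Q_t^{-1}
    #     m_t = a_t + A_t e_t
    #     C_t = R_t - A_t Q_t A_t'
    # where e_t is the one step ahead forecast error passed in as `error`.
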
    def dlm_full_update(
        self,
        new_observation: np.ndarray,
        current_mean_state: np.ndarray,
        current_cov_state: np.ndarray,
        mode: str = "learn",
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Perform 1 step of the full DLM update.

        Following West and Harrison (2nd ed) we perform all steps to update the entire DLM model and obtain new
        estimates for all parameters involved, including nan value handling.
        When mode == 'learn' the parameters are updated; when mode == 'ignore' the current observation is ignored
        and the posterior is set equal to the prior.
        When no observation is present (i.e. a nan value) we adjust the covariance for that particular sensor such
        that its variance for that time instance is infinity and all cross (covariance) terms are 0. Instead of
        changing this in the V matrix, we simply adjust the Q matrix accordingly. Effectively, we set the posterior
        equal to the prior for that particular sensor and the uncertainty associated with the new forecast gets
        increased. We first set the error equal to zero for computational reasons, but set it back to nan at the
        end.

        Args:
            new_observation (np.ndarray): New observations to use in the updating of the estimates of size
                [nof_observables x 1]
            current_mean_state (np.ndarray): Current mean estimate for the state of size [nof_state_parameters x 1]
            current_cov_state (np.ndarray): Current covariance estimate for the state of size
                [nof_state_parameters x nof_state_parameters]
            mode (str, optional): String indicating whether the DLM needs to be updated using the new observation
                or not. Currently, `learn` and `ignore` are implemented

        Returns:
            new_mean_state (np.ndarray): New mean estimate for the state of size [nof_state_parameters x 1]
            new_cov_state (np.ndarray): New covariance estimate for the state of size
                [nof_state_parameters x nof_state_parameters]
            error (np.ndarray): Error between the observation and the forecast (observation - forecast) of size
                [nof_observables x 1]

        """
        a_t, f_t = self.forecast_mean(current_mean_state, forecast_steps=1)
        r_matrix_t, q_matrix_t = self.forecast_covariance(current_cov_state, forecast_steps=1)
        error = new_observation - f_t

        nan_bool = np.isnan(new_observation)
        nan_idx = np.argwhere(nan_bool.flatten())
        if np.any(nan_bool):
            q_matrix_t[nan_idx, :, 0] -= self.v_matrix[nan_idx, :]
            q_matrix_t[:, nan_idx, 0] -= self.v_matrix[:, nan_idx]
            q_matrix_t[nan_idx, nan_idx, 0] = np.inf
            error[nan_idx] = 0

        if mode == "learn":
            new_mean_state, new_cov_state = self.update_posterior(a_t, r_matrix_t[:, :, 0], q_matrix_t[:, :, 0], error)
        elif mode == "ignore":
            new_mean_state = a_t
            new_cov_state = r_matrix_t[:, :, 0]
        else:
            raise ValueError(f"Mode {mode} not implemented")

        error[nan_idx] = np.nan

        return new_mean_state, new_cov_state, error

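    # Editorial sketch (not part of the released file): a typical filtering pass calls
    # this method once per time step, feeding the posterior back in as the next prior:
    #     for t in range(nof_timesteps):
    #         mean_state, cov_state, _ = model.dlm_full_update(
    #             obs[:, [t]], mean_state, cov_state, mode="learn"
    #         )
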
    def calculate_mahalanobis_distance(
        self,
        new_observations: np.ndarray,
        current_mean_state: np.ndarray,
        current_cov_state: np.ndarray,
        forecast_steps: int = 1,
        return_statistics: bool = False,
    ) -> Union[Tuple[float, np.ndarray], Tuple[float, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]]:
        """Calculate the Mahalanobis distance.

        Calculating the Mahalanobis distance, which is defined as error.T @ covariance^(-1) @ error.
        The error is flattened in row-major (C-style) order. This stacks the rows, which in our case are the errors
        per observation parameter, and this is exactly what we want:
        array([[1, 2], [3, 4]]).reshape((-1, 1), order='C') becomes the column vector [1, 2, 3, 4].
        We use a solve method instead of calculating inverse matrices directly.
        When calculating mhd_per_obs_param we use the partial result and reshape the temporary output such that we
        can sum the correct elements associated with the same observable together.
        When no observation is present (i.e. a nan value) we adjust the covariance for that particular sensor such
        that its variance for that time instance is infinity and all cross (covariance) terms are 0. Instead of
        changing this in the V matrix, we simply adjust the Q matrix accordingly. Effectively, we set the posterior
        equal to the prior for that particular sensor and the uncertainty associated with the new forecast gets
        increased. We set the error equal to zero for computational reasons, but this does decrease the number of
        degrees of freedom for that particular Mahalanobis distance calculation, basically decreasing the
        Mahalanobis distance. We therefore provide the option to output the number of degrees of freedom and the
        chi2 statistic, which allows this decrease in degrees of freedom to be taken into account.

        Args:
            new_observations (np.ndarray): New observations to use in the calculation of the Mahalanobis distance
                of size [nof_observables x forecast_steps]
            current_mean_state (np.ndarray): Current mean estimate for the state of size [nof_state_parameters x 1]
            current_cov_state (np.ndarray): Current covariance estimate for the state of size
                [nof_state_parameters x nof_state_parameters]
            forecast_steps (int, optional): Number of steps ahead to forecast and use in the Mahalanobis distance
                calculation
            return_statistics (bool, optional): Boolean to return used degrees of freedom and chi2 statistic

        Returns:
            mhd_overall (float): Mahalanobis distance over all observables
            mhd_per_obs_param (np.ndarray): Mahalanobis distance per observation parameter of size
                [nof_observables, 1] (returned as a float when nof_observables == 1)
            dof_overall (int): Overall degrees of freedom used, only returned when return_statistics is True
            dof_per_obs_param (np.ndarray): Degrees of freedom used per observation parameter of size
                [nof_observables, 1], only returned when return_statistics is True
            chi2_cdf_overall (float): Chi2 cdf value of mhd_overall, only returned when return_statistics is True
            chi2_cdf_per_obs_param (np.ndarray): Chi2 cdf values of mhd_per_obs_param of size [nof_observables, 1],
                only returned when return_statistics is True

        """
        if forecast_steps <= 0:
            raise ValueError("Forecast steps should be a positive integer")

        if new_observations.size / self.nof_observables != forecast_steps:
            raise ValueError("Sizes of new observations and forecast steps are not aligning")

        _, f_t_k = self.forecast_mean(current_mean_state, forecast_steps=np.array(range(forecast_steps)) + 1)

        if new_observations.shape != f_t_k.shape:
            raise ValueError("Dimensions of new_observations are not aligning with dimensions of forecast")

        error = np.subtract(new_observations, f_t_k).reshape((-1, 1), order="C")

        r_t_k, q_t_k = self.forecast_covariance(current_cov_state, forecast_steps=np.array(range(forecast_steps)) + 1)

        nan_bool = np.isnan(new_observations)
        if np.any(nan_bool):
            nan_idx = np.argwhere(nan_bool)
            for value in nan_idx:
                q_t_k[value[0], :, value[1]] -= self.v_matrix[value[0], :]
                q_t_k[:, value[0], value[1]] -= self.v_matrix[:, value[0]]

            q_t_k[nan_idx[:, 0], nan_idx[:, 0], nan_idx[:, 1]] = np.inf
            error[nan_bool.reshape((-1, 1), order="C")] = 0

        if forecast_steps > 1:
            full_covariance = self.create_full_covariance(r_t_k=r_t_k, q_t_k=q_t_k, forecast_steps=forecast_steps)
        else:
            full_covariance = q_t_k[:, :, 0]

        mhd_overall = mahalanobis_distance(error=error, cov_matrix=full_covariance)
        mhd_per_obs_param = np.empty((self.nof_observables, 1))

        for i_obs in range(self.nof_observables):
            ind_hrz = np.array(range(forecast_steps)) + i_obs * forecast_steps
            mhd_per_obs_param[i_obs] = mahalanobis_distance(
                error=error[ind_hrz], cov_matrix=full_covariance[np.ix_(ind_hrz, ind_hrz)]
            )

        if self.nof_observables == 1:
            mhd_per_obs_param = mhd_per_obs_param.item()

        if return_statistics:
            dof_per_obs_param = (nan_bool.shape[1] - np.count_nonzero(nan_bool, axis=1)).reshape(
                self.nof_observables, 1
            )
            dof_overall = dof_per_obs_param.sum()
            chi2_cdf_per_obs_param = chi2.cdf(
                np.asarray(mhd_per_obs_param).flatten(), dof_per_obs_param.flatten()
            ).reshape(self.nof_observables, 1)
            chi2_cdf_overall = chi2.cdf(mhd_overall, dof_overall)

            return (
                mhd_overall,
                mhd_per_obs_param,
                dof_overall,
                dof_per_obs_param,
                chi2_cdf_overall,
                chi2_cdf_per_obs_param,
            )

        return mhd_overall, mhd_per_obs_param

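    # Editorial sketch (not part of the released file, names hypothetical): with
    # return_statistics=True the chi2 cdf values can be used directly for anomaly
    # detection, flagging a window whose overall distance is extreme under the chi2
    # distribution with dof_overall degrees of freedom:
    #     out = model.calculate_mahalanobis_distance(obs_window, m, c,
    #                                                forecast_steps=5, return_statistics=True)
    #     mhd, _, dof, _, chi2_cdf, _ = out
    #     is_anomalous = chi2_cdf > 0.99
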
    def create_full_covariance(self, r_t_k: np.ndarray, q_t_k: np.ndarray, forecast_steps: int) -> np.ndarray:
        """Helper function to construct the full covariance matrix.

        Following West and Harrison (2nd ed) Chapter 4.4 (Forecast Distributions), Theorem 4.2 and corollary 4.2,
        we construct the full covariance matrix. This full covariance matrix is the covariance matrix of all
        forecasted observations with respect to each other. Hence, it is Cov[Y_{t+k}, Y_{t+j}] with
        1 <= j, k <= forecast_steps and Y_{t+k} the k-step-ahead forecast of the observation at time t.

        The matrix is built up using different blocks for the different covariances between observables m and n.
        The diagonals of each block are calculated first as q_t_k[m, n, :].
        Next, the (i, j)-th (lower triangular) entry of the (m, n)-th block is calculated as
        (F.T @ G^(i-j) @ r_t_k[:, :, j] @ F)[m, n].
        Next, the upper triangular part of each lower diagonal block is filled in, and finally the entire upper
        triangular part of the full matrix is filled in by symmetry.

        Args:
            r_t_k (np.array): Forecast values of the estimated prior state covariance of size
                [nof_state_parameters x nof_state_parameters x forecast_steps]
            q_t_k (np.array): Forecast values of the estimated observation covariance of size
                [nof_observables x nof_observables x forecast_steps]
            forecast_steps (int): Maximum number of steps ahead to forecast and use all of those in the Mahalanobis
                distance calculation

        Returns:
            full_covariance (np.array): Full covariance matrix of all forecasted observations with respect to each
                other, having size [(nof_observables * forecast_steps) x (nof_observables * forecast_steps)]

        """
        full_covariance = np.zeros((forecast_steps * self.nof_observables, forecast_steps * self.nof_observables))
        base_idx = np.array(range(forecast_steps))
        for block_i in range(self.nof_observables):
            for block_j in range(block_i + 1):
                block_rows = base_idx + block_i * forecast_steps
                block_cols = base_idx + block_j * forecast_steps
                full_covariance[block_rows, block_cols] = q_t_k[block_i, block_j, :]

        temp_idx = np.array(range(self.nof_observables))
        for sub_i in np.arange(start=1, stop=forecast_steps, step=1):
            sub_row = temp_idx * forecast_steps + sub_i
            for sub_j in range(sub_i):
                sub_col = temp_idx * forecast_steps + sub_j
                sub_idx = np.ix_(sub_row, sub_col)
                full_covariance[sub_idx] = (
                    self.f_matrix.T @ self.g_power[:, :, sub_i - sub_j] @ r_t_k[:, :, sub_j] @ self.f_matrix
                )

        for block_i in range(self.nof_observables):
            for block_j in range(block_i):
                block_rows = base_idx + block_i * forecast_steps
                block_cols = base_idx + block_j * forecast_steps
                block_idx = np.ix_(block_rows, block_cols)
                full_covariance[block_idx] = full_covariance[block_idx] + np.tril(full_covariance[block_idx], k=-1).T

        full_covariance = np.tril(full_covariance) + np.tril(full_covariance, k=-1).T

        return full_covariance

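# Editorial note (not part of the released file): the cross-covariance filled in by
# create_full_covariance for step pair (k, j) with k > j, matching the Theorem 4.2
# reference in its docstring, is
#     Cov[Y_{t+k}, Y_{t+j}] = F' G^(k-j) R_t(j) F
# while the diagonal blocks (k == j) are the Q_t(k) from forecast_covariance.
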
def mahalanobis_distance(error: np.ndarray, cov_matrix: np.ndarray) -> float:
    """Calculate the Mahalanobis distance for multivariate observations.

    m = e.T @ inv(cov) @ e
    The solve can produce nan values when np.inf is present in cov_matrix (due to floating point limitations),
    hence we replace inf with a large finite value and solve again in that case.

    Args:
        error (np.ndarray): [p x 1] observation error vector
        cov_matrix (np.ndarray): [p x p] covariance matrix

    Returns:
        float: Mahalanobis distance score for the observation error

    """
    if cov_matrix.size == 1:
        return error.item() ** 2 / cov_matrix.item()

    partial_solution = np.linalg.solve(cov_matrix, error)
    if np.any(np.isnan(partial_solution)):
        cov_matrix[np.isinf(cov_matrix)] = 1e100
        partial_solution = np.linalg.solve(cov_matrix, error)

    return np.sum(error * partial_solution, axis=0).item()
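

if __name__ == "__main__":
    # Editorial usage sketch (not part of the released module): end-to-end run of the
    # class above on simulated data. Simulate a univariate linear growth DLM, filter
    # 40 observations, then score the next 5 with the Mahalanobis distance. All values
    # below are illustrative assumptions.
    np.random.seed(0)

    model = DLM()
    model.polynomial_f_g(nof_observables=1, order=1)
    model.v_matrix = np.array([[0.5]])
    model.w_matrix = 0.01 * np.eye(model.nof_state_parameters)
    model.calculate_g_power(max_power=10)

    init_state = np.array([[0.0], [0.1]])
    _, obs = model.simulate_data(init_state=init_state, nof_timesteps=45)

    # Filter: feed each posterior back in as the next prior.
    mean_state, cov_state = init_state, np.eye(model.nof_state_parameters)
    for t in range(40):
        mean_state, cov_state, _ = model.dlm_full_update(
            obs[:, [t]], mean_state, cov_state, mode="learn"
        )

    # Score the 5 held-out observations against the 5-step-ahead forecast.
    mhd, _ = model.calculate_mahalanobis_distance(
        obs[:, 40:45], mean_state, cov_state, forecast_steps=5
    )
    print(f"5-step-ahead Mahalanobis distance: {mhd:.2f}")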