pymc-extras 0.4.1__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pymc_extras/inference/__init__.py +8 -1
- pymc_extras/inference/dadvi/__init__.py +0 -0
- pymc_extras/inference/dadvi/dadvi.py +261 -0
- pymc_extras/inference/fit.py +5 -0
- pymc_extras/inference/laplace_approx/find_map.py +16 -8
- pymc_extras/inference/laplace_approx/idata.py +5 -2
- pymc_extras/inference/laplace_approx/laplace.py +1 -0
- pymc_extras/statespace/models/DFM.py +849 -0
- pymc_extras/statespace/models/SARIMAX.py +4 -4
- pymc_extras/statespace/models/VARMAX.py +7 -7
- pymc_extras/statespace/utils/constants.py +3 -1
- {pymc_extras-0.4.1.dist-info → pymc_extras-0.5.0.dist-info}/METADATA +1 -1
- {pymc_extras-0.4.1.dist-info → pymc_extras-0.5.0.dist-info}/RECORD +15 -12
- {pymc_extras-0.4.1.dist-info → pymc_extras-0.5.0.dist-info}/WHEEL +0 -0
- {pymc_extras-0.4.1.dist-info → pymc_extras-0.5.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,849 @@
|
|
|
1
|
+
from collections.abc import Sequence
|
|
2
|
+
from typing import Any
|
|
3
|
+
|
|
4
|
+
import pytensor
|
|
5
|
+
import pytensor.tensor as pt
|
|
6
|
+
|
|
7
|
+
from pymc_extras.statespace.core.statespace import PyMCStateSpace
|
|
8
|
+
from pymc_extras.statespace.models.utilities import make_default_coords
|
|
9
|
+
from pymc_extras.statespace.utils.constants import (
|
|
10
|
+
ALL_STATE_AUX_DIM,
|
|
11
|
+
ALL_STATE_DIM,
|
|
12
|
+
AR_PARAM_DIM,
|
|
13
|
+
ERROR_AR_PARAM_DIM,
|
|
14
|
+
EXOG_STATE_DIM,
|
|
15
|
+
FACTOR_DIM,
|
|
16
|
+
OBS_STATE_AUX_DIM,
|
|
17
|
+
OBS_STATE_DIM,
|
|
18
|
+
TIME_DIM,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
floatX = pytensor.config.floatX
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class BayesianDynamicFactor(PyMCStateSpace):
|
|
25
|
+
r"""
|
|
26
|
+
Dynamic Factor Models
|
|
27
|
+
|
|
28
|
+
Notes
|
|
29
|
+
-----
|
|
30
|
+
The Dynamic Factor Model (DFM) is a multivariate state-space model used to represent high-dimensional time series
|
|
31
|
+
as being driven by a smaller set of unobserved dynamic factors.
|
|
32
|
+
|
|
33
|
+
Given a set of observed time series :math:`\{y_t\}_{t=0}^T`, where
|
|
34
|
+
|
|
35
|
+
.. math::
|
|
36
|
+
y_t = \begin{bmatrix} y_{1,t} & y_{2,t} & \cdots & y_{k_{\text{endog}},t} \end{bmatrix}^T,
|
|
37
|
+
|
|
38
|
+
the DFM assumes that each series is a linear combination of a few latent factors and (optionally) autoregressive errors.
|
|
39
|
+
|
|
40
|
+
Let:
|
|
41
|
+
- :math:`k` be the number of dynamic factors (k_factors),
|
|
42
|
+
- :math:`p` be the order of the latent factor process (factor_order),
|
|
43
|
+
- :math:`q` be the order of the observation error process (error_order).
|
|
44
|
+
|
|
45
|
+
The model equations are in reduced form is:
|
|
46
|
+
|
|
47
|
+
.. math::
|
|
48
|
+
y_t &= \Lambda f_t + B x_t + u_t + \eta_t \\
|
|
49
|
+
f_t &= A_1 f_{t-1} + \cdots + A_p f_{t-p} + \varepsilon_{f,t} \\
|
|
50
|
+
u_t &= C_1 u_{t-1} + \cdots + C_q u_{t-q} + \varepsilon_{u,t}
|
|
51
|
+
|
|
52
|
+
Where:
|
|
53
|
+
- :math:`f_t` is the vector of latent dynamic factors (size :math:`k`),
|
|
54
|
+
- :math:`x_t` is an optional vector of exogenous variables
|
|
55
|
+
- :math:`u_t` is a vector of autoregressive observation errors (if `error_var=True` with a VAR(q) structure, else treated as independent AR processes),
|
|
56
|
+
- :math:`\eta_t \sim \mathcal{N}(0, H_t)` is an optional measurement error (if `measurement_error=True`),
|
|
57
|
+
- :math:`\varepsilon_{f,t} \sim \mathcal{N}(0, I)` and :math:`\varepsilon_{u,t} \sim \mathcal{N}(0, \Sigma_u)` are independent noise terms.
|
|
58
|
+
To identify the factors, the innovations to the factor process are standardized with identity covariance.
|
|
59
|
+
|
|
60
|
+
Internally, the model is represented in state-space form by stacking all current and lagged latent factors and (if present)
|
|
61
|
+
AR observation errors into a single state vector of dimension: :math:: k_{\text{states}} = k \cdot p + k_{\text{endog}} \cdot q,
|
|
62
|
+
where :math:`k_{\text{endog}}` is the number of observed time series.
|
|
63
|
+
|
|
64
|
+
The state vector is defined as:
|
|
65
|
+
|
|
66
|
+
.. math::
|
|
67
|
+
s_t = \begin{bmatrix}
|
|
68
|
+
f_t(1) \\
|
|
69
|
+
\vdots \\
|
|
70
|
+
f_t(k) \\
|
|
71
|
+
f_{t-p+1}(1) \\
|
|
72
|
+
\vdots \\
|
|
73
|
+
f_{t-p+1}(k) \\
|
|
74
|
+
u_t(1) \\
|
|
75
|
+
\vdots \\
|
|
76
|
+
u_t(k_{\text{endog}}) \\
|
|
77
|
+
\vdots \\
|
|
78
|
+
u_{t-q+1}(1) \\
|
|
79
|
+
\vdots \\
|
|
80
|
+
u_{t-q+1}(k_{\text{endog}})
|
|
81
|
+
\end{bmatrix}
|
|
82
|
+
\in \mathbb{R}^{k_{\text{states}}}
|
|
83
|
+
|
|
84
|
+
The transition equation is given by:
|
|
85
|
+
|
|
86
|
+
.. math::
|
|
87
|
+
s_{t+1} = T s_t + R \epsilon_t
|
|
88
|
+
|
|
89
|
+
Where:
|
|
90
|
+
- :math:`T` is the state transition matrix, composed of:
|
|
91
|
+
- VAR coefficients :math:`A_1, \dots, A_{p*k_factors}` for the factors,
|
|
92
|
+
- (if enabled) AR coefficients :math:`C_1, \dots, C_q` for the observation errors.
|
|
93
|
+
.. math::
|
|
94
|
+
T = \begin{bmatrix}
|
|
95
|
+
A_{1,1} & A_{1,2} & \cdots & A_{1,p} & 0 & 0 & \cdots & 0 \\
|
|
96
|
+
A_{2,1} & A_{2,2} & \cdots & A_{2,p} & 0 & 0 & \cdots & 0 \\
|
|
97
|
+
1 & 0 & \cdots & 0 & 0 & 0 & \cdots & 0 \\
|
|
98
|
+
0 & 1 & \cdots & 0 & 0 & 0 & \cdots & 0 \\
|
|
99
|
+
\vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots & \vdots \\
|
|
100
|
+
\hline
|
|
101
|
+
0 & 0 & \cdots & 0 & C_{1,1} & \cdots & C_{1,2} & C_{1,q} \\
|
|
102
|
+
0 & 0 & \cdots & 0 & 1 & 0 & \cdots & 0 \\
|
|
103
|
+
0 & 0 & \cdots & 0 & 0 & 1 & \cdots & 0 \\
|
|
104
|
+
\vdots & \vdots & & \vdots & \vdots & \vdots & \ddots & \vdots
|
|
105
|
+
\end{bmatrix}
|
|
106
|
+
\in \mathbb{R}^{k_{\text{states}} \times k_{\text{states}}}
|
|
107
|
+
|
|
108
|
+
- :math:`\epsilon_t` contains the independent shocks (innovations) and has dimension :math:`k + k_{\text{endog}}` if AR errors are included.
|
|
109
|
+
.. math::
|
|
110
|
+
\epsilon_t = \begin{bmatrix}
|
|
111
|
+
\epsilon_{f,t} \\
|
|
112
|
+
\epsilon_{u,t}
|
|
113
|
+
\end{bmatrix}
|
|
114
|
+
\in \mathbb{R}^{k + k_{\text{endog}}}
|
|
115
|
+
|
|
116
|
+
- :math:`R` is a selection matrix mapping shocks to state transitions.
|
|
117
|
+
.. math::
|
|
118
|
+
R = \begin{bmatrix}
|
|
119
|
+
1 & 0 & \cdots & 0 & 0 & 0 & \cdots & 0 \\
|
|
120
|
+
0 & 1 & \cdots & 0 & 0 & 0 & \cdots & 0 \\
|
|
121
|
+
\vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots & \vdots \\
|
|
122
|
+
0 & 0 & \cdots & 1 & 0 & 0 & \cdots & 0 \\
|
|
123
|
+
0 & 0 & \cdots & 0 & 1 & 0 & \cdots & 0 \\
|
|
124
|
+
0 & 0 & \cdots & 0 & 0 & 1 & \cdots & 0 \\
|
|
125
|
+
\vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots & \vdots \\
|
|
126
|
+
\end{bmatrix}
|
|
127
|
+
\in \mathbb{R}^{k_{\text{states}} \times (k + k_{\text{endog}})}
|
|
128
|
+
|
|
129
|
+
The observation equation is given by:
|
|
130
|
+
|
|
131
|
+
.. math::
|
|
132
|
+
|
|
133
|
+
y_t = Z s_t + \eta_t
|
|
134
|
+
|
|
135
|
+
where
|
|
136
|
+
|
|
137
|
+
- :math:`y_t` is the vector of observed variables at time :math:`t`
|
|
138
|
+
|
|
139
|
+
- :math:`Z` is the design matrix of the state space representation
|
|
140
|
+
.. math::
|
|
141
|
+
Z = \begin{bmatrix}
|
|
142
|
+
\lambda_{1,1} & \lambda_{1,k} & \vdots & 1 & 0 & \cdots & 0 & 0 & \cdots & 0 \\
|
|
143
|
+
\lambda_{2,1} & \lambda_{2,k} & \vdots & 0 & 1 & \cdots & 0 & \cdots & 0 \\
|
|
144
|
+
\vdots & \vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \ddots & \vdots \\
|
|
145
|
+
\lambda_{k_{\text{endog}},1} & \cdots & \lambda_{k_{\text{endog}},k} & 0 & 0 & \cdots & 1 & 0 & \cdots & 0 \\
|
|
146
|
+
\end{bmatrix}
|
|
147
|
+
\in \mathbb{R}^{k_{\text{endog}} \times k_{\text{states}}}
|
|
148
|
+
|
|
149
|
+
- :math:`\eta_t` is the vector of observation errors at time :math:`t`
|
|
150
|
+
|
|
151
|
+
When exogenous variables :math:`x_t` are present, the implementation follows `pymc_extras/statespace/models/structural/components/regression.py`.
|
|
152
|
+
In this case, the state vector is extended to include the beta parameters, and the design matrix is modified accordingly,
|
|
153
|
+
becoming 3-dimensional to handle time-varying exogenous regressors.
|
|
154
|
+
This approach provides greater flexibility, controlled by the boolean flags `shared_exog_state` and `exog_innovations`.
|
|
155
|
+
Unlike Statsmodels, where exogenous variables are included only in the observation equation, here they are fully integrated into the state-space
|
|
156
|
+
representation.
|
|
157
|
+
|
|
158
|
+
.. warning::
|
|
159
|
+
|
|
160
|
+
Identification can be an issue, particularly when many observed series load onto only a few latent factors.
|
|
161
|
+
These models are only identified up to a sign flip in the factor loadings. Proper prior specification is crucial
|
|
162
|
+
for good estimation and inference.
|
|
163
|
+
|
|
164
|
+
Examples
|
|
165
|
+
--------
|
|
166
|
+
The following code snippet estimates a dynamic factor model with 1 latent factors,
|
|
167
|
+
a AR(2) structure on the factor and a AR(1) structure on the errors:
|
|
168
|
+
|
|
169
|
+
.. code:: python
|
|
170
|
+
|
|
171
|
+
import pymc_extras.statespace as pmss
|
|
172
|
+
import pymc as pm
|
|
173
|
+
|
|
174
|
+
# Create DFM Statespace Model
|
|
175
|
+
dfm_mod = pmss.BayesianDynamicFactor(
|
|
176
|
+
k_factors=1, # Number of latent dynamic factors
|
|
177
|
+
factor_order=2, # Number of lags for the latent factor process
|
|
178
|
+
endog_names=data.columns, # Names of the observed time series (endogenous variables) (we could also use k_endog = len(data.columns))
|
|
179
|
+
error_order=1, # Order of the autoregressive process for the observation noise (i.e., AR(q) error, here q=1)
|
|
180
|
+
error_var=False, # If False, models errors as separate AR processes
|
|
181
|
+
error_cov_type="diagonal", # Structure of the observation error covariance matrix: uncorrelated noise across series
|
|
182
|
+
measurement_error=True, # Whether to include a measurement error term in the model
|
|
183
|
+
verbose=True
|
|
184
|
+
)
|
|
185
|
+
|
|
186
|
+
# Unpack coords
|
|
187
|
+
coords = dfm_mod.coords
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
with pm.Model(coords=coords) as pymc_mod:
|
|
191
|
+
# Priors for the initial state mean and covariance
|
|
192
|
+
x0 = pm.Normal("x0", dims=["state_dim"])
|
|
193
|
+
P0 = pm.HalfNormal("P0", dims=["state_dim", "state_dim"])
|
|
194
|
+
|
|
195
|
+
# Factor loadings: shape (k_endog, k_factors)
|
|
196
|
+
factor_loadings = pm.Normal("factor_loadings", sigma=1, dims=["k_endog", "k_factors"])
|
|
197
|
+
|
|
198
|
+
# AR coefficients for factor dynamics: shape (k_factors, factor_order)
|
|
199
|
+
factor_ar = pm.Normal("factor_ar", sigma=1, dims=["k_factors", "ar_params"])
|
|
200
|
+
|
|
201
|
+
# AR coefficients for observation noise: shape (k_endog, error_order)
|
|
202
|
+
error_ar = pm.Normal("error_ar", sigma=1, dims=["k_endog", "error_order"])
|
|
203
|
+
|
|
204
|
+
# Std devs for observation noise: shape (k_endog,)
|
|
205
|
+
error_sigma = pm.HalfNormal("error_sigma", dims=["k_endog"])
|
|
206
|
+
|
|
207
|
+
# Observation noise covariance matrix
|
|
208
|
+
obs_sigma = pm.HalfNormal("sigma_obs", dims=["k_endog"])
|
|
209
|
+
|
|
210
|
+
# Build the symbolic graph and attach it to the model
|
|
211
|
+
dfm_mod.build_statespace_graph(data=data, mode="JAX")
|
|
212
|
+
|
|
213
|
+
# Sampling
|
|
214
|
+
idata = pm.sample(
|
|
215
|
+
draws=500,
|
|
216
|
+
chains=2,
|
|
217
|
+
nuts_sampler="nutpie",
|
|
218
|
+
nuts_sampler_kwargs={"backend": "jax", "gradient_backend": "jax"},
|
|
219
|
+
)
|
|
220
|
+
|
|
221
|
+
"""
|
|
222
|
+
|
|
223
|
+
def __init__(
|
|
224
|
+
self,
|
|
225
|
+
k_factors: int,
|
|
226
|
+
factor_order: int,
|
|
227
|
+
k_endog: int | None = None,
|
|
228
|
+
endog_names: Sequence[str] | None = None,
|
|
229
|
+
k_exog: int | None = None,
|
|
230
|
+
exog_names: Sequence[str] | None = None,
|
|
231
|
+
shared_exog_states: bool = False,
|
|
232
|
+
exog_innovations: bool = False,
|
|
233
|
+
error_order: int = 0,
|
|
234
|
+
error_var: bool = False,
|
|
235
|
+
error_cov_type: str = "diagonal",
|
|
236
|
+
measurement_error: bool = False,
|
|
237
|
+
verbose: bool = True,
|
|
238
|
+
):
|
|
239
|
+
"""
|
|
240
|
+
Create a Bayesian Dynamic Factor Model.
|
|
241
|
+
|
|
242
|
+
Parameters
|
|
243
|
+
----------
|
|
244
|
+
k_factors : int
|
|
245
|
+
Number of latent factors.
|
|
246
|
+
|
|
247
|
+
factor_order : int
|
|
248
|
+
Order of the VAR process for the latent factors. If set to 0, the factors have no autoregressive dynamics
|
|
249
|
+
and are modeled as a white noise process, i.e., :math:`f_t = \varepsilon_{f,t}`.
|
|
250
|
+
Therefore, the state vector will include one state per factor and "factor_ar" will not exist.
|
|
251
|
+
|
|
252
|
+
k_endog : int, optional
|
|
253
|
+
Number of observed time series. If not provided, the number of observed series will be inferred from `endog_names`.
|
|
254
|
+
At least one of `k_endog` or `endog_names` must be provided.
|
|
255
|
+
|
|
256
|
+
endog_names : list of str, optional
|
|
257
|
+
Names of the observed time series. If not provided, default names will be generated as `endog_1`, `endog_2`, ..., `endog_k` based on `k_endog`.
|
|
258
|
+
At least one of `k_endog` or `endog_names` must be provided.
|
|
259
|
+
|
|
260
|
+
k_exog : int, optional
|
|
261
|
+
Number of exogenous variables. If not provided, the model will not have exogenous variables.
|
|
262
|
+
|
|
263
|
+
exog_names : Sequence[str], optional
|
|
264
|
+
Names of the exogenous variables. If not provided, but `k_exog` is specified, default names will be generated as `exog_1`, `exog_2`, ..., `exog_k`.
|
|
265
|
+
|
|
266
|
+
shared_exog_states: bool, optional
|
|
267
|
+
Whether exogenous latent states are shared across the observed states. If True, there will be only one set of exogenous latent
|
|
268
|
+
states, which are observed by all observed states. If False, each observed state has its own set of exogenous latent states.
|
|
269
|
+
|
|
270
|
+
exog_innovations : bool, optional
|
|
271
|
+
Whether to allow time-varying regression coefficients. If True, coefficients follow a random walk.
|
|
272
|
+
|
|
273
|
+
error_order : int, optional
|
|
274
|
+
Order of the AR process for the observation error component.
|
|
275
|
+
Default is 0, corresponding to white noise errors.
|
|
276
|
+
|
|
277
|
+
error_var : bool, optional
|
|
278
|
+
If True, errors are modeled jointly via a VAR process;
|
|
279
|
+
otherwise, each error is modeled separately.
|
|
280
|
+
|
|
281
|
+
error_cov_type : {'scalar', 'diagonal', 'unstructured'}, optional
|
|
282
|
+
Structure of the covariance matrix of the observation errors.
|
|
283
|
+
|
|
284
|
+
measurement_error: bool, default True
|
|
285
|
+
If true, a measurement error term is added to the model.
|
|
286
|
+
|
|
287
|
+
verbose: bool, default True
|
|
288
|
+
If true, a message will be logged to the terminal explaining the variable names, dimensions, and supports.
|
|
289
|
+
|
|
290
|
+
"""
|
|
291
|
+
|
|
292
|
+
if k_endog is None and endog_names is None:
|
|
293
|
+
raise ValueError("Either k_endog or endog_names must be provided.")
|
|
294
|
+
if k_endog is None:
|
|
295
|
+
k_endog = len(endog_names)
|
|
296
|
+
if endog_names is None:
|
|
297
|
+
endog_names = [f"endog_{i}" for i in range(k_endog)]
|
|
298
|
+
|
|
299
|
+
self.endog_names = endog_names
|
|
300
|
+
self.k_endog = k_endog
|
|
301
|
+
self.k_factors = k_factors
|
|
302
|
+
self.factor_order = factor_order
|
|
303
|
+
self.error_order = error_order
|
|
304
|
+
self.error_var = error_var
|
|
305
|
+
self.error_cov_type = error_cov_type
|
|
306
|
+
|
|
307
|
+
if k_exog is None and exog_names is None:
|
|
308
|
+
self.k_exog = 0
|
|
309
|
+
else:
|
|
310
|
+
self.shared_exog_states = shared_exog_states
|
|
311
|
+
self.exog_innovations = exog_innovations
|
|
312
|
+
if k_exog is None:
|
|
313
|
+
k_exog = len(exog_names) if exog_names is not None else 0
|
|
314
|
+
elif exog_names is None:
|
|
315
|
+
exog_names = [f"exog_{i}" for i in range(k_exog)] if k_exog > 0 else None
|
|
316
|
+
self.k_exog = k_exog
|
|
317
|
+
self.exog_names = exog_names
|
|
318
|
+
|
|
319
|
+
self.k_exog_states = self.k_exog * self.k_endog if not shared_exog_states else self.k_exog
|
|
320
|
+
self.exog_flag = self.k_exog > 0
|
|
321
|
+
|
|
322
|
+
# Determine the dimension for the latent factor states.
|
|
323
|
+
# For static factors, one use k_factors.
|
|
324
|
+
# For dynamic factors with lags, the state include current factors and past lags.
|
|
325
|
+
# If factor_order is 0, we treat the factor as static (no dynamics),
|
|
326
|
+
# but it is still included in the state vector with one state per factor. Factor_ar paramter will not exist in this case.
|
|
327
|
+
k_factor_states = max(self.factor_order, 1) * k_factors
|
|
328
|
+
|
|
329
|
+
# Determine the dimension for the error component.
|
|
330
|
+
# If error_order > 0 then we add additional states for error dynamics, otherwise white noise error.
|
|
331
|
+
k_error_states = k_endog * error_order if error_order > 0 else 0
|
|
332
|
+
|
|
333
|
+
# Total state dimension
|
|
334
|
+
k_states = k_factor_states + k_error_states + self.k_exog_states
|
|
335
|
+
|
|
336
|
+
# Number of independent shocks.
|
|
337
|
+
# Typically, the latent factors introduce k_factors shocks.
|
|
338
|
+
# If error_order > 0 and errors are modeled jointly or separately, add appropriate count.
|
|
339
|
+
k_posdef = k_factors + (k_endog if error_order > 0 else 0) + self.k_exog_states
|
|
340
|
+
# k_posdef = (k_factors + (k_endog if error_order > 0 else 0) + self.k_exog_states if self.exog_innovations else 0)
|
|
341
|
+
|
|
342
|
+
# Initialize the PyMCStateSpace base class.
|
|
343
|
+
super().__init__(
|
|
344
|
+
k_endog=k_endog,
|
|
345
|
+
k_states=k_states,
|
|
346
|
+
k_posdef=k_posdef,
|
|
347
|
+
verbose=verbose,
|
|
348
|
+
measurement_error=measurement_error,
|
|
349
|
+
)
|
|
350
|
+
|
|
351
|
+
@property
def param_names(self):
    """Names of the model parameters that are active under the current configuration."""
    names = ["x0", "P0", "factor_loadings"]

    # Only include parameters that exist for this model's settings, preserving
    # the canonical ordering.
    if self.factor_order > 0:
        names.append("factor_ar")
    if self.error_order > 0:
        names.append("error_ar")
    if self.error_cov_type != "unstructured":
        names.append("error_sigma")
    if self.error_cov_type not in ("scalar", "diagonal"):
        names.append("error_cov")
    if self.measurement_error:
        names.append("sigma_obs")
    if self.exog_flag:
        names.append("beta")
        if self.exog_innovations:
            names.append("beta_sigma")

    return names
|
|
384
|
+
|
|
385
|
+
@property
def param_info(self) -> dict[str, dict[str, Any]]:
    """Shape, constraint, and dims metadata for every active parameter."""
    n_obs = self.k_endog
    n_fac = self.k_factors
    error_ar_cols = self.error_order * n_obs if self.error_var else self.error_order
    error_sigma_shape = (n_obs,) if self.error_cov_type == "diagonal" else ()

    # Full table of (shape, constraints) for every possible parameter; the
    # returned dict keeps only those active per param_names, in that order.
    spec = {
        "x0": ((self.k_states,), None),
        "P0": ((self.k_states, self.k_states), "Positive Semi-definite"),
        "factor_loadings": ((n_obs, n_fac), None),
        "factor_ar": ((n_fac, self.factor_order * n_fac), None),
        "error_ar": ((n_obs, error_ar_cols), None),
        "error_sigma": (error_sigma_shape, "Positive"),
        "error_cov": ((n_obs, n_obs), "Positive Semi-definite"),
        "sigma_obs": ((n_obs,), "Positive"),
        "beta": ((self.k_exog_states,), None),
        "beta_sigma": ((self.k_exog_states,), "Positive"),
    }

    dims = self.param_dims
    return {
        name: {"shape": spec[name][0], "constraints": spec[name][1], "dims": dims[name]}
        for name in self.param_names
    }
|
|
437
|
+
|
|
438
|
+
@property
def state_names(self) -> list[str]:
    """
    Names of the hidden states, ordered as: factor states (current value plus
    lags), then idiosyncratic error states (with lags), then exogenous states.
    """
    # Factors are interleaved as (lag-within-factor); a static factor
    # (factor_order == 0) still contributes one "L0" state.
    # NOTE: factor indices here are 0-based (factor_0, ...).
    n_factor_lags = max(self.factor_order, 1)
    names = []
    for factor_idx in range(self.k_factors):
        for lag in range(n_factor_lags):
            names.append(f"L{lag}.factor_{factor_idx}")

    if self.error_order > 0:
        for endog_idx in range(self.k_endog):
            for lag in range(self.error_order):
                names.append(f"L{lag}.error_{endog_idx}")

    if self.exog_flag:
        if self.shared_exog_states:
            # One shared beta state per exogenous regressor.
            for exog_name in self.exog_names:
                names.append(f"beta_{exog_name}[shared]")
        else:
            # One beta state per (regressor, observed series) pair.
            for exog_name in self.exog_names:
                for endog_name in self.endog_names:
                    names.append(f"beta_{exog_name}[{endog_name}]")

    return names
|
|
465
|
+
|
|
466
|
+
@property
def observed_states(self) -> list[str]:
    """Names of the observed (endogenous) series, in the order supplied at construction."""
    return self.endog_names
|
|
472
|
+
|
|
473
|
+
@property
def coords(self) -> dict[str, Sequence]:
    """Default statespace coordinates, extended with the DFM-specific dimensions."""
    coord_dict = make_default_coords(self)

    # Factor labels are 1-based (factor_1, ...).
    coord_dict[FACTOR_DIM] = [f"factor_{idx + 1}" for idx in range(self.k_factors)]

    if self.factor_order > 0:
        n_factor_ar = self.factor_order * self.k_factors
        coord_dict[AR_PARAM_DIM] = list(range(1, n_factor_ar + 1))

    if self.error_order > 0:
        # A joint VAR over errors has error_order * k_endog coefficients per
        # row; independent AR errors have error_order each.
        n_error_ar = self.error_order * (self.k_endog if self.error_var else 1)
        coord_dict[ERROR_AR_PARAM_DIM] = list(range(1, n_error_ar + 1))

    if self.exog_flag:
        coord_dict[EXOG_STATE_DIM] = list(range(1, self.k_exog_states + 1))

    return coord_dict
|
|
492
|
+
|
|
493
|
+
@property
def shock_names(self) -> list[str]:
    """Names of the innovations, ordered: factor shocks, error shocks, exogenous shocks."""
    names = [f"factor_shock_{idx}" for idx in range(self.k_factors)]

    # AR errors contribute one shock per observed series.
    if self.error_order > 0:
        names += [f"error_shock_{idx}" for idx in range(self.k_endog)]

    if self.exog_flag:
        if self.shared_exog_states:
            names += [f"exog_shock_{idx}.shared" for idx in range(self.k_exog)]
        else:
            names += [
                f"exog_shock_{exog_idx}.endog_{endog_idx}"
                for exog_idx in range(self.k_exog)
                for endog_idx in range(self.k_endog)
            ]

    return names
|
|
511
|
+
|
|
512
|
+
@property
def param_dims(self):
    """Map each active parameter name to its tuple of named dimensions."""
    dims = {
        "x0": (ALL_STATE_DIM,),
        "P0": (ALL_STATE_DIM, ALL_STATE_AUX_DIM),
        "factor_loadings": (OBS_STATE_DIM, FACTOR_DIM),
    }

    if self.factor_order > 0:
        dims["factor_ar"] = (FACTOR_DIM, AR_PARAM_DIM)

    if self.error_order > 0:
        dims["error_ar"] = (OBS_STATE_DIM, ERROR_AR_PARAM_DIM)

    # The three covariance structures are mutually exclusive: scalar has a
    # dimensionless sigma, diagonal has one sigma per series, unstructured has
    # a full covariance matrix.
    if self.error_cov_type == "scalar":
        dims["error_sigma"] = ()
    elif self.error_cov_type == "diagonal":
        dims["error_sigma"] = (OBS_STATE_DIM,)
    elif self.error_cov_type == "unstructured":
        dims["error_cov"] = (OBS_STATE_DIM, OBS_STATE_AUX_DIM)

    if self.measurement_error:
        dims["sigma_obs"] = (OBS_STATE_DIM,)

    if self.exog_flag:
        dims["beta"] = (EXOG_STATE_DIM,)
        if self.exog_innovations:
            dims["beta_sigma"] = (EXOG_STATE_DIM,)

    return dims
|
|
543
|
+
|
|
544
|
+
@property
def data_info(self):
    """Metadata for the data the model requires (only ``exog_data``, when exogenous regressors exist)."""
    if not self.exog_flag:
        return {}

    # NOTE(review): dims pairs the k_exog data columns with EXOG_STATE_DIM, but
    # the EXOG_STATE_DIM coordinate has k_exog * k_endog entries when
    # shared_exog_states=False — confirm this is the intended dimension label.
    return {
        "exog_data": {
            "shape": (None, self.k_exog),
            "dims": (TIME_DIM, EXOG_STATE_DIM),
        },
    }
|
|
554
|
+
|
|
555
|
+
@property
def data_names(self):
    """Names of the required data variables (empty when the model has no exogenous regressors)."""
    return ["exog_data"] if self.exog_flag else []
|
|
560
|
+
|
|
561
|
+
def make_symbolic_graph(self):
|
|
562
|
+
if not self.exog_flag:
|
|
563
|
+
x0 = self.make_and_register_variable("x0", shape=(self.k_states,), dtype=floatX)
|
|
564
|
+
else:
|
|
565
|
+
initial_factor_loadings = self.make_and_register_variable(
|
|
566
|
+
"x0", shape=(self.k_states - self.k_exog_states,), dtype=floatX
|
|
567
|
+
)
|
|
568
|
+
initial_betas = self.make_and_register_variable(
|
|
569
|
+
"beta", shape=(self.k_exog_states,), dtype=floatX
|
|
570
|
+
)
|
|
571
|
+
x0 = pt.concatenate([initial_factor_loadings, initial_betas], axis=0)
|
|
572
|
+
|
|
573
|
+
self.ssm["initial_state", :] = x0
|
|
574
|
+
|
|
575
|
+
# Initial covariance
|
|
576
|
+
P0 = self.make_and_register_variable(
|
|
577
|
+
"P0", shape=(self.k_states, self.k_states), dtype=floatX
|
|
578
|
+
)
|
|
579
|
+
self.ssm["initial_state_cov", :, :] = P0
|
|
580
|
+
|
|
581
|
+
# Design matrix (Z)
|
|
582
|
+
# Construction with block structure:
|
|
583
|
+
# When factor_order <= 1 and error_order = 0:
|
|
584
|
+
# [ A ] A is the factor loadings matrix with shape (k_endog, k_factors)
|
|
585
|
+
#
|
|
586
|
+
# When factor_order > 1, add block of zeros for the factors lags:
|
|
587
|
+
# [ A | 0 ] the zero block has shape (k_endog, k_factors * (factor_order - 1))
|
|
588
|
+
#
|
|
589
|
+
# When error_order > 0, add identity matrix and additional zero block for errors lags:
|
|
590
|
+
# [ A | 0 | I | 0 ] I is the identity matrix (k_endog, k_endog) and the final zero block
|
|
591
|
+
# has shape (k_endog, k_endog * (error_order - 1))
|
|
592
|
+
#
|
|
593
|
+
# When exog_flag=True, exogenous data (exog_data) is included and the design
|
|
594
|
+
# matrix becomes 3D with the first dimension indexing time:
|
|
595
|
+
# - shared_exog_states=True: exog_data is broadcast across all endogenous series
|
|
596
|
+
# → shape (n_timepoints, k_endog, k_exog)
|
|
597
|
+
# - shared_exog_states=False: each endogenous series gets its own exog block
|
|
598
|
+
# → block-diagonal structure with shape (n_timepoints, k_endog, k_exog * k_endog)
|
|
599
|
+
# In this case, the base design matrix (factors + errors) is repeated over
|
|
600
|
+
# time and concatenated with the exogenous block. The final design matrix
|
|
601
|
+
# has shape (n_timepoints, k_endog, n_columns) and combines all components.
|
|
602
|
+
factor_loadings = self.make_and_register_variable(
|
|
603
|
+
"factor_loadings", shape=(self.k_endog, self.k_factors), dtype=floatX
|
|
604
|
+
)
|
|
605
|
+
# Add factor loadings (A matrix)
|
|
606
|
+
matrix_parts = [factor_loadings]
|
|
607
|
+
|
|
608
|
+
# Add zero block for the factors lags when factor_order > 1
|
|
609
|
+
if self.factor_order > 1:
|
|
610
|
+
matrix_parts.append(
|
|
611
|
+
pt.zeros((self.k_endog, self.k_factors * (self.factor_order - 1)), dtype=floatX)
|
|
612
|
+
)
|
|
613
|
+
# Add identity and zero blocks for error lags when error_order > 0
|
|
614
|
+
if self.error_order > 0:
|
|
615
|
+
error_matrix = pt.eye(self.k_endog, dtype=floatX)
|
|
616
|
+
matrix_parts.append(error_matrix)
|
|
617
|
+
matrix_parts.append(
|
|
618
|
+
pt.zeros((self.k_endog, self.k_endog * (self.error_order - 1)), dtype=floatX)
|
|
619
|
+
)
|
|
620
|
+
if len(matrix_parts) == 1:
|
|
621
|
+
design_matrix = factor_loadings * 1.0 # copy to ensure a new PyTensor variable
|
|
622
|
+
design_matrix.name = "design"
|
|
623
|
+
# TODO: This is a hack to ensure the design matrix isn't identically equal to the factor_loadings when error_order=0 and factor_order=0
|
|
624
|
+
else:
|
|
625
|
+
design_matrix = pt.concatenate(matrix_parts, axis=1)
|
|
626
|
+
design_matrix.name = "design"
|
|
627
|
+
# Handle exogenous variables (if any)
|
|
628
|
+
if self.exog_flag:
|
|
629
|
+
exog_data = self.make_and_register_data("exog_data", shape=(None, self.k_exog))
|
|
630
|
+
if self.shared_exog_states:
|
|
631
|
+
# Shared exogenous states: same exog data is used across all endogenous variables
|
|
632
|
+
# Shape becomes (n_timepoints, k_endog, k_exog)
|
|
633
|
+
Z_exog = pt.specify_shape(
|
|
634
|
+
pt.join(1, *[pt.expand_dims(exog_data, 1) for _ in range(self.k_endog)]),
|
|
635
|
+
(None, self.k_endog, self.k_exog),
|
|
636
|
+
)
|
|
637
|
+
else:
|
|
638
|
+
# Separate exogenous states: each endogenous variable gets its own exog block
|
|
639
|
+
# Create block-diagonal structure and reshape to (n_timepoints, k_endog, k_exog * k_endog)
|
|
640
|
+
Z_exog = pt.linalg.block_diag(
|
|
641
|
+
*[pt.expand_dims(exog_data, 1) for _ in range(self.k_endog)]
|
|
642
|
+
)
|
|
643
|
+
Z_exog = pt.specify_shape(Z_exog, (None, self.k_endog, self.k_exog * self.k_endog))
|
|
644
|
+
|
|
645
|
+
# Repeat base design_matrix over time dimension to match exogenous time series
|
|
646
|
+
n_timepoints = Z_exog.shape[0]
|
|
647
|
+
design_matrix_time = pt.tile(design_matrix, (n_timepoints, 1, 1))
|
|
648
|
+
# Concatenate the repeated design matrix with exogenous matrix along the last axis
|
|
649
|
+
# Final shape: (n_timepoints, k_endog, n_columns + n_exog_columns)
|
|
650
|
+
design_matrix = pt.concatenate([design_matrix_time, Z_exog], axis=2)
|
|
651
|
+
|
|
652
|
+
self.ssm["design"] = design_matrix
|
|
653
|
+
|
|
654
|
+
# Transition matrix (T)
|
|
655
|
+
# Construction with block-diagonal structure:
|
|
656
|
+
# Each latent component (factors, errors, exogenous states) contributes its own transition block,
|
|
657
|
+
# and the full transition matrix is assembled with block_diag.
|
|
658
|
+
# T = block_diag(A, B, C)
|
|
659
|
+
#
|
|
660
|
+
# - Factors (block A):
|
|
661
|
+
# If factor_order > 0, the factor AR coefficients are organized into a
|
|
662
|
+
# VAR(p) companion matrix of size (k_factors * factor_order, k_factors * factor_order).
|
|
663
|
+
# This block shifts lagged factor states and applies AR coefficients.
|
|
664
|
+
# If factor_order = 0, a zero matrix is used instead.
|
|
665
|
+
#
|
|
666
|
+
# - Errors (block B):
|
|
667
|
+
# If error_order > 0:
|
|
668
|
+
# * error_var=True → build a full VAR(p) companion matrix (cross-series correlations allowed).
|
|
669
|
+
# * error_var=False → build independent AR(p) companion matrices (no cross-series effects).
|
|
670
|
+
#
|
|
671
|
+
# - Exogenous states (block C):
|
|
672
|
+
# If exog_flag=True, exogenous states are either constant or follow a random walk, modeled with an identity
|
|
673
|
+
# transition block of size (k_exog_states, k_exog_states).
|
|
674
|
+
#
|
|
675
|
+
# The final transition matrix is block-diagonal, combining all active components:
|
|
676
|
+
# Transition = block_diag(Factors, Errors, Exogenous)
|
|
677
|
+
|
|
678
|
+
# auxiliary functions to build transition matrix block
|
|
679
|
+
def build_var_block_matrix(ar_coeffs, k_series, p):
    """
    Assemble the companion-form transition matrix of a VAR(p) process.

    Parameters
    ----------
    ar_coeffs : TensorVariable
        Matrix of shape (k_series, p * k_series) holding the horizontally
        stacked lag coefficient matrices [A1 | A2 | ... | Ap].
    k_series : int
        Number of series in the VAR system.
    p : int
        Lag order.

    Returns
    -------
    TensorVariable
        Companion matrix of shape (k_series * p, k_series * p): the top
        block row carries the AR coefficient matrices, and identity blocks
        below the diagonal shift the lagged states forward by one step.
    """
    dim = k_series * p
    companion = pt.zeros((dim, dim), dtype=floatX)

    # Top block row holds [A1 | A2 | ... | Ap] across all columns
    companion = pt.set_subtensor(companion[:k_series, :], ar_coeffs)

    # Sub-diagonal identity blocks implement the lag-shift structure
    if p > 1:
        shift = pt.eye(k_series * (p - 1), dtype=floatX)
        companion = pt.set_subtensor(companion[k_series:, : k_series * (p - 1)], shift)

    return companion
|
|
701
|
+
|
|
702
|
+
def build_independent_var_block_matrix(ar_coeffs, k_series, p):
    """
    Build a VAR(p)-style companion matrix for independent AR(p) processes
    with interleaved state ordering:
        (x1(t), x2(t), ..., x1(t-1), x2(t-1), ...).

    Parameters
    ----------
    ar_coeffs : TensorVariable
        Matrix of shape (k_series, p); row j holds the AR coefficients of
        series j, one column per lag (no cross-series effects).
    k_series : int
        Number of independent series.
    p : int
        Lag order.

    Returns
    -------
    TensorVariable
        Companion matrix of shape (k_series * p, k_series * p) whose first
        block row places ar_coeffs[j, lag] at (row=j, col=lag * k_series + j),
        with sub-diagonal identity blocks shifting the lagged states.
    """
    size = k_series * p
    block = pt.zeros((size, size), dtype=floatX)

    # First block row: coefficient of series j at lag `lag` belongs at
    # (row=j, col=lag * k_series + j).  One vectorized advanced-index
    # set_subtensor replaces k_series * p chained scalar updates, which
    # keeps the graph small for larger models while producing the same
    # matrix.
    row_idx = pt.repeat(pt.arange(k_series), p)  # j, repeated over lags
    lag_idx = pt.tile(pt.arange(p), k_series)  # lag, cycling within each j
    col_idx = lag_idx * k_series + row_idx
    # ar_coeffs.flatten() is row-major (j slow, lag fast) — matches the
    # (row_idx, col_idx) ordering built above.
    block = pt.set_subtensor(block[row_idx, col_idx], ar_coeffs.flatten())

    # Sub-diagonal identity blocks (shift lagged states forward one step)
    if p > 1:
        identity_pattern = pt.eye(k_series * (p - 1), dtype=floatX)
        block = pt.set_subtensor(block[k_series:, : k_series * (p - 1)], identity_pattern)
    return block
|
|
726
|
+
|
|
727
|
+
transition_blocks = []
|
|
728
|
+
# Block A: Factors
|
|
729
|
+
if self.factor_order > 0:
|
|
730
|
+
factor_ar = self.make_and_register_variable(
|
|
731
|
+
"factor_ar",
|
|
732
|
+
shape=(self.k_factors, self.factor_order * self.k_factors),
|
|
733
|
+
dtype=floatX,
|
|
734
|
+
)
|
|
735
|
+
transition_blocks.append(
|
|
736
|
+
build_var_block_matrix(factor_ar, self.k_factors, self.factor_order)
|
|
737
|
+
)
|
|
738
|
+
else:
|
|
739
|
+
transition_blocks.append(pt.zeros((self.k_factors, self.k_factors), dtype=floatX))
|
|
740
|
+
# Block B: Errors
|
|
741
|
+
if self.error_order > 0 and self.error_var:
|
|
742
|
+
error_ar = self.make_and_register_variable(
|
|
743
|
+
"error_ar", shape=(self.k_endog, self.error_order * self.k_endog), dtype=floatX
|
|
744
|
+
)
|
|
745
|
+
transition_blocks.append(
|
|
746
|
+
build_var_block_matrix(error_ar, self.k_endog, self.error_order)
|
|
747
|
+
)
|
|
748
|
+
elif self.error_order > 0 and not self.error_var:
|
|
749
|
+
error_ar = self.make_and_register_variable(
|
|
750
|
+
"error_ar", shape=(self.k_endog, self.error_order), dtype=floatX
|
|
751
|
+
)
|
|
752
|
+
transition_blocks.append(
|
|
753
|
+
build_independent_var_block_matrix(error_ar, self.k_endog, self.error_order)
|
|
754
|
+
)
|
|
755
|
+
# Block C: Exogenous states
|
|
756
|
+
if self.exog_flag:
|
|
757
|
+
transition_blocks.append(pt.eye(self.k_exog_states, dtype=floatX))
|
|
758
|
+
|
|
759
|
+
self.ssm["transition", :, :] = pt.linalg.block_diag(*transition_blocks)
|
|
760
|
+
|
|
761
|
+
# Selection matrix (R)
|
|
762
|
+
for i in range(self.k_factors):
|
|
763
|
+
self.ssm["selection", i, i] = 1.0
|
|
764
|
+
|
|
765
|
+
if self.error_order > 0:
|
|
766
|
+
for i in range(self.k_endog):
|
|
767
|
+
row = max(self.factor_order, 1) * self.k_factors + i
|
|
768
|
+
col = self.k_factors + i
|
|
769
|
+
self.ssm["selection", row, col] = 1.0
|
|
770
|
+
|
|
771
|
+
if self.exog_flag and self.exog_innovations:
|
|
772
|
+
row_start = self.k_states - self.k_exog_states
|
|
773
|
+
row_end = self.k_states
|
|
774
|
+
|
|
775
|
+
if self.error_order > 0:
|
|
776
|
+
col_start = self.k_factors + self.k_endog
|
|
777
|
+
col_end = self.k_factors + self.k_endog + self.k_exog_states
|
|
778
|
+
else:
|
|
779
|
+
col_start = self.k_factors
|
|
780
|
+
col_end = self.k_factors + self.k_exog_states
|
|
781
|
+
|
|
782
|
+
self.ssm["selection", row_start:row_end, col_start:col_end] = pt.eye(
|
|
783
|
+
self.k_exog_states, dtype=floatX
|
|
784
|
+
)
|
|
785
|
+
|
|
786
|
+
factor_cov = pt.eye(self.k_factors, dtype=floatX)
|
|
787
|
+
|
|
788
|
+
# Handle error_sigma and error_cov depending on error_cov_type
|
|
789
|
+
if self.error_cov_type == "scalar":
|
|
790
|
+
error_sigma = self.make_and_register_variable("error_sigma", shape=(), dtype=floatX)
|
|
791
|
+
error_cov = pt.eye(self.k_endog) * error_sigma
|
|
792
|
+
elif self.error_cov_type == "diagonal":
|
|
793
|
+
error_sigma = self.make_and_register_variable(
|
|
794
|
+
"error_sigma", shape=(self.k_endog,), dtype=floatX
|
|
795
|
+
)
|
|
796
|
+
error_cov = pt.diag(error_sigma)
|
|
797
|
+
elif self.error_cov_type == "unstructured":
|
|
798
|
+
error_cov = self.make_and_register_variable(
|
|
799
|
+
"error_cov", shape=(self.k_endog, self.k_endog), dtype=floatX
|
|
800
|
+
)
|
|
801
|
+
|
|
802
|
+
# State covariance matrix (Q)
|
|
803
|
+
if self.error_order > 0:
|
|
804
|
+
if self.exog_flag and self.exog_innovations:
|
|
805
|
+
# Include AR noise in state vector
|
|
806
|
+
beta_sigma = self.make_and_register_variable(
|
|
807
|
+
"beta_sigma", shape=(self.k_exog_states,), dtype=floatX
|
|
808
|
+
)
|
|
809
|
+
exog_cov = pt.diag(beta_sigma)
|
|
810
|
+
self.ssm["state_cov", :, :] = pt.linalg.block_diag(factor_cov, error_cov, exog_cov)
|
|
811
|
+
elif self.exog_flag and not self.exog_innovations:
|
|
812
|
+
exog_cov = pt.zeros((self.k_exog_states, self.k_exog_states), dtype=floatX)
|
|
813
|
+
self.ssm["state_cov", :, :] = pt.linalg.block_diag(factor_cov, error_cov, exog_cov)
|
|
814
|
+
elif not self.exog_flag:
|
|
815
|
+
self.ssm["state_cov", :, :] = pt.linalg.block_diag(factor_cov, error_cov)
|
|
816
|
+
else:
|
|
817
|
+
if self.exog_flag and self.exog_innovations:
|
|
818
|
+
beta_sigma = self.make_and_register_variable(
|
|
819
|
+
"beta_sigma", shape=(self.k_exog_states,), dtype=floatX
|
|
820
|
+
)
|
|
821
|
+
exog_cov = pt.diag(beta_sigma)
|
|
822
|
+
self.ssm["state_cov", :, :] = pt.linalg.block_diag(factor_cov, exog_cov)
|
|
823
|
+
elif self.exog_flag and not self.exog_innovations:
|
|
824
|
+
exog_cov = pt.zeros((self.k_exog_states, self.k_exog_states), dtype=floatX)
|
|
825
|
+
self.ssm["state_cov", :, :] = pt.linalg.block_diag(factor_cov, exog_cov)
|
|
826
|
+
elif not self.exog_flag:
|
|
827
|
+
# Only latent factor in the state
|
|
828
|
+
self.ssm["state_cov", :, :] = factor_cov
|
|
829
|
+
|
|
830
|
+
# Observation covariance matrix (H)
|
|
831
|
+
if self.error_order > 0:
|
|
832
|
+
if self.measurement_error:
|
|
833
|
+
sigma_obs = self.make_and_register_variable(
|
|
834
|
+
"sigma_obs", shape=(self.k_endog,), dtype=floatX
|
|
835
|
+
)
|
|
836
|
+
self.ssm["obs_cov", :, :] = pt.diag(sigma_obs)
|
|
837
|
+
# else: obs_cov remains zero (no measurement noise and idiosyncratic noise captured in state)
|
|
838
|
+
else:
|
|
839
|
+
if self.measurement_error:
|
|
840
|
+
# TODO: check this decision
|
|
841
|
+
# in this case error_order = 0, so there is no error term in the state, so the sigma error could not be added there
|
|
842
|
+
# Idiosyncratic + measurement error
|
|
843
|
+
sigma_obs = self.make_and_register_variable(
|
|
844
|
+
"sigma_obs", shape=(self.k_endog,), dtype=floatX
|
|
845
|
+
)
|
|
846
|
+
total_obs_var = error_sigma**2 + sigma_obs**2
|
|
847
|
+
self.ssm["obs_cov", :, :] = pt.diag(pt.sqrt(total_obs_var))
|
|
848
|
+
else:
|
|
849
|
+
self.ssm["obs_cov", :, :] = pt.diag(error_sigma)
|