pymc-extras 0.2.5__py3-none-any.whl → 0.2.6__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
- pymc_extras/__init__.py +5 -1
- pymc_extras/distributions/continuous.py +3 -2
- pymc_extras/distributions/discrete.py +3 -1
- pymc_extras/inference/find_map.py +62 -17
- pymc_extras/inference/laplace.py +10 -7
- pymc_extras/statespace/core/statespace.py +191 -52
- pymc_extras/statespace/filters/distributions.py +15 -16
- pymc_extras/statespace/filters/kalman_filter.py +1 -18
- pymc_extras/statespace/filters/kalman_smoother.py +2 -6
- pymc_extras/statespace/models/ETS.py +10 -0
- pymc_extras/statespace/models/SARIMAX.py +26 -5
- pymc_extras/statespace/models/VARMAX.py +12 -2
- pymc_extras/statespace/models/structural.py +18 -5
- pymc_extras-0.2.6.dist-info/METADATA +318 -0
- pymc_extras-0.2.6.dist-info/RECORD +65 -0
- {pymc_extras-0.2.5.dist-info → pymc_extras-0.2.6.dist-info}/WHEEL +1 -2
- pymc_extras/version.py +0 -11
- pymc_extras/version.txt +0 -1
- pymc_extras-0.2.5.dist-info/METADATA +0 -112
- pymc_extras-0.2.5.dist-info/RECORD +0 -108
- pymc_extras-0.2.5.dist-info/top_level.txt +0 -2
- tests/__init__.py +0 -13
- tests/distributions/__init__.py +0 -19
- tests/distributions/test_continuous.py +0 -185
- tests/distributions/test_discrete.py +0 -210
- tests/distributions/test_discrete_markov_chain.py +0 -258
- tests/distributions/test_multivariate.py +0 -304
- tests/distributions/test_transform.py +0 -77
- tests/model/__init__.py +0 -0
- tests/model/marginal/__init__.py +0 -0
- tests/model/marginal/test_distributions.py +0 -132
- tests/model/marginal/test_graph_analysis.py +0 -182
- tests/model/marginal/test_marginal_model.py +0 -967
- tests/model/test_model_api.py +0 -38
- tests/statespace/__init__.py +0 -0
- tests/statespace/test_ETS.py +0 -411
- tests/statespace/test_SARIMAX.py +0 -405
- tests/statespace/test_VARMAX.py +0 -184
- tests/statespace/test_coord_assignment.py +0 -181
- tests/statespace/test_distributions.py +0 -270
- tests/statespace/test_kalman_filter.py +0 -326
- tests/statespace/test_representation.py +0 -175
- tests/statespace/test_statespace.py +0 -872
- tests/statespace/test_statespace_JAX.py +0 -156
- tests/statespace/test_structural.py +0 -836
- tests/statespace/utilities/__init__.py +0 -0
- tests/statespace/utilities/shared_fixtures.py +0 -9
- tests/statespace/utilities/statsmodel_local_level.py +0 -42
- tests/statespace/utilities/test_helpers.py +0 -310
- tests/test_blackjax_smc.py +0 -222
- tests/test_find_map.py +0 -103
- tests/test_histogram_approximation.py +0 -109
- tests/test_laplace.py +0 -281
- tests/test_linearmodel.py +0 -208
- tests/test_model_builder.py +0 -306
- tests/test_pathfinder.py +0 -297
- tests/test_pivoted_cholesky.py +0 -24
- tests/test_printing.py +0 -98
- tests/test_prior_from_trace.py +0 -172
- tests/test_splines.py +0 -77
- tests/utils.py +0 -0
- {pymc_extras-0.2.5.dist-info → pymc_extras-0.2.6.dist-info}/licenses/LICENSE +0 -0
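The headline packaging change in 0.2.6 is layout cleanup: the wheel no longer ships the `tests/` package, the `version.py`/`version.txt` pair, or `top_level.txt`, and `METADATA` and `RECORD` are regenerated. Below is a minimal sketch for confirming the new layout after upgrading; it uses only the standard library, assumes a clean environment with pymc-extras 0.2.6 installed, and is not part of the pymc-extras API.

```python
# Hedged sketch: verify the 0.2.6 wheel layout after `pip install -U pymc-extras`.
# Assumption: a clean environment where no other distribution provides a
# top-level ``tests`` package.
import importlib.metadata
import importlib.util

# The installed distribution should report the new version.
print(importlib.metadata.version("pymc_extras"))  # expected: "0.2.6"

# The 0.2.5 wheel shipped a top-level ``tests`` package; 0.2.6 does not,
# so resolving it should fail in a clean environment.
spec = importlib.util.find_spec("tests")
print(spec)  # expected: None
```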
tests/statespace/test_kalman_filter.py (deleted)
@@ -1,326 +0,0 @@
-import numpy as np
-import pytensor
-import pytensor.tensor as pt
-import pytest
-
-from numpy.testing import assert_allclose, assert_array_less
-
-from pymc_extras.statespace.filters import (
-    KalmanSmoother,
-    SquareRootFilter,
-    StandardFilter,
-    UnivariateFilter,
-)
-from pymc_extras.statespace.filters.kalman_filter import BaseFilter
-from tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
-    rng,
-)
-from tests.statespace.utilities.test_helpers import (
-    get_expected_shape,
-    get_sm_state_from_output_name,
-    initialize_filter,
-    make_test_inputs,
-    nile_test_test_helper,
-)
-
-floatX = pytensor.config.floatX
-
-# TODO: These are pretty loose because of all the stabilizing of covariance matrices that is done inside the kalman
-# filters. When that is improved, this should be tightened.
-ATOL = 1e-6 if floatX.endswith("64") else 1e-3
-RTOL = 1e-6 if floatX.endswith("64") else 1e-3
-
-standard_inout = initialize_filter(StandardFilter())
-cholesky_inout = initialize_filter(SquareRootFilter())
-univariate_inout = initialize_filter(UnivariateFilter())
-
-f_standard = pytensor.function(*standard_inout, on_unused_input="ignore")
-f_cholesky = pytensor.function(*cholesky_inout, on_unused_input="ignore")
-f_univariate = pytensor.function(*univariate_inout, on_unused_input="ignore")
-
-filter_funcs = [f_standard, f_cholesky, f_univariate]
-
-filter_names = [
-    "StandardFilter",
-    "CholeskyFilter",
-    "UnivariateFilter",
-]
-
-output_names = [
-    "filtered_states",
-    "predicted_states",
-    "smoothed_states",
-    "filtered_covs",
-    "predicted_covs",
-    "smoothed_covs",
-    "log_likelihood",
-    "ll_obs",
-]
-
-
-def test_base_class_update_raises():
-    filter = BaseFilter()
-    inputs = [None] * 7
-    with pytest.raises(NotImplementedError):
-        filter.update(*inputs)
-
-
-@pytest.mark.parametrize("filter_func", filter_funcs, ids=filter_names)
-def test_output_shapes_one_state_one_observed(filter_func, rng):
-    p, m, r, n = 1, 1, 1, 10
-    inputs = make_test_inputs(p, m, r, n, rng)
-    outputs = filter_func(*inputs)
-
-    for output_idx, name in enumerate(output_names):
-        expected_output = get_expected_shape(name, p, m, r, n)
-        assert (
-            outputs[output_idx].shape == expected_output
-        ), f"Shape of {name} does not match expected"
-
-
-@pytest.mark.parametrize("filter_func", filter_funcs, ids=filter_names)
-def test_output_shapes_when_all_states_are_stochastic(filter_func, rng):
-    p, m, r, n = 1, 2, 2, 10
-    inputs = make_test_inputs(p, m, r, n, rng)
-
-    outputs = filter_func(*inputs)
-    for output_idx, name in enumerate(output_names):
-        expected_output = get_expected_shape(name, p, m, r, n)
-        assert (
-            outputs[output_idx].shape == expected_output
-        ), f"Shape of {name} does not match expected"
-
-
-@pytest.mark.parametrize("filter_func", filter_funcs, ids=filter_names)
-def test_output_shapes_when_some_states_are_deterministic(filter_func, rng):
-    p, m, r, n = 1, 5, 2, 10
-    inputs = make_test_inputs(p, m, r, n, rng)
-
-    outputs = filter_func(*inputs)
-    for output_idx, name in enumerate(output_names):
-        expected_output = get_expected_shape(name, p, m, r, n)
-        assert (
-            outputs[output_idx].shape == expected_output
-        ), f"Shape of {name} does not match expected"
-
-
-@pytest.fixture
-def f_standard_nd():
-    ksmoother = KalmanSmoother()
-    data = pt.tensor(name="data", dtype=floatX, shape=(None, None))
-    a0 = pt.vector(name="a0", dtype=floatX)
-    P0 = pt.matrix(name="P0", dtype=floatX)
-    c = pt.vector(name="c", dtype=floatX)
-    d = pt.vector(name="d", dtype=floatX)
-    Q = pt.tensor(name="Q", dtype=floatX, shape=(None, None, None))
-    H = pt.tensor(name="H", dtype=floatX, shape=(None, None, None))
-    T = pt.tensor(name="T", dtype=floatX, shape=(None, None, None))
-    R = pt.tensor(name="R", dtype=floatX, shape=(None, None, None))
-    Z = pt.tensor(name="Z", dtype=floatX, shape=(None, None, None))
-
-    inputs = [data, a0, P0, c, d, T, Z, R, H, Q]
-
-    (
-        filtered_states,
-        predicted_states,
-        observed_states,
-        filtered_covs,
-        predicted_covs,
-        observed_covs,
-        ll_obs,
-    ) = StandardFilter().build_graph(*inputs)
-
-    smoothed_states, smoothed_covs = ksmoother.build_graph(T, R, Q, filtered_states, filtered_covs)
-
-    outputs = [
-        filtered_states,
-        predicted_states,
-        smoothed_states,
-        filtered_covs,
-        predicted_covs,
-        smoothed_covs,
-        ll_obs.sum(),
-        ll_obs,
-    ]
-
-    f_standard = pytensor.function(inputs, outputs)
-
-    return f_standard
-
-
-def test_output_shapes_with_time_varying_matrices(f_standard_nd, rng):
-    p, m, r, n = 1, 5, 2, 10
-    data, a0, P0, c, d, T, Z, R, H, Q = make_test_inputs(p, m, r, n, rng)
-    T = np.concatenate([np.expand_dims(T, 0)] * n, axis=0)
-    Z = np.concatenate([np.expand_dims(Z, 0)] * n, axis=0)
-    R = np.concatenate([np.expand_dims(R, 0)] * n, axis=0)
-    H = np.concatenate([np.expand_dims(H, 0)] * n, axis=0)
-    Q = np.concatenate([np.expand_dims(Q, 0)] * n, axis=0)
-
-    outputs = f_standard_nd(data, a0, P0, c, d, T, Z, R, H, Q)
-
-    for output_idx, name in enumerate(output_names):
-        expected_output = get_expected_shape(name, p, m, r, n)
-        assert (
-            outputs[output_idx].shape == expected_output
-        ), f"Shape of {name} does not match expected"
-
-
-@pytest.mark.parametrize("filter_func", filter_funcs, ids=filter_names)
-def test_output_with_deterministic_observation_equation(filter_func, rng):
-    p, m, r, n = 1, 5, 1, 10
-    inputs = make_test_inputs(p, m, r, n, rng)
-
-    outputs = filter_func(*inputs)
-
-    for output_idx, name in enumerate(output_names):
-        expected_output = get_expected_shape(name, p, m, r, n)
-        assert (
-            outputs[output_idx].shape == expected_output
-        ), f"Shape of {name} does not match expected"
-
-
-@pytest.mark.parametrize(
-    ("filter_func", "filter_name"), zip(filter_funcs, filter_names), ids=filter_names
-)
-def test_output_with_multiple_observed(filter_func, filter_name, rng):
-    p, m, r, n = 5, 5, 1, 10
-    inputs = make_test_inputs(p, m, r, n, rng)
-
-    outputs = filter_func(*inputs)
-    for output_idx, name in enumerate(output_names):
-        expected_output = get_expected_shape(name, p, m, r, n)
-        assert (
-            outputs[output_idx].shape == expected_output
-        ), f"Shape of {name} does not match expected"
-
-
-@pytest.mark.parametrize(
-    ("filter_func", "filter_name"), zip(filter_funcs, filter_names), ids=filter_names
-)
-@pytest.mark.parametrize("p", [1, 5], ids=["univariate (p=1)", "multivariate (p=5)"])
-def test_missing_data(filter_func, filter_name, p, rng):
-    m, r, n = 5, 1, 10
-    inputs = make_test_inputs(p, m, r, n, rng, missing_data=1)
-
-    outputs = filter_func(*inputs)
-    for output_idx, name in enumerate(output_names):
-        expected_output = get_expected_shape(name, p, m, r, n)
-        assert (
-            outputs[output_idx].shape == expected_output
-        ), f"Shape of {name} does not match expected"
-
-
-@pytest.mark.parametrize("filter_func", filter_funcs, ids=filter_names)
-@pytest.mark.parametrize("output_idx", [(0, 2), (3, 5)], ids=["smoothed_states", "smoothed_covs"])
-def test_last_smoother_is_last_filtered(filter_func, output_idx, rng):
-    p, m, r, n = 1, 5, 1, 10
-    inputs = make_test_inputs(p, m, r, n, rng)
-    outputs = filter_func(*inputs)
-
-    filtered = outputs[output_idx[0]]
-    smoothed = outputs[output_idx[1]]
-
-    assert_allclose(filtered[-1], smoothed[-1])
-
-
-@pytest.mark.parametrize(
-    "filter_func, filter_name", zip(filter_funcs, filter_names), ids=filter_names
-)
-@pytest.mark.parametrize("n_missing", [0, 5], ids=["n_missing=0", "n_missing=5"])
-@pytest.mark.skipif(floatX == "float32", reason="Tests are too sensitive for float32")
-def test_filters_match_statsmodel_output(filter_func, filter_name, n_missing, rng):
-    fit_sm_mod, [data, a0, P0, c, d, T, Z, R, H, Q] = nile_test_test_helper(rng, n_missing)
-    if filter_name == "CholeskyFilter":
-        P0 = np.linalg.cholesky(P0)
-    inputs = [data, a0, P0, c, d, T, Z, R, H, Q]
-    outputs = filter_func(*inputs)
-
-    for output_idx, name in enumerate(output_names):
-        ref_val = get_sm_state_from_output_name(fit_sm_mod, name)
-        val_to_test = outputs[output_idx].squeeze()
-
-        if name == "smoothed_covs":
-            # TODO: The smoothed covariance matrices have large errors (1e-2) ONLY in the first few states -- no idea why.
-            assert_allclose(
-                val_to_test[5:],
-                ref_val[5:],
-                atol=ATOL,
-                rtol=RTOL,
-                err_msg=f"{name} does not match statsmodels",
-            )
-        elif name.startswith("predicted"):
-            # statsmodels doesn't throw away the T+1 forecast in the predicted states like we do
-            assert_allclose(
-                val_to_test,
-                ref_val[:-1],
-                atol=ATOL,
-                rtol=RTOL,
-                err_msg=f"{name} does not match statsmodels",
-            )
-        else:
-            # Need atol = 1e-7 for smoother tests to pass
-            assert_allclose(
-                val_to_test,
-                ref_val,
-                atol=ATOL,
-                rtol=RTOL,
-                err_msg=f"{name} does not match statsmodels",
-            )
-
-
-@pytest.mark.parametrize(
-    "filter_func, filter_name", zip(filter_funcs[:-1], filter_names[:-1]), ids=filter_names[:-1]
-)
-@pytest.mark.parametrize("n_missing", [0, 5], ids=["n_missing=0", "n_missing=5"])
-@pytest.mark.parametrize("obs_noise", [True, False])
-def test_all_covariance_matrices_are_PSD(filter_func, filter_name, n_missing, obs_noise, rng):
-    if (floatX == "float32") & (filter_name == "UnivariateFilter"):
-        # TODO: These tests all pass locally for me with float32 but they fail on the CI, so i'm just disabling them.
-        pytest.skip("Univariate filter not stable at half precision without measurement error")
-
-    fit_sm_mod, [data, a0, P0, c, d, T, Z, R, H, Q] = nile_test_test_helper(rng, n_missing)
-    if filter_name == "CholeskyFilter":
-        P0 = np.linalg.cholesky(P0)
-
-    H *= int(obs_noise)
-    inputs = [data, a0, P0, c, d, T, Z, R, H, Q]
-    outputs = filter_func(*inputs)
-
-    for output_idx, name in zip([3, 4, 5], output_names[3:-2]):
-        cov_stack = outputs[output_idx]
-        w, v = np.linalg.eig(cov_stack)
-
-        assert_array_less(0, w, err_msg=f"Smallest eigenvalue of {name}: {min(w.ravel())}")
-        assert_allclose(
-            cov_stack,
-            np.swapaxes(cov_stack, -2, -1),
-            rtol=RTOL,
-            atol=ATOL,
-            err_msg=f"{name} is not symmetrical",
-        )
-
-
-@pytest.mark.parametrize(
-    "filter",
-    [StandardFilter, SquareRootFilter],
-    ids=["standard", "cholesky"],
-)
-def test_kalman_filter_jax(filter):
-    pytest.importorskip("jax")
-    from pymc.sampling.jax import get_jaxified_graph
-
-    # TODO: Add UnivariateFilter to test; need to figure out the broadcasting issue when 2nd data dim is defined
-
-    p, m, r, n = 1, 5, 1, 10
-    inputs, outputs = initialize_filter(filter(), mode="JAX", p=p, m=m, r=r, n=n)
-    inputs_np = make_test_inputs(p, m, r, n, rng)
-
-    f_jax = get_jaxified_graph(inputs, outputs)
-    f_pt = pytensor.function(inputs, outputs, mode="FAST_COMPILE")
-
-    jax_outputs = f_jax(*inputs_np)
-    pt_outputs = f_pt(*inputs_np)
-
-    for name, jax_res, pt_res in zip(output_names, jax_outputs, pt_outputs):
-        assert_allclose(jax_res, pt_res, atol=ATOL, rtol=RTOL, err_msg=f"{name} failed!")
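The removed `tests/statespace/test_kalman_filter.py` checked output shapes, missing-data handling, agreement with statsmodels on the Nile dataset, covariance symmetry and positive-definiteness, and JAX/PyTensor parity. For reference, here is a minimal sketch of the filter-plus-smoother graph those tests built, adapted from the deleted `f_standard_nd` fixture. It assumes the 0.2.5 signatures recorded in this diff (`StandardFilter.build_graph` returning seven outputs, `KalmanSmoother.build_graph(T, R, Q, filtered_states, filtered_covs)`), which 0.2.6 may have changed.

```python
# Hedged sketch of the deleted fixture's graph construction, under the
# assumption that the 0.2.5 API shown above still applies.
import pytensor
import pytensor.tensor as pt

from pymc_extras.statespace.filters import KalmanSmoother, StandardFilter

floatX = pytensor.config.floatX

# Symbolic state-space inputs: data is (n, p); T, Z, R, H, Q are
# time-varying with a leading time dimension, as in the deleted fixture.
data = pt.tensor(name="data", dtype=floatX, shape=(None, None))
a0 = pt.vector(name="a0", dtype=floatX)
P0 = pt.matrix(name="P0", dtype=floatX)
c = pt.vector(name="c", dtype=floatX)
d = pt.vector(name="d", dtype=floatX)
T, Z, R, H, Q = (
    pt.tensor(name=name, dtype=floatX, shape=(None, None, None))
    for name in "TZRHQ"
)

inputs = [data, a0, P0, c, d, T, Z, R, H, Q]

# build_graph returns filtered/predicted/observed states and covariances
# plus the per-observation log-likelihood, in that order.
(
    filtered_states,
    predicted_states,
    observed_states,
    filtered_covs,
    predicted_covs,
    observed_covs,
    ll_obs,
) = StandardFilter().build_graph(*inputs)

# The smoother runs backward over the filtered outputs.
smoothed_states, smoothed_covs = KalmanSmoother().build_graph(
    T, R, Q, filtered_states, filtered_covs
)

f = pytensor.function(inputs, [smoothed_states, smoothed_covs, ll_obs.sum()])
```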
tests/statespace/test_representation.py (deleted)
@@ -1,175 +0,0 @@
-import unittest
-
-import numpy as np
-import pytensor
-import pytensor.tensor as pt
-
-from numpy.testing import assert_allclose
-
-from pymc_extras.statespace.core.representation import PytensorRepresentation
-from tests.statespace.utilities.shared_fixtures import TEST_SEED
-from tests.statespace.utilities.test_helpers import fast_eval, make_test_inputs
-
-floatX = pytensor.config.floatX
-atol = 1e-12 if floatX == "float64" else 1e-6
-
-
-def unpack_ssm_dims(ssm):
-    p = ssm.k_endog
-    m = ssm.k_states
-    r = ssm.k_posdef
-
-    return p, m, r
-
-
-class BasicFunctionality(unittest.TestCase):
-    def setUp(self):
-        self.rng = np.random.default_rng(TEST_SEED)
-
-    def test_numpy_to_pytensor(self):
-        ssm = PytensorRepresentation(k_endog=3, k_states=5, k_posdef=1)
-        X = np.eye(5)
-        X_pt = ssm._numpy_to_pytensor("transition", X)
-        self.assertTrue(isinstance(X_pt, pt.TensorVariable))
-        assert_allclose(ssm["transition"].type.shape, X.shape)
-
-        assert ssm["transition"].name == "transition"
-
-    def test_default_shapes_full_rank(self):
-        ssm = PytensorRepresentation(k_endog=5, k_states=5, k_posdef=5)
-        p, m, r = unpack_ssm_dims(ssm)
-
-        assert_allclose(ssm["design"].type.shape, (p, m))
-        assert_allclose(ssm["transition"].type.shape, (m, m))
-        assert_allclose(ssm["selection"].type.shape, (m, r))
-        assert_allclose(ssm["state_cov"].type.shape, (r, r))
-        assert_allclose(ssm["obs_cov"].type.shape, (p, p))
-
-    def test_default_shapes_low_rank(self):
-        ssm = PytensorRepresentation(k_endog=5, k_states=5, k_posdef=2)
-        p, m, r = unpack_ssm_dims(ssm)
-
-        assert_allclose(ssm["design"].type.shape, (p, m))
-        assert_allclose(ssm["transition"].type.shape, (m, m))
-        assert_allclose(ssm["selection"].type.shape, (m, r))
-        assert_allclose(ssm["state_cov"].type.shape, (r, r))
-        assert_allclose(ssm["obs_cov"].type.shape, (p, p))
-
-    def test_matrix_assignment(self):
-        ssm = PytensorRepresentation(k_endog=3, k_states=5, k_posdef=2)
-
-        ssm["design", 0, 0] = 3.0
-        ssm["transition", 0, :] = 2.7
-        ssm["selection", -1, -1] = 9.9
-
-        assert_allclose(fast_eval(ssm["design"][0, 0]), 3.0, atol=atol)
-        assert_allclose(fast_eval(ssm["transition"][0, :]), 2.7, atol=atol)
-        assert_allclose(fast_eval(ssm["selection"][-1, -1]), 9.9, atol=atol)
-
-        assert ssm["design"].name == "design"
-        assert ssm["transition"].name == "transition"
-        assert ssm["selection"].name == "selection"
-
-    def test_build_representation_from_data(self):
-        p, m, r, n = 3, 6, 1, 10
-        inputs = [data, a0, P0, c, d, T, Z, R, H, Q] = make_test_inputs(
-            p, m, r, n, self.rng, missing_data=0
-        )
-
-        ssm = PytensorRepresentation(
-            k_endog=p,
-            k_states=m,
-            k_posdef=r,
-            design=Z,
-            transition=T,
-            selection=R,
-            state_cov=Q,
-            obs_cov=H,
-            initial_state=a0,
-            initial_state_cov=P0,
-            state_intercept=c,
-            obs_intercept=d,
-        )
-
-        names = [
-            "initial_state",
-            "initial_state_cov",
-            "state_intercept",
-            "obs_intercept",
-            "transition",
-            "design",
-            "selection",
-            "obs_cov",
-            "state_cov",
-        ]
-
-        for name, X in zip(names, inputs[1:]):
-            assert_allclose(X, fast_eval(ssm[name]), err_msg=name)
-
-        for name, X in zip(names, inputs[1:]):
-            assert ssm[name].name == name
-            assert_allclose(ssm[name].type.shape, X.shape, err_msg=f"{name} shape test")
-
-    def test_assign_time_varying_matrices(self):
-        ssm = PytensorRepresentation(k_endog=3, k_states=5, k_posdef=2)
-        n = 10
-
-        ssm["design", 0, 0] = 3.0
-        ssm["transition", 0, :] = 2.7
-        ssm["selection", -1, -1] = 9.9
-
-        ssm["state_intercept"] = np.zeros((n, 5))
-        ssm["state_intercept", :, 0] = np.arange(n)
-
-        assert_allclose(fast_eval(ssm["design"][0, 0]), 3.0, atol=atol)
-        assert_allclose(fast_eval(ssm["transition"][0, :]), 2.7, atol=atol)
-        assert_allclose(fast_eval(ssm["selection"][-1, -1]), 9.9, atol=atol)
-        assert_allclose(fast_eval(ssm["state_intercept"][:, 0]), np.arange(n), atol=atol)
-
-    def test_invalid_key_name_raises(self):
-        ssm = PytensorRepresentation(k_endog=3, k_states=5, k_posdef=1)
-        with self.assertRaises(IndexError) as e:
-            X = ssm["invalid_key"]
-        msg = str(e.exception)
-        self.assertEqual(msg, "invalid_key is an invalid state space matrix name")
-
-    def test_non_string_key_raises(self):
-        ssm = PytensorRepresentation(k_endog=3, k_states=5, k_posdef=1)
-        with self.assertRaises(IndexError) as e:
-            X = ssm[0]
-        msg = str(e.exception)
-        self.assertEqual(msg, "First index must the name of a valid state space matrix.")
-
-    def test_invalid_key_tuple_raises(self):
-        ssm = PytensorRepresentation(k_endog=3, k_states=5, k_posdef=1)
-        with self.assertRaises(IndexError) as e:
-            X = ssm[0, 1, 1]
-        msg = str(e.exception)
-        self.assertEqual(msg, "First index must the name of a valid state space matrix.")
-
-    def test_slice_statespace_matrix(self):
-        T = np.eye(5)
-        ssm = PytensorRepresentation(k_endog=3, k_states=5, k_posdef=1, transition=T)
-        T_out = ssm["transition", :3, :]
-        assert_allclose(T[:3], fast_eval(T_out))
-
-    def test_update_matrix_via_key(self):
-        T = np.eye(5)
-        ssm = PytensorRepresentation(k_endog=3, k_states=5, k_posdef=1)
-        ssm["transition"] = T
-
-        assert_allclose(T, fast_eval(ssm["transition"]))
-
-    def test_update_matrix_with_invalid_shape_raises(self):
-        T = np.eye(10)
-        ssm = PytensorRepresentation(k_endog=3, k_states=5, k_posdef=1)
-        with self.assertRaises(ValueError) as e:
-            ssm["transition"] = T
-        msg = str(e.exception)
-        self.assertEqual(
-            msg, "The last two dimensions of transition must be (5, 5), found (10, 10)"
-        )
-
-
-if __name__ == "__main__":
-    unittest.main()
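The removed `tests/statespace/test_representation.py` pinned down the dictionary-style indexing contract of `PytensorRepresentation`. Here is a short sketch of that contract, using only calls that appear verbatim in the deleted file; it records 0.2.5 behavior as captured in this diff, which 0.2.6 may have changed.

```python
# Hedged sketch of the PytensorRepresentation indexing contract the deleted
# tests covered, assuming the 0.2.5 behavior shown above.
import numpy as np

from pymc_extras.statespace.core.representation import PytensorRepresentation

ssm = PytensorRepresentation(k_endog=3, k_states=5, k_posdef=2)

# Matrices are addressed by name, with optional index or slice assignment.
ssm["design", 0, 0] = 3.0        # scalar assignment
ssm["transition", 0, :] = 2.7    # row assignment
ssm["transition"] = np.eye(5)    # whole-matrix replacement; must be (5, 5)

# Slicing returns a symbolic sub-matrix of the named state-space matrix.
T_block = ssm["transition", :3, :]

# Per the deleted tests: invalid names and non-string keys raise IndexError,
# and assigning a wrongly shaped matrix raises ValueError.
```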