pymc-extras 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pymc_extras/__init__.py +5 -1
- pymc_extras/deserialize.py +224 -0
- pymc_extras/distributions/continuous.py +3 -2
- pymc_extras/distributions/discrete.py +3 -1
- pymc_extras/inference/find_map.py +62 -17
- pymc_extras/inference/laplace.py +10 -7
- pymc_extras/prior.py +1356 -0
- pymc_extras/statespace/core/statespace.py +191 -52
- pymc_extras/statespace/filters/distributions.py +15 -16
- pymc_extras/statespace/filters/kalman_filter.py +1 -18
- pymc_extras/statespace/filters/kalman_smoother.py +2 -6
- pymc_extras/statespace/models/ETS.py +10 -0
- pymc_extras/statespace/models/SARIMAX.py +26 -5
- pymc_extras/statespace/models/VARMAX.py +12 -2
- pymc_extras/statespace/models/structural.py +18 -5
- pymc_extras-0.2.7.dist-info/METADATA +321 -0
- pymc_extras-0.2.7.dist-info/RECORD +66 -0
- {pymc_extras-0.2.5.dist-info → pymc_extras-0.2.7.dist-info}/WHEEL +1 -2
- pymc_extras/utils/pivoted_cholesky.py +0 -69
- pymc_extras/version.py +0 -11
- pymc_extras/version.txt +0 -1
- pymc_extras-0.2.5.dist-info/METADATA +0 -112
- pymc_extras-0.2.5.dist-info/RECORD +0 -108
- pymc_extras-0.2.5.dist-info/top_level.txt +0 -2
- tests/__init__.py +0 -13
- tests/distributions/__init__.py +0 -19
- tests/distributions/test_continuous.py +0 -185
- tests/distributions/test_discrete.py +0 -210
- tests/distributions/test_discrete_markov_chain.py +0 -258
- tests/distributions/test_multivariate.py +0 -304
- tests/distributions/test_transform.py +0 -77
- tests/model/__init__.py +0 -0
- tests/model/marginal/__init__.py +0 -0
- tests/model/marginal/test_distributions.py +0 -132
- tests/model/marginal/test_graph_analysis.py +0 -182
- tests/model/marginal/test_marginal_model.py +0 -967
- tests/model/test_model_api.py +0 -38
- tests/statespace/__init__.py +0 -0
- tests/statespace/test_ETS.py +0 -411
- tests/statespace/test_SARIMAX.py +0 -405
- tests/statespace/test_VARMAX.py +0 -184
- tests/statespace/test_coord_assignment.py +0 -181
- tests/statespace/test_distributions.py +0 -270
- tests/statespace/test_kalman_filter.py +0 -326
- tests/statespace/test_representation.py +0 -175
- tests/statespace/test_statespace.py +0 -872
- tests/statespace/test_statespace_JAX.py +0 -156
- tests/statespace/test_structural.py +0 -836
- tests/statespace/utilities/__init__.py +0 -0
- tests/statespace/utilities/shared_fixtures.py +0 -9
- tests/statespace/utilities/statsmodel_local_level.py +0 -42
- tests/statespace/utilities/test_helpers.py +0 -310
- tests/test_blackjax_smc.py +0 -222
- tests/test_find_map.py +0 -103
- tests/test_histogram_approximation.py +0 -109
- tests/test_laplace.py +0 -281
- tests/test_linearmodel.py +0 -208
- tests/test_model_builder.py +0 -306
- tests/test_pathfinder.py +0 -297
- tests/test_pivoted_cholesky.py +0 -24
- tests/test_printing.py +0 -98
- tests/test_prior_from_trace.py +0 -172
- tests/test_splines.py +0 -77
- tests/utils.py +0 -0
- {pymc_extras-0.2.5.dist-info → pymc_extras-0.2.7.dist-info}/licenses/LICENSE +0 -0
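The headline additions in this diff are pymc_extras/prior.py (+1356) and pymc_extras/deserialize.py (+224), which appear to port the declarative Prior class and its dict (de)serialization registry from pymc-marketing. A minimal sketch of how the pieces would fit together, assuming the ported modules keep that familiar API (Prior, to_dict, create_variable, and deserialize are assumptions based on the pymc-marketing lineage, not confirmed by this diff):

# Sketch under assumptions: pymc_extras.prior and pymc_extras.deserialize are
# presumed to carry over the pymc-marketing Prior API; names are unverified here.
import pymc as pm

from pymc_extras.deserialize import deserialize
from pymc_extras.prior import Prior

# A prior is declared as data rather than as a model-bound variable...
mu_prior = Prior("Normal", mu=0.0, sigma=1.0)

# ...so it can round-trip through plain dicts, e.g. for config files.
as_dict = mu_prior.to_dict()
restored = deserialize(as_dict)

with pm.Model():
    # Only here does the declaration become an actual PyMC random variable.
    mu = mu_prior.create_variable("mu")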
tests/distributions/test_discrete_markov_chain.py
DELETED
@@ -1,258 +0,0 @@
-import numpy as np
-import pymc as pm
-
-# general imports
-import pytensor.tensor as pt
-import pytest
-
-from pymc.distributions import Categorical
-from pymc.distributions.shape_utils import change_dist_size
-from pymc.logprob.utils import ParameterValueError
-from pymc.sampling.mcmc import assign_step_methods
-
-from pymc_extras.distributions.timeseries import (
-    DiscreteMarkovChain,
-    DiscreteMarkovChainGibbsMetropolis,
-)
-
-
-def transition_probability_tests(steps, n_states, n_lags, n_draws, atol):
-    P = np.full((n_states,) * (n_lags + 1), 1 / n_states)
-    x0 = pm.Categorical.dist(p=np.ones(n_states) / n_states)
-
-    chain = DiscreteMarkovChain.dist(
-        P=pt.as_tensor_variable(P), init_dist=x0, steps=steps, n_lags=n_lags
-    )
-
-    draws = pm.draw(chain, n_draws, random_seed=172)
-
-    # Test x0 is uniform over n_states
-    for i in range(n_lags):
-        assert np.allclose(
-            np.histogram(draws[:, ..., i], bins=n_states)[0] / n_draws, 1 / n_states, atol=atol
-        )
-
-    n_grams = [[tuple(row[i : i + n_lags + 1]) for i in range(len(row) - n_lags)] for row in draws]
-    freq_table = np.zeros((n_states,) * (n_lags + 1))
-
-    for row in n_grams:
-        for ngram in row:
-            freq_table[ngram] += 1
-    freq_table /= freq_table.sum(axis=-1)[:, None]
-
-    # Test continuation probabilities match P
-    assert np.allclose(P, freq_table, atol=atol)
-
-
-class TestDiscreteMarkovRV:
-    def test_fail_if_P_not_square(self):
-        P = pt.eye(3, 2)
-        x0 = pm.Categorical.dist(p=np.ones(3) / 3)
-
-        chain = DiscreteMarkovChain.dist(P=P, init_dist=x0, steps=3)
-        with pytest.raises(ParameterValueError):
-            pm.logp(chain, np.zeros((3,))).eval()
-
-    def test_fail_if_P_not_valid(self):
-        P = pt.zeros((3, 3))
-        x0 = pm.Categorical.dist(p=np.ones(3) / 3)
-        chain = DiscreteMarkovChain.dist(P=P, init_dist=x0, steps=3)
-        with pytest.raises(ParameterValueError):
-            pm.logp(chain, np.zeros((3,))).eval()
-
-    def test_high_dimensional_P(self):
-        P = pm.Dirichlet.dist(a=pt.ones(3), size=(3, 3, 3))
-        n_lags = 3
-        x0 = pm.Categorical.dist(p=np.ones(3) / 3)
-        chain = DiscreteMarkovChain.dist(P=P, steps=10, init_dist=x0, n_lags=n_lags)
-        draws = pm.draw(chain, 10)
-        logp = pm.logp(chain, draws)
-
-    def test_default_init_dist_warns_user(self):
-        P = pt.as_tensor_variable(np.array([[0.1, 0.5, 0.4], [0.3, 0.4, 0.3], [0.9, 0.05, 0.05]]))
-
-        with pytest.warns(UserWarning):
-            DiscreteMarkovChain.dist(P=P, steps=3)
-
-    def test_logp_shape(self):
-        P = pt.as_tensor_variable(np.array([[0.1, 0.5, 0.4], [0.3, 0.4, 0.3], [0.9, 0.05, 0.05]]))
-        x0 = pm.Categorical.dist(p=np.ones(3) / 3)
-
-        # Test with steps
-        chain = DiscreteMarkovChain.dist(P=P, init_dist=x0, steps=3)
-        draws = pm.draw(chain, 5)
-        logp = pm.logp(chain, draws).eval()
-
-        assert logp.shape == (5,)
-
-        # Test with shape
-        chain = DiscreteMarkovChain.dist(P=P, init_dist=x0, shape=(3,))
-        draws = pm.draw(chain, 5)
-        logp = pm.logp(chain, draws).eval()
-
-        assert logp.shape == (5,)
-
-    def test_logp_with_default_init_dist(self):
-        P = pt.as_tensor_variable(np.array([[0.1, 0.5, 0.4], [0.3, 0.4, 0.3], [0.9, 0.05, 0.05]]))
-        x0 = pm.Categorical.dist(p=np.ones(3) / 3)
-
-        value = np.array([0, 1, 2])
-        logp_expected = np.log((1 / 3) * 0.5 * 0.3)
-
-        # Test dist directly
-        chain = DiscreteMarkovChain.dist(P=P, init_dist=x0, steps=3)
-        logp_eval = pm.logp(chain, value).eval()
-        np.testing.assert_allclose(logp_eval, logp_expected, rtol=1e-6)
-
-        # Test via Model
-        with pm.Model() as m:
-            DiscreteMarkovChain("chain", P=P, init_dist=x0, steps=3)
-        model_logp_eval = m.compile_logp()({"chain": value})
-        np.testing.assert_allclose(model_logp_eval, logp_expected, rtol=1e-6)
-
-    def test_logp_with_user_defined_init_dist(self):
-        P = pt.as_tensor_variable(np.array([[0.1, 0.5, 0.4], [0.3, 0.4, 0.3], [0.9, 0.05, 0.05]]))
-        x0 = pm.Categorical.dist(p=[0.2, 0.6, 0.2])
-        chain = DiscreteMarkovChain.dist(P=P, init_dist=x0, steps=3)
-
-        logp = pm.logp(chain, [0, 1, 2]).eval()
-        assert logp == np.log(0.2 * 0.5 * 0.3)
-
-    def test_moment_function(self):
-        P_np = np.array([[0.1, 0.5, 0.4], [0.3, 0.4, 0.3], [0.9, 0.05, 0.05]])
-
-        x0_np = np.array([0, 1, 0])
-
-        P = pt.as_tensor_variable(P_np)
-        x0 = pm.Categorical.dist(p=x0_np.tolist())
-        n_steps = 3
-
-        chain = DiscreteMarkovChain.dist(P=P, init_dist=x0, steps=n_steps)
-
-        chain_np = np.empty(shape=n_steps + 1, dtype="int8")
-        chain_np[0] = np.argmax(x0_np)
-        for i in range(n_steps):
-            state = chain_np[i]
-            chain_np[i + 1] = np.argmax(P_np[state])
-
-        dmc_chain = pm.distributions.distribution.support_point(chain).eval()
-
-        assert np.allclose(dmc_chain, chain_np)
-
-    def test_define_steps_via_shape_arg(self):
-        P = pt.full((3, 3), 1 / 3)
-        x0 = pm.Categorical.dist(p=np.ones(3) / 3)
-
-        chain = DiscreteMarkovChain.dist(P=P, init_dist=x0, shape=(3,))
-        assert chain.eval().shape == (3,)
-
-        chain = DiscreteMarkovChain.dist(P=P, init_dist=x0, shape=(3, 2))
-        assert chain.eval().shape == (3, 2)
-
-    def test_define_steps_via_dim_arg(self):
-        coords = {"steps": [1, 2, 3]}
-
-        with pm.Model(coords=coords):
-            P = pt.full((3, 3), 1 / 3)
-            x0 = pm.Categorical.dist(p=np.ones(3) / 3)
-
-            chain = DiscreteMarkovChain("chain", P=P, init_dist=x0, dims=["steps"])
-
-            assert chain.eval().shape == (3,)
-
-    def test_dims_when_steps_are_defined(self):
-        coords = {"steps": [1, 2, 3, 4]}
-
-        with pm.Model(coords=coords):
-            P = pt.full((3, 3), 1 / 3)
-            x0 = pm.Categorical.dist(p=np.ones(3) / 3)
-
-            chain = DiscreteMarkovChain("chain", P=P, steps=3, init_dist=x0, dims=["steps"])
-
-            assert chain.eval().shape == (4,)
-
-    def test_multiple_dims_with_steps(self):
-        coords = {"steps": [1, 2, 3], "mc_chains": [1, 2, 3]}
-
-        with pm.Model(coords=coords):
-            P = pt.full((3, 3), 1 / 3)
-            x0 = pm.Categorical.dist(p=np.ones(3) / 3)
-
-            chain = DiscreteMarkovChain(
-                "chain", P=P, steps=2, init_dist=x0, dims=["steps", "mc_chains"]
-            )
-
-            assert chain.eval().shape == (3, 3)
-
-    def test_mutiple_dims_with_steps_and_init_dist(self):
-        coords = {"steps": [1, 2, 3], "mc_chains": [1, 2, 3]}
-
-        with pm.Model(coords=coords):
-            P = pt.full((3, 3), 1 / 3)
-            x0 = pm.Categorical.dist(p=[0.1, 0.1, 0.8], size=(3,))
-            chain = DiscreteMarkovChain(
-                "chain", P=P, init_dist=x0, steps=2, dims=["steps", "mc_chains"]
-            )
-
-            assert chain.eval().shape == (3, 3)
-
-    def test_multiple_lags_with_data(self):
-        with pm.Model():
-            P = pt.full((3, 3, 3), 1 / 3)
-            x0 = pm.Categorical.dist(p=[0.1, 0.1, 0.8], size=2)
-            data = pm.draw(x0, 100)
-
-            chain = DiscreteMarkovChain("chain", P=P, init_dist=x0, n_lags=2, observed=data)
-
-            assert chain.eval().shape == (100, 2)
-
-    def test_random_draws(self):
-        transition_probability_tests(steps=3, n_states=2, n_lags=1, n_draws=2500, atol=0.05)
-        transition_probability_tests(steps=3, n_states=2, n_lags=3, n_draws=7500, atol=0.05)
-
-    def test_change_size_univariate(self):
-        P = pt.as_tensor_variable(np.array([[0.1, 0.5, 0.4], [0.3, 0.4, 0.3], [0.9, 0.05, 0.05]]))
-        x0 = pm.Categorical.dist(p=np.ones(3) / 3)
-
-        chain = DiscreteMarkovChain.dist(P=P, init_dist=x0, shape=(100, 5))
-
-        new_rw = change_dist_size(chain, new_size=(7,))
-        assert tuple(new_rw.shape.eval()) == (7, 5)
-
-        new_rw = change_dist_size(chain, new_size=(4, 3), expand=True)
-        assert tuple(new_rw.shape.eval()) == (4, 3, 100, 5)
-
-    def test_mcmc_sampling(self):
-        with pm.Model(coords={"step": range(100)}) as model:
-            init_dist = Categorical.dist(p=[0.5, 0.5])
-            markov_chain = DiscreteMarkovChain(
-                "markov_chain",
-                P=[[0.1, 0.9], [0.1, 0.9]],
-                init_dist=init_dist,
-                shape=(100,),
-                dims="step",
-            )
-
-            _, assigned_step_methods = assign_step_methods(model)
-            assert assigned_step_methods[DiscreteMarkovChainGibbsMetropolis] == [
-                model.rvs_to_values[markov_chain]
-            ]
-
-            # Sampler needs no tuning
-            idata = pm.sample(
-                tune=0, chains=4, draws=250, progressbar=False, compute_convergence_checks=False
-            )
-
-        np.testing.assert_allclose(
-            idata.posterior["markov_chain"].isel(step=0).mean(("chain", "draw")),
-            0.5,
-            atol=0.05,
-        )
-
-        np.testing.assert_allclose(
-            idata.posterior["markov_chain"].isel(step=slice(1, None)).mean(("chain", "draw")),
-            0.9,
-            atol=0.05,
-        )
-
-        assert pm.stats.ess(idata, method="tail").min() > 950
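The deleted file above covered DiscreteMarkovChain; the diff removes the test suite from the wheel, not the distribution itself, which stays in pymc_extras/distributions. For orientation, a minimal usage sketch assembled from the calls the deleted tests exercised:

import numpy as np
import pymc as pm
import pytensor.tensor as pt

from pymc_extras.distributions.timeseries import DiscreteMarkovChain

# Row-stochastic transition matrix: P[i, j] = P(next state j | current state i)
P = pt.as_tensor_variable(
    np.array([[0.1, 0.5, 0.4], [0.3, 0.4, 0.3], [0.9, 0.05, 0.05]])
)
x0 = pm.Categorical.dist(p=np.ones(3) / 3)  # uniform initial state

chain = DiscreteMarkovChain.dist(P=P, init_dist=x0, steps=3)
draws = pm.draw(chain, 5)            # 5 chains, each the initial state plus 3 steps
logp = pm.logp(chain, draws).eval()  # shape (5,): one joint log-prob per chain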
tests/distributions/test_multivariate.py
DELETED
@@ -1,304 +0,0 @@
-import numpy as np
-import pymc as pm
-import pytensor
-import pytest
-
-import pymc_extras as pmx
-
-
-class TestR2D2M2CP:
-    @pytest.fixture(autouse=True)
-    def fast_compile(self):
-        with pytensor.config.change_flags(mode="FAST_COMPILE", exception_verbosity="high"):
-            yield
-
-    @pytest.fixture(autouse=True)
-    def model(self):
-        # every method is within a model
-        with pm.Model() as model:
-            yield model
-
-    @pytest.fixture(params=[True, False], ids=["centered", "non-centered"])
-    def centered(self, request):
-        return request.param
-
-    @pytest.fixture(params=[["a"], ["a", "b"], ["one"]])
-    def dims(self, model: pm.Model, request):
-        for i, c in enumerate(request.param):
-            if c == "one":
-                model.add_coord(c, range(1))
-            else:
-                model.add_coord(c, range((i + 2) ** 2))
-        return request.param
-
-    @pytest.fixture
-    def input_shape(self, dims, model):
-        return [int(model.dim_lengths[d].eval()) for d in dims]
-
-    @pytest.fixture
-    def output_shape(self, dims, model):
-        *hierarchy, _ = dims
-        return [int(model.dim_lengths[d].eval()) for d in hierarchy]
-
-    @pytest.fixture
-    def input_std(self, input_shape):
-        return np.ones(input_shape)
-
-    @pytest.fixture
-    def output_std(self, output_shape):
-        return np.ones(output_shape)
-
-    @pytest.fixture
-    def r2(self):
-        return 0.8
-
-    @pytest.fixture(params=[None, 0.1], ids=["r2-std", "no-r2-std"])
-    def r2_std(self, request):
-        return request.param
-
-    @pytest.fixture(params=["true", "false", "limit-1", "limit-0", "limit-all"])
-    def positive_probs(self, input_std, request):
-        if request.param == "true":
-            return np.full_like(input_std, 0.5)
-        elif request.param == "false":
-            return 0.5
-        elif request.param == "limit-1":
-            ret = np.full_like(input_std, 0.5)
-            ret[..., 0] = 1
-            return ret
-        elif request.param == "limit-0":
-            ret = np.full_like(input_std, 0.5)
-            ret[..., 0] = 0
-            return ret
-        elif request.param == "limit-all":
-            return np.full_like(input_std, 0)
-
-    @pytest.fixture(params=[True, False], ids=["probs-std", "no-probs-std"])
-    def positive_probs_std(self, positive_probs, request):
-        if request.param:
-            std = np.full_like(positive_probs, 0.1)
-            std[positive_probs == 0] = 0
-            std[positive_probs == 1] = 0
-            return std
-        else:
-            return None
-
-    @pytest.fixture(params=[None, "importance", "explained"])
-    def phi_args_base(self, request, input_shape):
-        if input_shape[-1] < 2 and request.param is not None:
-            pytest.skip("not compatible")
-        elif request.param is None:
-            return {}
-        elif request.param == "importance":
-            return {"variables_importance": np.full(input_shape, 2)}
-        else:
-            val = np.full(input_shape, 2)
-            return {"variance_explained": val / val.sum(-1, keepdims=True)}
-
-    @pytest.fixture(params=["concentration", "no-concentration"])
-    def phi_args(self, request, phi_args_base):
-        if request.param == "concentration":
-            phi_args_base["importance_concentration"] = 10
-        return phi_args_base
-
-    def test_init_r2(
-        self,
-        dims,
-        input_std,
-        output_std,
-        r2,
-        r2_std,
-        model: pm.Model,
-    ):
-        eps, beta = pmx.distributions.R2D2M2CP(
-            "beta",
-            output_std,
-            input_std,
-            dims=dims,
-            r2=r2,
-            r2_std=r2_std,
-        )
-        assert not np.isnan(beta.eval()).any()
-        assert eps.eval().shape == output_std.shape
-        assert beta.eval().shape == input_std.shape
-        # r2 rv is only created if r2 std is not None
-        assert "beta" in model.named_vars
-        assert ("beta::r2" in model.named_vars) == (r2_std is not None), set(model.named_vars)
-        # phi is only created if variable importance is not None and there is more than one var
-        assert np.isfinite(model.compile_logp()(model.initial_point()))
-
-    def test_init_importance(
-        self,
-        dims,
-        centered,
-        input_std,
-        output_std,
-        phi_args,
-        model: pm.Model,
-    ):
-        eps, beta = pmx.distributions.R2D2M2CP(
-            "beta",
-            output_std,
-            input_std,
-            dims=dims,
-            r2=1,
-            centered=centered,
-            **phi_args,
-        )
-        assert not np.isnan(beta.eval()).any()
-        assert eps.eval().shape == output_std.shape
-        assert beta.eval().shape == input_std.shape
-        # r2 rv is only created if r2 std is not None
-        assert "beta" in model.named_vars
-        # phi is only created if variable importance is not None and there is more than one var
-        assert ("beta::phi" in model.named_vars) == (
-            "variables_importance" in phi_args or "importance_concentration" in phi_args
-        ), set(model.named_vars)
-        assert np.isfinite(model.compile_logp()(model.initial_point()))
-
-    def test_init_positive_probs(
-        self,
-        dims,
-        centered,
-        input_std,
-        output_std,
-        positive_probs,
-        positive_probs_std,
-        model: pm.Model,
-    ):
-        eps, beta = pmx.distributions.R2D2M2CP(
-            "beta",
-            output_std,
-            input_std,
-            dims=dims,
-            r2=1.0,
-            centered=centered,
-            positive_probs_std=positive_probs_std,
-            positive_probs=positive_probs,
-        )
-        assert not np.isnan(beta.eval()).any()
-        assert eps.eval().shape == output_std.shape
-        assert beta.eval().shape == input_std.shape
-        # r2 rv is only created if r2 std is not None
-        assert "beta" in model.named_vars
-        # phi is only created if variable importance is not None and there is more than one var
-        assert ("beta::psi" in model.named_vars) == (
-            positive_probs_std is not None and positive_probs_std.any()
-        ), set(model.named_vars)
-        assert np.isfinite(sum(model.point_logps().values()))
-
-    def test_failing_importance(self, dims, input_shape, output_std, input_std):
-        if input_shape[-1] < 2:
-            with pytest.raises(TypeError, match="less than two variables"):
-                pmx.distributions.R2D2M2CP(
-                    "beta",
-                    output_std,
-                    input_std,
-                    dims=dims,
-                    r2=0.8,
-                    variables_importance=abs(input_std),
-                )
-        else:
-            pmx.distributions.R2D2M2CP(
-                "beta",
-                output_std,
-                input_std,
-                dims=dims,
-                r2=0.8,
-                variables_importance=abs(input_std),
-            )
-
-    def test_failing_variance_explained(self, dims, input_shape, output_std, input_std):
-        if input_shape[-1] < 2:
-            with pytest.raises(TypeError, match="less than two variables"):
-                pmx.distributions.R2D2M2CP(
-                    "beta",
-                    output_std,
-                    input_std,
-                    dims=dims,
-                    r2=0.8,
-                    variance_explained=abs(input_std),
-                )
-        else:
-            pmx.distributions.R2D2M2CP(
-                "beta", output_std, input_std, dims=dims, r2=0.8, variance_explained=abs(input_std)
-            )
-
-    def test_failing_mutual_exclusive(self, model: pm.Model):
-        with pytest.raises(TypeError, match="variable importance with variance explained"):
-            with model:
-                model.add_coord("a", range(2))
-                pmx.distributions.R2D2M2CP(
-                    "beta",
-                    1,
-                    [1, 1],
-                    dims="a",
-                    r2=0.8,
-                    variance_explained=[0.5, 0.5],
-                    variables_importance=[1, 1],
-                )
-
-    def test_limit_case_requires_std_0(self, model: pm.Model):
-        model.add_coord("a", range(2))
-        with pytest.raises(ValueError, match="Can't have both positive_probs"):
-            pmx.distributions.R2D2M2CP(
-                "beta",
-                1,
-                [1, 1],
-                dims="a",
-                r2=0.8,
-                positive_probs=[0.5, 0],
-                positive_probs_std=[0.3, 0.1],
-            )
-        with pytest.raises(ValueError, match="Can't have both positive_probs"):
-            pmx.distributions.R2D2M2CP(
-                "beta",
-                1,
-                [1, 1],
-                dims="a",
-                r2=0.8,
-                positive_probs=[0.5, 1],
-                positive_probs_std=[0.3, 0.1],
-            )
-
-    def test_limit_case_creates_masked_vars(self, model: pm.Model, centered: bool):
-        model.add_coord("a", range(2))
-        pmx.distributions.R2D2M2CP(
-            "beta0",
-            1,
-            [1, 1],
-            dims="a",
-            r2=0.8,
-            positive_probs=[0.5, 1],
-            positive_probs_std=[0.3, 0],
-            centered=centered,
-        )
-        pmx.distributions.R2D2M2CP(
-            "beta1",
-            1,
-            [1, 1],
-            dims="a",
-            r2=0.8,
-            positive_probs=[0.5, 0],
-            positive_probs_std=[0.3, 0],
-            centered=centered,
-        )
-        if not centered:
-            assert "beta0::raw::masked" in model.named_vars, model.named_vars
-            assert "beta1::raw::masked" in model.named_vars, model.named_vars
-        else:
-            assert "beta0::masked" in model.named_vars, model.named_vars
-            assert "beta1::masked" in model.named_vars, model.named_vars
-        assert "beta1::psi::masked" in model.named_vars
-        assert "beta0::psi::masked" in model.named_vars
-
-    def test_zero_length_rvs_not_created(self, model: pm.Model):
-        model.add_coord("a", range(2))
-        # deterministic case which should not have any new variables
-        b = pmx.distributions.R2D2M2CP("b1", 1, [1, 1], r2=0.5, positive_probs=[1, 1], dims="a")
-        assert not model.free_RVs, model.free_RVs
-
-        b = pmx.distributions.R2D2M2CP(
-            "b2", 1, [1, 1], r2=0.5, positive_probs=[1, 1], positive_probs_std=[0, 0], dims="a"
-        )
-        assert not model.free_RVs, model.free_RVs
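Similarly, the R2D2M2CP prior exercised above stays in the package; only its tests leave the wheel. A short sketch mirroring the call pattern the deleted tests rely on (positional output and input standard deviations, then keyword options; the comments on eps and beta follow the shape assertions in the tests):

import pymc as pm

import pymc_extras as pmx

with pm.Model(coords={"a": range(2)}) as model:
    # Positional arguments after the name are the output std and the
    # per-variable input stds; by the shape assertions in the tests,
    # eps matches the output scale and beta holds one weight per input.
    eps, beta = pmx.distributions.R2D2M2CP(
        "beta",
        1,           # output std
        [1, 1],      # input stds, one per coordinate of dim "a"
        dims="a",
        r2=0.8,      # prior belief about the explained variance
        r2_std=0.1,  # makes R2 itself random; adds a "beta::r2" variable
    )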
tests/distributions/test_transform.py
DELETED
@@ -1,77 +0,0 @@
-# Copyright 2025 The PyMC Developers
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import numpy as np
-import pymc as pm
-
-from pymc_extras.distributions.transforms import PartialOrder
-
-
-class TestPartialOrder:
-    adj_mats = np.array(
-        [
-            # 0 < {1, 2} < 3
-            [[0, 1, 1, 0], [0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 0]],
-            # 1 < 0 < 3 < 2
-            [[0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0]],
-        ]
-    )
-
-    valid_values = np.array([[0, 2, 1, 3], [1, 0, 3, 2]], dtype=float)
-
-    # Test that forward and backward are inverses of eachother
-    # And that it works when extra dimensions are added in data
-    def test_forward_backward_dimensionality(self):
-        po = PartialOrder(self.adj_mats)
-        po0 = PartialOrder(self.adj_mats[0])
-        vv = self.valid_values
-        vv0 = self.valid_values[0]
-
-        testsets = [
-            (vv, po),
-            (po.initvals(), po),
-            (vv0, po0),
-            (po0.initvals(), po0),
-            (np.tile(vv0, (2, 1)), po0),
-            (np.tile(vv0, (2, 3, 2, 1)), po0),
-            (np.tile(vv, (2, 3, 2, 1, 1)), po),
-        ]
-
-        for vv, po in testsets:
-            fw = po.forward(vv)
-            bw = po.backward(fw)
-            np.testing.assert_allclose(bw.eval(), vv)
-
-    def test_sample_model(self):
-        po = PartialOrder(self.adj_mats)
-        with pm.Model() as model:
-            x = pm.Normal(
-                "x",
-                size=(3, 2, 4),
-                transform=po,
-                initval=po.initvals(shape=(3, 2, 4), lower=-1, upper=1),
-            )
-            idata = pm.sample()
-
-        # Check that the order constraints are satisfied
-        # Move chain, draw and "3" dimensions to the back
-        xvs = idata.posterior.x.values.transpose(3, 4, 0, 1, 2)
-        x0 = xvs[0]  # 0 < {1, 2} < 3
-        assert (
-            (x0[0] < x0[1]).all()
-            and (x0[0] < x0[2]).all()
-            and (x0[1] < x0[3]).all()
-            and (x0[2] < x0[3]).all()
-        )
-        x1 = xvs[1]  # 1 < 0 < 3 < 2
-        assert (x1[1] < x1[0]).all() and (x1[0] < x1[3]).all() and (x1[3] < x1[2]).all()
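The PartialOrder transform covered by this file likewise remains importable. A compact sketch built from the deleted test's own ingredients (the adjacency matrix and the initvals call are taken directly from the tests above):

import numpy as np
import pymc as pm

from pymc_extras.distributions.transforms import PartialOrder

# DAG adjacency matrix encoding the partial order 0 < {1, 2} < 3
adj_mat = np.array([[0, 1, 1, 0], [0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 0]])
po = PartialOrder(adj_mat)

with pm.Model():
    # Sampling happens on the unconstrained transformed space; every draw
    # of x then satisfies x[0] < x[1] < x[3] and x[0] < x[2] < x[3].
    x = pm.Normal("x", size=4, transform=po, initval=po.initvals())
    idata = pm.sample()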
tests/model/__init__.py
DELETED
File without changes

tests/model/marginal/__init__.py
DELETED
File without changes