pysips-0.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pysips/__init__.py +53 -0
- pysips/crossover_proposal.py +138 -0
- pysips/laplace_nmll.py +104 -0
- pysips/metropolis.py +126 -0
- pysips/mutation_proposal.py +220 -0
- pysips/prior.py +106 -0
- pysips/random_choice_proposal.py +177 -0
- pysips/regressor.py +451 -0
- pysips/sampler.py +159 -0
- pysips-0.0.0.dist-info/METADATA +156 -0
- pysips-0.0.0.dist-info/RECORD +26 -0
- pysips-0.0.0.dist-info/WHEEL +5 -0
- pysips-0.0.0.dist-info/licenses/LICENSE +94 -0
- pysips-0.0.0.dist-info/top_level.txt +2 -0
- tests/integration/test_log_likelihood.py +18 -0
- tests/integration/test_prior_with_bingo.py +45 -0
- tests/regression/test_basic_end_to_end.py +131 -0
- tests/regression/test_regressor_end_to_end.py +95 -0
- tests/unit/test_crossover_proposal.py +156 -0
- tests/unit/test_laplace_nmll.py +111 -0
- tests/unit/test_metropolis.py +111 -0
- tests/unit/test_mutation_proposal.py +196 -0
- tests/unit/test_prior.py +135 -0
- tests/unit/test_random_choice_proposal.py +136 -0
- tests/unit/test_regressor.py +227 -0
- tests/unit/test_sampler.py +133 -0
tests/unit/test_prior.py
ADDED
@@ -0,0 +1,135 @@
import pytest
import numpy as np

# Import the class to test
from pysips.prior import Prior, MAX_REPEATS


IMPORTMODULE = Prior.__module__


class TestPrior:

    @pytest.fixture
    def mock_generator(self, mocker):
        """Create a mock generator function"""
        generator = mocker.MagicMock()

        # Generate unique values when called
        models = ["model1", "model2", "model3", "model4", "model5"]
        generator.side_effect = models

        return generator, models

    def test_rvs_returns_correct_number_of_models(self, mock_generator, mocker):
        """Test rvs returns the requested number of models"""
        generator, models = mock_generator

        prior = Prior(generator)

        # Request 3 models
        result = prior.rvs(3)

        # Check generator called enough times
        assert generator.call_count == 3

        # Check correct shape and content
        assert result.shape == (3, 1)
        print(result)
        assert set(result.flatten()) == set(models[:3])

    def test_rvs_handles_duplicates(self, mocker):
        """Test rvs correctly handles duplicate models from generator"""
        # Create generator that sometimes returns duplicates
        generator = mocker.MagicMock()
        generator.side_effect = ["model1", "model2", "model2", "model3", "model4"]

        prior = Prior(generator)

        # Request 4 unique models (will need 5 calls due to duplicate)
        result = prior.rvs(4)

        # Check generator called enough times to get 4 unique models
        assert generator.call_count == 5

        # Check correct shape and content (should be 4 unique models)
        assert result.shape == (4, 1)
        assert set(result.flatten()) == {"model1", "model2", "model3", "model4"}

    @pytest.mark.parametrize("N", [0, 1, 5, 10])
    def test_rvs_with_various_sizes(self, mocker, N):
        """Test rvs with different values of N"""
        # Create generator that returns unique models
        counter = [0]

        def gen_unique_models():
            counter[0] += 1
            return f"model_{counter[0]}"

        generator = mocker.MagicMock(side_effect=gen_unique_models)

        prior = Prior(generator)

        # Request N models
        result = prior.rvs(N)

        # Check generator called correct number of times
        assert generator.call_count == N

        # Check result has correct shape
        assert result.shape == (N, 1)

        # Check all models are unique
        if N > 0:
            unique_models = set(result.flatten())
            assert len(unique_models) == N

    def test_warning_issued_for_excessive_repeats(self, mocker):
        """Test that a warning is issued when MAX_REPEATS consecutive duplicates occur"""
        # Create generator that returns the same model repeatedly
        generator = mocker.MagicMock()

        # First returns "model1", then MAX_REPEATS duplicates, then "model2"
        generator.side_effect = ["model1"] + ["model1"] * MAX_REPEATS + ["model2"]

        prior = Prior(generator)

        # Request 2 models, should get a warning due to MAX_REPEATS consecutive duplicates
        with pytest.warns(
            UserWarning, match=f"Generator called {MAX_REPEATS} times in a row"
        ):
            result = prior.rvs(2)

        # Should eventually generate 2 models
        assert result.shape == (2, 1)
        assert (
            generator.call_count == MAX_REPEATS + 2
        )  # 1 unique + MAX_REPEATS duplicates + 1 more unique

        # Verify we got the expected unique models
        assert set(result.flatten()) == {"model1", "model2"}

    def test_warning_issued_only_once(self, mocker):
        """Test that the warning is issued only once per rvs call"""
        # Create generator with many duplicates
        generator = mocker.MagicMock()

        # First "model1", then 2*MAX_REPEATS duplicates of "model1", then "model2"
        generator.side_effect = ["model1"] + ["model1"] * (2 * MAX_REPEATS) + ["model2"]
        prior = Prior(generator)

        # Should see exactly 1 warning despite having 2*MAX_REPEATS duplicates
        with pytest.warns(UserWarning) as record:
            result = prior.rvs(2)

        # Verify we got exactly 1 warning
        assert len(record) == 1

        # Verify the warning has the expected message
        assert f"Generator called {MAX_REPEATS} times in a row" in str(
            record[0].message
        )

        # Verify we got the 2 unique models
        assert result.shape == (2, 1)
        assert set(result.flatten()) == {"model1", "model2"}
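For reference, the behavior these tests pin down (unique draws returned with shape (N, 1), and a single UserWarning once the generator repeats MAX_REPEATS times in a row) can be summarized in a minimal sketch of the prior. The loop structure and the MAX_REPEATS value below are assumptions for illustration, reconstructed from the assertions above rather than copied from pysips/prior.py:

import warnings

import numpy as np

# Illustrative value only; the real constant is defined in pysips.prior.
MAX_REPEATS = 100


class Prior:
    """Sketch of a generator-backed prior over candidate equation models."""

    def __init__(self, generator):
        self._generator = generator

    def rvs(self, num_samples):
        """Draw `num_samples` unique models, returned with shape (num_samples, 1)."""
        models = []
        consecutive_repeats = 0
        warned = False
        while len(models) < num_samples:
            candidate = self._generator()
            if candidate in models:
                consecutive_repeats += 1
                if consecutive_repeats >= MAX_REPEATS and not warned:
                    # Warn once per rvs call, as asserted in the tests above.
                    warnings.warn(
                        f"Generator called {MAX_REPEATS} times in a row "
                        "without producing a new unique model"
                    )
                    warned = True
            else:
                consecutive_repeats = 0
                models.append(candidate)
        return np.array(models).reshape((num_samples, 1))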
tests/unit/test_random_choice_proposal.py
ADDED
@@ -0,0 +1,136 @@
import pytest
import numpy as np

from pysips.random_choice_proposal import RandomChoiceProposal


class TestRandomChoiceProposal:

    @pytest.fixture
    def mock_model(self, mocker):
        """Create a mock model for testing"""
        return mocker.MagicMock()

    @pytest.fixture
    def mock_proposals(self, mocker):
        """Create mock proposals for testing"""
        prop1 = mocker.MagicMock(name="proposal1")
        prop1.return_value = mocker.MagicMock(name="result1")

        prop2 = mocker.MagicMock(name="proposal2")
        prop2.return_value = mocker.MagicMock(name="result2")

        prop3 = mocker.MagicMock(name="proposal3")
        prop3.return_value = mocker.MagicMock(name="result3")

        return [prop1, prop2, prop3]

    @pytest.fixture
    def probabilities(self):
        """Create probabilities for testing"""
        return [0.2, 0.5, 0.3]

    def test_initialization(self, mock_proposals, probabilities, mocker):
        """Test proper initialization of RandomChoiceProposal"""
        # Mock numpy random
        mock_random = mocker.patch("numpy.random.default_rng")

        seed = 42
        _ = RandomChoiceProposal(
            mock_proposals, probabilities, exclusive=True, seed=seed
        )
        mock_random.assert_called_once_with(seed)

    def test_update_method(self, mock_proposals, probabilities, mocker):
        """Test update method calls update on all proposals"""
        # Initialize proposal
        proposal = RandomChoiceProposal(mock_proposals, probabilities)

        # Call update with args and kwargs
        test_args = ["arg1", "arg2"]
        test_kwargs = {"key1": "value1", "key2": "value2"}
        proposal.update(*test_args, **test_kwargs)

        # Assertions - each proposal should have update called with same args/kwargs
        for mock_prop in mock_proposals:
            mock_prop.update.assert_called_once_with(*test_args, **test_kwargs)

    def test_call_exclusive_mode(
        self, mock_proposals, probabilities, mock_model, mocker
    ):
        """Test __call__ method in exclusive mode selects one proposal and applies it"""
        # Mock RNG to control which proposal is selected
        mock_rng = mocker.MagicMock()
        mock_rng.random.return_value = 0.5  # Will select proposals[1] (0.2 < 0.5 < 0.7)

        # Initialize proposal with mocked RNG
        proposal = RandomChoiceProposal(mock_proposals, probabilities, exclusive=True)
        proposal._rng = mock_rng

        # Call the proposal
        result = proposal(mock_model)

        # Assertions
        mock_proposals[1].assert_called_once_with(mock_model)

        # Only the selected proposal should be called
        mock_proposals[0].assert_not_called()
        mock_proposals[2].assert_not_called()

        # Result should be the output of the selected proposal
        assert result == mock_proposals[1].return_value

    def test_call_non_exclusive_mode(
        self, mock_proposals, probabilities, mock_model, mocker
    ):
        """Test __call__ method in non-exclusive mode can select multiple proposals"""
        # Mock RNG to make proposals 0 and 2 be selected
        mock_rng = mocker.MagicMock()
        # For each proposal: [0:True, 1:False, 2:True]
        mock_rng.random.side_effect = [0.1, 0.9, 0.1]  # < prob = select, > prob = skip
        # Prevent random shuffling for predictable testing
        mock_rng.shuffle.side_effect = lambda x: None

        # Initialize proposal with mocked RNG
        proposal = RandomChoiceProposal(mock_proposals, probabilities, exclusive=False)
        proposal._rng = mock_rng

        # Call the proposal
        result = proposal(mock_model)

        # Assertions - first proposal should be called with original model
        mock_proposals[0].assert_called_once_with(mock_model)

        # Second proposal should not be called
        mock_proposals[1].assert_not_called()

        # Third proposal should be called with result from first proposal
        mock_proposals[2].assert_called_once_with(mock_proposals[0].return_value)

        # Result should be the output of the last applied proposal
        assert result == mock_proposals[2].return_value

    def test_call_non_exclusive_empty_first_round(
        self, mock_proposals, probabilities, mock_model, mocker
    ):
        """Test __call__ in non-exclusive mode retries if no proposals are selected initially"""
        # Mock RNG to reject all on first pass, then select one on second pass
        mock_rng = mocker.MagicMock()
        # First round: all above threshold (none selected)
        # Second round: select proposal 0
        mock_rng.random.side_effect = [0.9, 0.9, 0.9, 0.1, 0.9, 0.9]

        # Initialize proposal
        proposal = RandomChoiceProposal(mock_proposals, probabilities, exclusive=False)
        proposal._rng = mock_rng

        # Call the proposal
        result = proposal(mock_model)

        # Assertions
        mock_proposals[0].assert_called_once_with(mock_model)
        mock_proposals[1].assert_not_called()
        mock_proposals[2].assert_not_called()

        # Result should be the output of the selected proposal
        assert result == mock_proposals[0].return_value
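The dispatch logic asserted by these tests can be sketched as follows. The cumulative-probability selection in exclusive mode and the per-proposal Bernoulli draws with a retry loop in non-exclusive mode are inferred from the mocked RNG values above, so treat this as an illustrative reconstruction rather than the code shipped in pysips/random_choice_proposal.py:

import numpy as np


class RandomChoiceProposal:
    """Sketch of a proposal that randomly dispatches to a set of sub-proposals."""

    def __init__(self, proposals, probabilities, exclusive=True, seed=None):
        self._proposals = list(proposals)
        self._probabilities = list(probabilities)
        self._exclusive = exclusive
        self._rng = np.random.default_rng(seed)

    def update(self, *args, **kwargs):
        # Forward state updates (e.g. the current population) to every sub-proposal.
        for proposal in self._proposals:
            proposal.update(*args, **kwargs)

    def __call__(self, model):
        if self._exclusive:
            # Pick exactly one sub-proposal via cumulative probabilities.
            draw = self._rng.random()
            cumulative = 0.0
            for proposal, prob in zip(self._proposals, self._probabilities):
                cumulative += prob
                if draw < cumulative:
                    return proposal(model)
            return self._proposals[-1](model)

        # Non-exclusive: include each sub-proposal with its own probability;
        # retry until at least one is selected, then apply them in random order,
        # chaining each result into the next.
        selected = []
        while not selected:
            selected = [
                proposal
                for proposal, prob in zip(self._proposals, self._probabilities)
                if self._rng.random() < prob
            ]
        self._rng.shuffle(selected)
        result = model
        for proposal in selected:
            result = proposal(result)
        return result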
tests/unit/test_regressor.py
ADDED
@@ -0,0 +1,227 @@
import pytest
import numpy as np
from unittest.mock import MagicMock
from pytest_mock import MockerFixture

from pysips.regressor import PysipsRegressor

# Dynamically get the module containing the PysipsRegressor class
IMPORTMODULE = PysipsRegressor.__module__


@pytest.fixture
def sample_data():
    """Fixture for sample data."""
    X = np.array([[1.0], [2.0], [3.0], [4.0], [5.0]])
    y = np.array([2.0, 4.0, 6.0, 8.0, 10.0])
    return X, y


@pytest.fixture
def mock_external_components(mocker: MockerFixture):
    """Mock all external components needed by the regressor."""
    mock_component_gen = mocker.patch(
        f"{IMPORTMODULE}.ComponentGenerator", autospec=True
    )
    # needs to provide unique outputs for pool generation
    mock_agraph_gen = mocker.MagicMock(side_effect=lambda: np.random.random())
    mock_agraph_gen_constructor = mocker.patch(
        f"{IMPORTMODULE}.AGraphGenerator", autospec=True, return_value=mock_agraph_gen
    )
    mock_laplace_nmll = mocker.patch(f"{IMPORTMODULE}.LaplaceNmll", autospec=True)
    mock_mutation_proposal = mocker.patch(
        f"{IMPORTMODULE}.MutationProposal", autospec=True
    )
    mock_crossover_proposal = mocker.patch(
        f"{IMPORTMODULE}.CrossoverProposal", autospec=True
    )
    mock_random_choice_proposal = mocker.patch(
        f"{IMPORTMODULE}.RandomChoiceProposal", autospec=True
    )
    mock_sample = mocker.patch(f"{IMPORTMODULE}.sample", autospec=True)

    # Configure the mock sample function to return a model and likelihood
    mock_model = MagicMock()
    mock_model.__str__.return_value = "2*X_0"
    mock_likelihoods = np.array([0.9])
    mock_sample.return_value = ([mock_model], mock_likelihoods)

    return {
        "component_gen": mock_component_gen,
        "agraph_gen": mock_agraph_gen_constructor,
        "laplace_nmll": mock_laplace_nmll,
        "mutation_proposal": mock_mutation_proposal,
        "crossover_proposal": mock_crossover_proposal,
        "random_choice_proposal": mock_random_choice_proposal,
        "sample": mock_sample,
        "model": mock_model,
    }


def test_init_custom_parameters():
    """Test initialization with custom parameters."""
    regressor = PysipsRegressor(
        operators=["+", "*", "sin", "cos"],
        max_complexity=30,
        terminal_probability=0.2,
        num_particles=100,
        random_state=42,
    )

    # Check custom values
    assert regressor.operators == ["+", "*", "sin", "cos"]
    assert regressor.max_complexity == 30
    assert regressor.terminal_probability == 0.2
    assert regressor.num_particles == 100
    assert regressor.random_state == 42


def test_fit(sample_data, mock_external_components):
    """Test the fit method."""
    X, y = sample_data
    mock_model = mock_external_components["model"]
    mock_sample = mock_external_components["sample"]

    regressor = PysipsRegressor(random_state=42)
    regressor.fit(X, y)

    # Verify that sample was called
    mock_sample.assert_called_once()

    # Verify that attributes were set correctly
    assert regressor.models_ == [mock_model]
    assert np.array_equal(regressor.likelihoods_, np.array([0.9]))
    assert regressor.best_model_ == mock_model

    # Verify n_features_in_ is set correctly
    assert regressor.n_features_in_ == X.shape[1]


def test_predict_requires_fit(sample_data):
    """Test that predict requires the model to be fitted first."""
    X, _ = sample_data
    regressor = PysipsRegressor()

    with pytest.raises(Exception):  # Should raise NotFittedError
        regressor.predict(X)


def test_predict(sample_data, mocker: MockerFixture):
    """Test the predict method."""
    X, _ = sample_data

    # Create a mock model with evaluate_equation_at method
    mock_model = MagicMock()
    mock_predictions = np.array([2.0, 4.0, 6.0, 8.0, 10.0])
    mock_model.evaluate_equation_at.return_value = mock_predictions

    # Mock check_is_fitted to avoid NotFittedError
    mocker.patch(f"{IMPORTMODULE}.check_is_fitted")
    # Mock check_array to return the input unchanged
    mocker.patch(f"{IMPORTMODULE}.check_array", return_value=X)

    regressor = PysipsRegressor()
    regressor.best_model_ = mock_model
    regressor.models_ = [mock_model]
    regressor.likelihoods_ = np.array([1.0])
    regressor.n_features_in_ = X.shape[1]

    predictions = regressor.predict(X)

    # Verify that evaluate_equation_at was called with X
    mock_model.evaluate_equation_at.assert_called_once_with(X)
    assert np.array_equal(predictions, mock_predictions)


def test_predict_wrong_feature_count(sample_data, mocker: MockerFixture):
    """Test predict raises error when input has wrong number of features."""
    X, _ = sample_data
    wrong_X = np.array([[1.0, 2.0], [3.0, 4.0]])  # 2 features instead of 1

    # Mock check_is_fitted to avoid NotFittedError
    mocker.patch(f"{IMPORTMODULE}.check_is_fitted")
    # Mock check_array to return the input unchanged
    mocker.patch(f"{IMPORTMODULE}.check_array", return_value=wrong_X)

    regressor = PysipsRegressor()
    regressor.best_model_ = MagicMock()
    regressor.models_ = [MagicMock()]
    regressor.n_features_in_ = X.shape[1]  # Trained with 1 feature

    with pytest.raises(
        ValueError
    ):  # Should raise ValueError for feature count mismatch
        regressor.predict(wrong_X)


def test_score(sample_data, mocker: MockerFixture):
    """Test the score method."""
    X, y = sample_data

    # Mock super().score to return a predetermined R² value
    mocker.patch("sklearn.base.RegressorMixin.score", return_value=0.95)

    regressor = PysipsRegressor()
    regressor.best_model_ = MagicMock()

    score = regressor.score(X, y)

    assert score == 0.95


def test_get_expression(mocker: MockerFixture):
    """Test the get_expression method."""
    # Mock check_is_fitted to avoid NotFittedError
    mocker.patch(f"{IMPORTMODULE}.check_is_fitted")

    mock_model = MagicMock()
    mock_model.__str__.return_value = "2*X_0"

    regressor = PysipsRegressor()
    regressor.best_model_ = mock_model

    expression = regressor.get_expression()

    # Verify that __str__ was called on the best_model_
    mock_model.__str__.assert_called_once()
    assert expression == "2*X_0"


def test_get_expression_not_fitted(mocker: MockerFixture):
    """Test get_expression raises error when called on unfitted model."""
    # Mock check_is_fitted to raise an exception
    mocker.patch(f"{IMPORTMODULE}.check_is_fitted", side_effect=Exception("Not fitted"))

    regressor = PysipsRegressor()

    with pytest.raises(Exception):  # Should raise the exception from check_is_fitted
        regressor.get_expression()


def test_get_models(mocker: MockerFixture):
    """Test the get_models method."""
    # Mock check_is_fitted to avoid NotFittedError
    mocker.patch(f"{IMPORTMODULE}.check_is_fitted")

    mock_models = [MagicMock(), MagicMock()]
    mock_likelihoods = np.array([0.8, 0.2])

    regressor = PysipsRegressor()
    regressor.models_ = mock_models
    regressor.likelihoods_ = mock_likelihoods

    models, likelihoods = regressor.get_models()

    assert models == mock_models
    assert np.array_equal(likelihoods, mock_likelihoods)


def test_get_models_not_fitted(mocker: MockerFixture):
    """Test get_models raises error when called on unfitted model."""
    # Mock check_is_fitted to raise an exception
    mocker.patch(f"{IMPORTMODULE}.check_is_fitted", side_effect=Exception("Not fitted"))

    regressor = PysipsRegressor()

    with pytest.raises(Exception):  # Should raise the exception from check_is_fitted
        regressor.get_models()
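Outside of the mocked setting above, the public surface these tests exercise (constructor parameters from test_init_custom_parameters, fit/predict, get_expression, get_models, score) supports a plain scikit-learn style workflow along these lines. Parameter values here are illustrative only, and an actual run is stochastic and not mocked:

import numpy as np

from pysips.regressor import PysipsRegressor

# Toy data matching the fixture above: y = 2 * x
X = np.array([[1.0], [2.0], [3.0], [4.0], [5.0]])
y = np.array([2.0, 4.0, 6.0, 8.0, 10.0])

regressor = PysipsRegressor(
    operators=["+", "*"],     # parameter names as asserted in the tests
    max_complexity=12,        # values chosen for illustration
    num_particles=200,
    random_state=42,
)
regressor.fit(X, y)

predictions = regressor.predict(X)            # evaluates the best sampled model on X
expression = regressor.get_expression()       # string form of the best model, e.g. "2*X_0"
models, likelihoods = regressor.get_models()  # all sampled models with their likelihoods
print(expression, predictions[:3], len(models))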
tests/unit/test_sampler.py
ADDED
@@ -0,0 +1,133 @@
import numpy as np
import pytest

from pysips.sampler import sample, run_smc

IMPORTMODULE = sample.__module__


class TestSampleFunction:
    def test_default_kwargs(self, mocker):
        mock_run_smc = mocker.patch(
            f"{IMPORTMODULE}.run_smc",
            return_value=("mock_models", "mock_likelihoods"),
        )

        likelihood = lambda x: x
        proposal = object()
        generator = object()
        seed = 42

        result = sample(likelihood, proposal, generator, seed=seed)

        assert result == ("mock_models", "mock_likelihoods")

        mock_run_smc.assert_called_once()
        args, _ = mock_run_smc.call_args

        assert args[0] == likelihood
        assert args[1] == proposal
        assert args[2] == generator
        assert args[3] is False

        kwargs_passed = args[4]
        rng_passed = args[5]

        assert kwargs_passed == {"num_particles": 5000, "num_mcmc_samples": 10}
        assert isinstance(rng_passed, np.random.Generator)

    def test_custom_kwargs(self, mocker):
        mock_run_smc = mocker.patch(
            f"{IMPORTMODULE}.run_smc", return_value=("mock_models", "mock_likelihoods")
        )

        likelihood = lambda x: x
        proposal = object()
        generator = object()
        custom_kwargs = {"num_particles": 100, "num_mcmc_samples": 3}

        result = sample(likelihood, proposal, generator, kwargs=custom_kwargs, seed=24)

        assert result == ("mock_models", "mock_likelihoods")
        mock_run_smc.assert_called_once()

        args, _ = mock_run_smc.call_args
        assert args[0] == likelihood
        assert args[1] == proposal
        assert args[2] == generator
        assert args[3] is False
        assert args[4] == custom_kwargs


class TestRunSMC:
    @pytest.mark.parametrize("multiproc", [True, False])
    def test_functionality(self, mocker, multiproc):
        mock_rng_instance = mocker.Mock(name="rngInstance")
        mock_rng = mocker.patch(
            f"{IMPORTMODULE}.np.random.default_rng", return_value=mock_rng_instance
        )

        mock_prior_instance = mocker.Mock(name="PriorInstance")
        mock_prior = mocker.patch(
            f"{IMPORTMODULE}.Prior", return_value=mock_prior_instance
        )

        mock_mcmc_instance = mocker.Mock(name="MetropolisInstance")
        mock_metropolis = mocker.patch(
            f"{IMPORTMODULE}.Metropolis", return_value=mock_mcmc_instance
        )

        mock_kernel_instance = mocker.Mock(name="VectorMCMCKernelInstance")
        mock_vector_kernel = mocker.patch(
            f"{IMPORTMODULE}.VectorMCMCKernel", return_value=mock_kernel_instance
        )

        mock_sampler_instance = mocker.Mock(name="AdaptiveSamplerInstance")
        mock_adaptive_sampler = mocker.patch(
            f"{IMPORTMODULE}.AdaptiveSampler", return_value=mock_sampler_instance
        )

        dummy_params = np.array([[1], [2], [3]])
        dummy_step = mocker.Mock(params=dummy_params)
        mock_sampler_instance.sample.return_value = ([dummy_step], None)

        likelihood = mocker.Mock(side_effect=lambda x: x * 10)

        proposal = "proposal"
        generator = "generator"
        kwargs = {"num_particles": 3, "num_mcmc_samples": 4}

        models, likelihoods = sample(
            likelihood,
            proposal,
            generator,
            multiprocess=multiproc,
            kwargs=kwargs,
            seed=0,
        )

        mock_prior.assert_called_once_with(generator)

        mock_metropolis.assert_called_once_with(
            likelihood=likelihood,
            proposal=proposal,
            prior=mock_prior_instance,
            multiprocess=multiproc,
        )

        mock_vector_kernel.assert_called_once_with(
            mock_mcmc_instance, param_order=["f"], rng=mock_rng_instance
        )

        mock_adaptive_sampler.assert_called_once_with(mock_kernel_instance)

        mock_sampler_instance.sample.assert_called_once_with(**kwargs)

        assert mock_sampler_instance._mutator._compute_cov is False

        expected_models = dummy_params[:, 0].tolist()
        assert models == expected_models

        expected_likelihoods = [m * 10 for m in expected_models]
        assert likelihoods == expected_likelihoods
        assert likelihood.call_count == len(expected_models)
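TestRunSMC pins down how the pieces are wired: a Prior wraps the generator, a Metropolis MCMC object receives the likelihood, proposal, prior, and multiprocess flag, a VectorMCMCKernel built with param_order=["f"] and the seeded RNG feeds an AdaptiveSampler whose mutator covariance computation is disabled, and the final step's params column is re-scored with the likelihood (test_default_kwargs additionally shows sample() defaulting to {"num_particles": 5000, "num_mcmc_samples": 10}). A rough sketch of that wiring follows; the smcpy import path and the exact shape of the returned steps are assumptions for illustration, not the shipped run_smc:

import numpy as np
from smcpy import AdaptiveSampler, VectorMCMCKernel  # import path assumed

from pysips.metropolis import Metropolis
from pysips.prior import Prior


def run_smc_sketch(likelihood, proposal, generator, multiprocess, kwargs, rng):
    """Sketch of the wiring asserted in TestRunSMC (illustrative, not the real run_smc)."""
    prior = Prior(generator)
    mcmc = Metropolis(
        likelihood=likelihood,
        proposal=proposal,
        prior=prior,
        multiprocess=multiprocess,
    )
    kernel = VectorMCMCKernel(mcmc, param_order=["f"], rng=rng)
    smc = AdaptiveSampler(kernel)

    # The test checks that covariance computation is switched off on the mutator,
    # since the "parameters" here are equation objects rather than numeric vectors.
    smc._mutator._compute_cov = False

    steps, _ = smc.sample(**kwargs)
    models = steps[-1].params[:, 0].tolist()
    likelihoods = [likelihood(model) for model in models]
    return models, likelihoods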