fishertools-0.2.1-py3-none-any.whl
This diff represents the content of a publicly available package version released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the package contents as they appear in the public registry.
- fishertools/__init__.py +82 -0
- fishertools/config/__init__.py +24 -0
- fishertools/config/manager.py +247 -0
- fishertools/config/models.py +96 -0
- fishertools/config/parser.py +265 -0
- fishertools/decorators.py +93 -0
- fishertools/documentation/__init__.py +38 -0
- fishertools/documentation/api.py +242 -0
- fishertools/documentation/generator.py +502 -0
- fishertools/documentation/models.py +126 -0
- fishertools/documentation/visual.py +583 -0
- fishertools/errors/__init__.py +29 -0
- fishertools/errors/exceptions.py +191 -0
- fishertools/errors/explainer.py +303 -0
- fishertools/errors/formatters.py +386 -0
- fishertools/errors/models.py +228 -0
- fishertools/errors/patterns.py +119 -0
- fishertools/errors/recovery.py +467 -0
- fishertools/examples/__init__.py +22 -0
- fishertools/examples/models.py +118 -0
- fishertools/examples/repository.py +770 -0
- fishertools/helpers.py +116 -0
- fishertools/integration.py +451 -0
- fishertools/learn/__init__.py +18 -0
- fishertools/learn/examples.py +550 -0
- fishertools/learn/tips.py +281 -0
- fishertools/learning/__init__.py +32 -0
- fishertools/learning/core.py +349 -0
- fishertools/learning/models.py +112 -0
- fishertools/learning/progress.py +314 -0
- fishertools/learning/session.py +500 -0
- fishertools/learning/tutorial.py +626 -0
- fishertools/legacy/__init__.py +76 -0
- fishertools/legacy/deprecated.py +261 -0
- fishertools/legacy/deprecation.py +149 -0
- fishertools/safe/__init__.py +16 -0
- fishertools/safe/collections.py +242 -0
- fishertools/safe/files.py +240 -0
- fishertools/safe/strings.py +15 -0
- fishertools/utils.py +57 -0
- fishertools-0.2.1.dist-info/METADATA +256 -0
- fishertools-0.2.1.dist-info/RECORD +81 -0
- fishertools-0.2.1.dist-info/WHEEL +5 -0
- fishertools-0.2.1.dist-info/licenses/LICENSE +21 -0
- fishertools-0.2.1.dist-info/top_level.txt +2 -0
- tests/__init__.py +6 -0
- tests/conftest.py +25 -0
- tests/test_config/__init__.py +3 -0
- tests/test_config/test_basic_config.py +57 -0
- tests/test_config/test_config_error_handling.py +287 -0
- tests/test_config/test_config_properties.py +435 -0
- tests/test_documentation/__init__.py +3 -0
- tests/test_documentation/test_documentation_properties.py +253 -0
- tests/test_documentation/test_visual_documentation_properties.py +444 -0
- tests/test_errors/__init__.py +3 -0
- tests/test_errors/test_api.py +301 -0
- tests/test_errors/test_error_handling.py +354 -0
- tests/test_errors/test_explainer.py +173 -0
- tests/test_errors/test_formatters.py +338 -0
- tests/test_errors/test_models.py +248 -0
- tests/test_errors/test_patterns.py +270 -0
- tests/test_examples/__init__.py +3 -0
- tests/test_examples/test_example_repository_properties.py +204 -0
- tests/test_examples/test_specific_examples.py +303 -0
- tests/test_integration.py +298 -0
- tests/test_integration_enhancements.py +462 -0
- tests/test_learn/__init__.py +3 -0
- tests/test_learn/test_examples.py +221 -0
- tests/test_learn/test_tips.py +285 -0
- tests/test_learning/__init__.py +3 -0
- tests/test_learning/test_interactive_learning_properties.py +337 -0
- tests/test_learning/test_learning_system_properties.py +194 -0
- tests/test_learning/test_progress_tracking_properties.py +279 -0
- tests/test_legacy/__init__.py +3 -0
- tests/test_legacy/test_backward_compatibility.py +236 -0
- tests/test_legacy/test_deprecation_warnings.py +208 -0
- tests/test_safe/__init__.py +3 -0
- tests/test_safe/test_collections_properties.py +189 -0
- tests/test_safe/test_files.py +104 -0
- tests/test_structure.py +58 -0
- tests/test_structure_enhancements.py +115 -0
tests/test_config/test_config_error_handling.py
@@ -0,0 +1,287 @@
+"""
+Unit tests for configuration error handling.
+
+Tests specific error scenarios and edge cases for configuration management.
+"""
+
+import json
+import tempfile
+import os
+import platform
+import pytest
+
+from fishertools.config.models import LearningConfig, ErrorSeverity
+from fishertools.config.manager import ConfigurationManager
+from fishertools.config.parser import ConfigurationParser
+
+
+class TestConfigurationErrorHandling:
+    """Unit tests for configuration error handling scenarios."""
+
+    def test_missing_required_fields(self):
+        """Test handling of configurations with missing required fields."""
+        parser = ConfigurationParser()
+
+        # Configuration missing required 'default_level' field
+        config_missing_level = {
+            "explanation_verbosity": "detailed",
+            "visual_aids_enabled": True
+        }
+
+        validation_result = parser.validate_structure(config_missing_level)
+        assert not validation_result.is_valid
+        assert len(validation_result.errors) > 0
+
+        # Check that the error mentions the missing field
+        error_messages = [error.message for error in validation_result.errors]
+        assert any("default_level" in msg for msg in error_messages)
+        assert any("missing" in msg.lower() for msg in error_messages)
+
+    def test_wrong_data_types(self):
+        """Test handling of configurations with wrong data types."""
+        parser = ConfigurationParser()
+
+        # Configuration with wrong types
+        config_wrong_types = {
+            "default_level": "beginner",
+            "explanation_verbosity": "detailed",
+            "visual_aids_enabled": "true",  # Should be boolean, not string
+            "suggested_topics_count": "5",  # Should be int, not string
+            "progress_tracking_enabled": 1  # Should be boolean, not int
+        }
+
+        validation_result = parser.validate_structure(config_wrong_types)
+        assert not validation_result.is_valid
+        assert len(validation_result.errors) >= 2  # At least 2 type errors
+
+        # Check error details
+        for error in validation_result.errors:
+            assert error.severity == ErrorSeverity.ERROR
+            assert "type" in error.message.lower()
+            assert error.suggested_fix is not None
+
+    def test_invalid_enum_values(self):
+        """Test handling of invalid enum values."""
+        parser = ConfigurationParser()
+
+        # Configuration with invalid enum values
+        config_invalid_enums = {
+            "default_level": "expert",  # Should be beginner/intermediate/advanced
+            "explanation_verbosity": "verbose",  # Should be brief/detailed/comprehensive
+            "visual_aids_enabled": True
+        }
+
+        validation_result = parser.validate_structure(config_invalid_enums)
+        assert not validation_result.is_valid
+        assert len(validation_result.errors) >= 2
+
+        # Check that errors mention valid values
+        error_messages = [error.message for error in validation_result.errors]
+        assert any("valid values" in msg.lower() for msg in error_messages)
+
+    def test_out_of_range_numeric_values(self):
+        """Test handling of numeric values outside recommended ranges."""
+        parser = ConfigurationParser()
+
+        # Configuration with values outside recommended ranges
+        config_out_of_range = {
+            "default_level": "beginner",
+            "explanation_verbosity": "detailed",
+            "suggested_topics_count": 50,  # Too high (recommended 1-10)
+            "max_examples_per_topic": 0,  # Too low (recommended 1-20)
+            "visual_aids_enabled": True
+        }
+
+        validation_result = parser.validate_structure(config_out_of_range)
+        # Should be valid but have warnings
+        assert validation_result.is_valid
+        assert len(validation_result.warnings) >= 2
+
+        # Check warning details
+        for warning in validation_result.warnings:
+            assert warning.severity == ErrorSeverity.WARNING
+            assert "range" in warning.message.lower()
+
+    def test_unknown_fields_warning(self):
+        """Test that unknown fields generate warnings."""
+        parser = ConfigurationParser()
+
+        # Configuration with unknown fields
+        config_unknown_fields = {
+            "default_level": "beginner",
+            "explanation_verbosity": "detailed",
+            "unknown_field": "some_value",
+            "another_unknown": 42
+        }
+
+        validation_result = parser.validate_structure(config_unknown_fields)
+        assert validation_result.is_valid  # Should still be valid
+        assert len(validation_result.warnings) >= 2  # Should have warnings for unknown fields
+
+        # Check warning details
+        warning_messages = [warning.message for warning in validation_result.warnings]
+        assert any("unknown" in msg.lower() for msg in warning_messages)
+        assert any("ignored" in msg.lower() for msg in warning_messages)
+
+    def test_file_not_found_error(self):
+        """Test handling of missing configuration files."""
+        manager = ConfigurationManager()
+
+        # Try to load non-existent file
+        with pytest.raises(FileNotFoundError) as exc_info:
+            manager.load_config("non_existent_config.json")
+
+        error_message = str(exc_info.value)
+        assert "not found" in error_message.lower()
+        assert "non_existent_config.json" in error_message
+
+    def test_invalid_json_file(self):
+        """Test handling of files with invalid JSON."""
+        manager = ConfigurationManager()
+
+        # Create temporary file with invalid JSON
+        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
+            f.write('{"invalid": json content}')  # Invalid JSON
+            temp_path = f.name
+
+        try:
+            with pytest.raises(ValueError) as exc_info:
+                manager.load_config(temp_path)
+
+            error_message = str(exc_info.value)
+            assert "failed to load configuration" in error_message.lower()
+        finally:
+            os.unlink(temp_path)
+
+    def test_validation_failure_on_load(self):
+        """Test that validation failures prevent configuration loading."""
+        manager = ConfigurationManager()
+
+        # Create temporary file with invalid configuration
+        invalid_config = {
+            "default_level": "invalid_level",  # Invalid enum value
+            "explanation_verbosity": 123,  # Wrong type
+        }
+
+        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
+            json.dump(invalid_config, f)
+            temp_path = f.name
+
+        try:
+            with pytest.raises(ValueError) as exc_info:
+                manager.load_config(temp_path)
+
+            error_message = str(exc_info.value)
+            assert "validation failed" in error_message.lower()
+        finally:
+            os.unlink(temp_path)
+
+    def test_save_to_invalid_path(self):
+        """Test handling of invalid save paths."""
+        manager = ConfigurationManager()
+        config = LearningConfig()
+
+        # Create a temporary directory and then remove it to simulate permission error
+        with tempfile.TemporaryDirectory() as temp_dir:
+            invalid_path = os.path.join(temp_dir, "subdir", "config.json")
+
+        # Now temp_dir is deleted, so trying to create subdir should fail
+        # But our implementation creates directories, so let's use a different approach
+
+        # Try to save to a file that exists as a directory
+        with tempfile.TemporaryDirectory() as temp_dir:
+            # Create a directory with the same name as our target file
+            dir_as_file = os.path.join(temp_dir, "config.json")
+            os.makedirs(dir_as_file)
+
+            with pytest.raises(IOError) as exc_info:
+                manager.save_config(config, dir_as_file)
+
+            error_message = str(exc_info.value)
+            assert "failed to save configuration" in error_message.lower()
+
+    def test_apply_invalid_config(self):
+        """Test that applying invalid configuration raises error."""
+        manager = ConfigurationManager()
+
+        # Create invalid config by directly modifying fields
+        invalid_config = LearningConfig()
+        invalid_config.default_level = "invalid_level"  # Invalid enum value
+
+        with pytest.raises(ValueError) as exc_info:
+            manager.apply_config(invalid_config)
+
+        error_message = str(exc_info.value)
+        assert "cannot apply invalid configuration" in error_message.lower()
+
+    def test_yaml_not_available_error(self):
+        """Test handling when YAML support is not available."""
+        parser = ConfigurationParser()
+
+        # Mock YAML not being available by temporarily setting the flag
+        original_yaml_available = parser.__class__.__module__
+
+        # Test YAML parsing when not available
+        # This test assumes YAML is available, so we'll test the error path differently
+        try:
+            # Try to parse YAML content
+            yaml_content = "key: value\nother_key: other_value"
+            result = parser.parse_yaml(yaml_content)
+            # If we get here, YAML is available, which is expected
+            assert isinstance(result, dict)
+        except ValueError as e:
+            # If YAML is not available, should get appropriate error
+            assert "yaml support not available" in str(e).lower()
+
+    def test_configuration_merge_with_invalid_overrides(self):
+        """Test merging configuration with invalid override values."""
+        manager = ConfigurationManager()
+        base_config = LearningConfig()
+
+        # Invalid overrides
+        invalid_overrides = {
+            "default_level": "invalid_level",
+            "suggested_topics_count": "not_a_number"
+        }
+
+        # Merge should work (it just updates the dict)
+        merged_config = manager.merge_configs(base_config, invalid_overrides)
+
+        # But validation should fail
+        validation_result = manager.validate_config(merged_config)
+        assert not validation_result.is_valid
+        assert len(validation_result.errors) > 0
+
+    def test_error_message_descriptiveness(self):
+        """Test that error messages are descriptive and helpful."""
+        parser = ConfigurationParser()
+
+        # Test various error scenarios
+        test_cases = [
+            {
+                "config": {"default_level": 123},
+                "expected_in_message": ["type", "str", "int"]  # Changed "string" to "str"
+            },
+            {
+                "config": {"default_level": "invalid"},
+                "expected_in_message": ["invalid value", "valid values"]
+            },
+            {
+                "config": {},  # Missing required fields
+                "expected_in_message": ["missing", "required"]
+            }
+        ]
+
+        for case in test_cases:
+            validation_result = parser.validate_structure(case["config"])
+            assert not validation_result.is_valid
+
+            # Check that error messages contain expected terms
+            all_messages = " ".join([error.message.lower() for error in validation_result.errors])
+            for expected_term in case["expected_in_message"]:
+                assert expected_term.lower() in all_messages
+
+            # Check that suggested fixes are provided
+            for error in validation_result.errors:
+                assert error.suggested_fix is not None
+                assert len(error.suggested_fix) > 0