fishertools 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fishertools/__init__.py +82 -0
- fishertools/config/__init__.py +24 -0
- fishertools/config/manager.py +247 -0
- fishertools/config/models.py +96 -0
- fishertools/config/parser.py +265 -0
- fishertools/decorators.py +93 -0
- fishertools/documentation/__init__.py +38 -0
- fishertools/documentation/api.py +242 -0
- fishertools/documentation/generator.py +502 -0
- fishertools/documentation/models.py +126 -0
- fishertools/documentation/visual.py +583 -0
- fishertools/errors/__init__.py +29 -0
- fishertools/errors/exceptions.py +191 -0
- fishertools/errors/explainer.py +303 -0
- fishertools/errors/formatters.py +386 -0
- fishertools/errors/models.py +228 -0
- fishertools/errors/patterns.py +119 -0
- fishertools/errors/recovery.py +467 -0
- fishertools/examples/__init__.py +22 -0
- fishertools/examples/models.py +118 -0
- fishertools/examples/repository.py +770 -0
- fishertools/helpers.py +116 -0
- fishertools/integration.py +451 -0
- fishertools/learn/__init__.py +18 -0
- fishertools/learn/examples.py +550 -0
- fishertools/learn/tips.py +281 -0
- fishertools/learning/__init__.py +32 -0
- fishertools/learning/core.py +349 -0
- fishertools/learning/models.py +112 -0
- fishertools/learning/progress.py +314 -0
- fishertools/learning/session.py +500 -0
- fishertools/learning/tutorial.py +626 -0
- fishertools/legacy/__init__.py +76 -0
- fishertools/legacy/deprecated.py +261 -0
- fishertools/legacy/deprecation.py +149 -0
- fishertools/safe/__init__.py +16 -0
- fishertools/safe/collections.py +242 -0
- fishertools/safe/files.py +240 -0
- fishertools/safe/strings.py +15 -0
- fishertools/utils.py +57 -0
- fishertools-0.2.1.dist-info/METADATA +256 -0
- fishertools-0.2.1.dist-info/RECORD +81 -0
- fishertools-0.2.1.dist-info/WHEEL +5 -0
- fishertools-0.2.1.dist-info/licenses/LICENSE +21 -0
- fishertools-0.2.1.dist-info/top_level.txt +2 -0
- tests/__init__.py +6 -0
- tests/conftest.py +25 -0
- tests/test_config/__init__.py +3 -0
- tests/test_config/test_basic_config.py +57 -0
- tests/test_config/test_config_error_handling.py +287 -0
- tests/test_config/test_config_properties.py +435 -0
- tests/test_documentation/__init__.py +3 -0
- tests/test_documentation/test_documentation_properties.py +253 -0
- tests/test_documentation/test_visual_documentation_properties.py +444 -0
- tests/test_errors/__init__.py +3 -0
- tests/test_errors/test_api.py +301 -0
- tests/test_errors/test_error_handling.py +354 -0
- tests/test_errors/test_explainer.py +173 -0
- tests/test_errors/test_formatters.py +338 -0
- tests/test_errors/test_models.py +248 -0
- tests/test_errors/test_patterns.py +270 -0
- tests/test_examples/__init__.py +3 -0
- tests/test_examples/test_example_repository_properties.py +204 -0
- tests/test_examples/test_specific_examples.py +303 -0
- tests/test_integration.py +298 -0
- tests/test_integration_enhancements.py +462 -0
- tests/test_learn/__init__.py +3 -0
- tests/test_learn/test_examples.py +221 -0
- tests/test_learn/test_tips.py +285 -0
- tests/test_learning/__init__.py +3 -0
- tests/test_learning/test_interactive_learning_properties.py +337 -0
- tests/test_learning/test_learning_system_properties.py +194 -0
- tests/test_learning/test_progress_tracking_properties.py +279 -0
- tests/test_legacy/__init__.py +3 -0
- tests/test_legacy/test_backward_compatibility.py +236 -0
- tests/test_legacy/test_deprecation_warnings.py +208 -0
- tests/test_safe/__init__.py +3 -0
- tests/test_safe/test_collections_properties.py +189 -0
- tests/test_safe/test_files.py +104 -0
- tests/test_structure.py +58 -0
- tests/test_structure_enhancements.py +115 -0
|
@@ -0,0 +1,301 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Tests for the main explain_error API function.
|
|
3
|
+
|
|
4
|
+
This module contains property-based tests for the public API function
|
|
5
|
+
that validates the core requirements for universal exception acceptance,
|
|
6
|
+
default console output behavior, and formatting parameter responsiveness.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import pytest
|
|
10
|
+
import io
|
|
11
|
+
import sys
|
|
12
|
+
from contextlib import redirect_stdout
|
|
13
|
+
from hypothesis import given, strategies as st
|
|
14
|
+
|
|
15
|
+
from fishertools.errors import explain_error
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
# Common Python exception types for testing
|
|
19
|
+
COMMON_EXCEPTION_TYPES = [
|
|
20
|
+
TypeError, ValueError, AttributeError, IndexError, KeyError, ImportError, SyntaxError
|
|
21
|
+
]
|
|
22
|
+
|
|
23
|
+
# Custom exception for testing
class TestCustomException(Exception):
    """Custom exception for testing API behavior."""

    # Pytest tries to collect any class whose name starts with "Test" and,
    # because Exception defines a non-trivial __init__, emits a
    # PytestCollectionWarning for this class on every run.  Opting out of
    # collection explicitly keeps the warning out of test output without
    # renaming the class (it is referenced by the property tests below).
    __test__ = False
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@pytest.mark.property
class TestUniversalExceptionParameterAcceptance:
    """Property tests for universal exception parameter acceptance."""

    @given(
        exception_type=st.sampled_from(COMMON_EXCEPTION_TYPES + [TestCustomException]),
        error_message=st.text(min_size=0, max_size=200)
    )
    def test_universal_exception_parameter_acceptance(self, exception_type, error_message):
        """
        Property 6: Universal Exception Parameter Acceptance
        For any Exception object, the explain_error() function should accept it
        as a parameter without raising errors.

        Feature: fishertools-refactor, Property 6: Universal Exception Parameter Acceptance
        Validates: Requirements 3.2
        """
        # Create exception instance
        exception = exception_type(error_message)

        # Capture output to avoid cluttering test output
        output_buffer = io.StringIO()

        # Only the call under test goes inside the try-block.  The previous
        # version also wrapped the assertions below, so a genuine assertion
        # failure was caught by ``except Exception`` and re-reported as
        # "explain_error raised unexpected exception", masking the real
        # failure reason.
        try:
            with redirect_stdout(output_buffer):
                explain_error(exception)
        except Exception as e:
            pytest.fail(f"explain_error raised unexpected exception: {e}")

        output = output_buffer.getvalue()

        # Property: Function should produce some output
        assert len(output.strip()) > 0

        # Property: Output should contain error information
        assert error_message in output or str(exception) in output or exception_type.__name__ in output

    def test_non_exception_parameter_rejection(self):
        """Test that non-Exception parameters are properly rejected."""
        invalid_inputs = [
            "string",
            123,
            [],
            {},
            None,
            object()
        ]

        for invalid_input in invalid_inputs:
            with pytest.raises(TypeError, match="должен быть экземпляром Exception"):
                explain_error(invalid_input)
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
@pytest.mark.property
class TestDefaultConsoleOutputBehavior:
    """Property tests for default console output behavior."""

    @given(
        exception_type=st.sampled_from(COMMON_EXCEPTION_TYPES),
        error_message=st.text(min_size=1, max_size=100)
    )
    def test_default_console_output_behavior(self, exception_type, error_message):
        """
        Property 7: Default Console Output Behavior
        For any call to explain_error() without formatting parameters, the function
        should produce formatted text output to the console.

        Feature: fishertools-refactor, Property 7: Default Console Output Behavior
        Validates: Requirements 3.3
        """
        exc = exception_type(error_message)

        # Run with default parameters and capture whatever hits stdout.
        captured = io.StringIO()
        with redirect_stdout(captured):
            explain_error(exc)
        text = captured.getvalue()

        # Some non-empty console output must be produced by default.
        assert len(text.strip()) > 0

        # Formatted, human-readable text: expect at least one section marker.
        section_markers = ("===", "Ошибка Python:", "Что это означает")
        assert any(marker in text for marker in section_markers)

        # The error type name must appear in the output.
        assert exception_type.__name__ in text

        # Default language is Russian, so Cyrillic characters must be present.
        cyrillic = set('абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ')
        assert any(ch in cyrillic for ch in text)

        # Default output must not look like a JSON document.
        stripped = text.strip()
        assert not stripped.startswith('{')
        assert not stripped.endswith('}')
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
@pytest.mark.property
class TestFormattingParameterResponsiveness:
    """Property tests for formatting parameter responsiveness."""

    @given(
        exception_type=st.sampled_from(COMMON_EXCEPTION_TYPES),
        error_message=st.text(min_size=1, max_size=100),
        format_type=st.sampled_from(['console', 'plain', 'json']),
        language=st.sampled_from(['ru', 'en'])
    )
    def test_formatting_parameter_responsiveness(self, exception_type, error_message,
                                                 format_type, language):
        """
        Property 8: Formatting Parameter Responsiveness
        For any valid formatting parameters passed to explain_error(), the output
        format should change accordingly.

        Feature: fishertools-refactor, Property 8: Formatting Parameter Responsiveness
        Validates: Requirements 3.4
        """
        # Create exception instance
        exception = exception_type(error_message)

        # Capture output with specific formatting parameters
        output_buffer = io.StringIO()

        with redirect_stdout(output_buffer):
            explain_error(exception, language=language, format_type=format_type)

        output = output_buffer.getvalue()

        # Property: Function should produce output regardless of format parameters
        assert len(output.strip()) > 0

        # Property: Output format should match the requested format_type
        if format_type == 'json':
            # JSON format should produce valid JSON structure
            import json
            try:
                # Output should be parseable as JSON
                json_data = json.loads(output)
                assert isinstance(json_data, dict)
                assert 'error_type' in json_data
                assert 'simple_explanation' in json_data
            except json.JSONDecodeError:
                pytest.fail(f"JSON format requested but output is not valid JSON: {output[:100]}...")

        elif format_type == 'plain':
            # Plain format should be simple text without special formatting
            # NOTE(review): this asserts the same section markers as the
            # 'console' branch below, which seems to contradict "without
            # special formatting" — presumably plain output keeps the
            # section headers but drops colors; confirm against the
            # formatter implementation.
            assert "===" in output or "Ошибка Python:" in output
            # Should not contain ANSI color codes
            assert '\033[' not in output

        elif format_type == 'console':
            # Console format should contain structured sections
            assert "===" in output or "Ошибка Python:" in output or "Что это означает" in output

        # Property: Language parameter should affect output language
        if language == 'ru':
            # Russian output should contain Cyrillic characters
            cyrillic_chars = set('абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ')
            assert any(char in cyrillic_chars for char in output)
        # (No Cyrillic assertion for 'en': pattern text may still contain
        # Russian fragments, so the inverse check would be flaky.)

        # Property: Error type should always be present in output
        assert exception_type.__name__ in output

    def test_invalid_formatting_parameters(self):
        """Test that invalid formatting parameters are properly rejected."""
        exception = ValueError("test error")

        # Test invalid language
        with pytest.raises(ValueError, match="должен быть одним из"):
            explain_error(exception, language='invalid')

        # Test invalid format_type
        with pytest.raises(ValueError, match="должен быть одним из"):
            explain_error(exception, format_type='invalid')

    @given(
        use_colors=st.booleans(),
        show_original_error=st.booleans(),
        show_traceback=st.booleans()
    )
    def test_additional_formatting_kwargs(self, use_colors, show_original_error, show_traceback):
        """Test that additional formatting kwargs are properly handled."""
        exception = TypeError("test error")

        output_buffer = io.StringIO()

        # Should not raise errors with additional kwargs
        with redirect_stdout(output_buffer):
            explain_error(
                exception,
                use_colors=use_colors,
                show_original_error=show_original_error,
                show_traceback=show_traceback
            )

        output = output_buffer.getvalue()

        # Property: Function should handle additional kwargs without errors
        assert len(output.strip()) > 0

        # Property: show_original_error parameter should affect output
        if show_original_error:
            assert "test error" in output or "TypeError" in output

        # Property: use_colors parameter should affect console output formatting
        if use_colors and sys.stdout.isatty():
            # May contain ANSI codes in terminal environment
            pass  # Color testing is environment-dependent
        else:
            # When colors disabled, should not contain ANSI escape codes
            # (Note: this is hard to test reliably across environments)
            pass
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
class TestAPIUnitTests:
    """Unit tests for specific API function behavior."""

    @staticmethod
    def _captured_output(exception, **kwargs):
        """Run explain_error and return everything it printed to stdout."""
        buffer = io.StringIO()
        with redirect_stdout(buffer):
            explain_error(exception, **kwargs)
        return buffer.getvalue()

    def test_basic_functionality(self):
        """Test basic explain_error functionality with known exception."""
        text = self._captured_output(
            TypeError("'str' object cannot be interpreted as an integer")
        )

        assert len(text) > 0
        assert "TypeError" in text
        assert "str" in text or "integer" in text

    def test_json_output_format(self):
        """Test JSON output format produces valid JSON."""
        import json

        text = self._captured_output(
            ValueError("invalid literal for int()"), format_type='json'
        )

        # The whole output must parse as a single JSON document.
        data = json.loads(text)

        assert data['error_type'] == 'ValueError'
        assert 'simple_explanation' in data
        assert 'fix_tip' in data
        assert 'code_example' in data

    def test_plain_output_format(self):
        """Test plain output format."""
        text = self._captured_output(
            IndexError("list index out of range"), format_type='plain'
        )

        assert "IndexError" in text
        assert "list index out of range" in text
        # Plain output must never carry ANSI color codes.
        assert '\033[' not in text
|
|
@@ -0,0 +1,354 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Tests for comprehensive error handling system.
|
|
3
|
+
|
|
4
|
+
This module tests the custom exception classes, graceful degradation,
|
|
5
|
+
and error recovery mechanisms in fishertools.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import pytest
|
|
9
|
+
import io
|
|
10
|
+
from contextlib import redirect_stdout
|
|
11
|
+
|
|
12
|
+
from fishertools.errors.exceptions import (
|
|
13
|
+
FishertoolsError, ExplanationError, FormattingError,
|
|
14
|
+
ConfigurationError, PatternError, SafeUtilityError
|
|
15
|
+
)
|
|
16
|
+
from fishertools.errors.explainer import ErrorExplainer, explain_error
|
|
17
|
+
from fishertools.errors.models import ErrorPattern, ErrorExplanation, ExplainerConfig
|
|
18
|
+
from fishertools.errors.formatters import get_formatter
|
|
19
|
+
from fishertools.safe import safe_get, safe_divide
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class TestCustomExceptions:
    """Test custom exception classes and their behavior."""

    def test_fishertools_error_base_class(self):
        """Test FishertoolsError base class functionality."""
        # Plain construction: message only, no wrapped cause.
        bare = FishertoolsError("Test error message")
        assert str(bare) == "Test error message"
        assert bare.message == "Test error message"
        assert bare.original_error is None

        # Construction that wraps an original error.
        cause = ValueError("Original error")
        wrapper = FishertoolsError("Wrapper error", cause)
        assert wrapper.original_error == cause

        # The full message mentions both messages and the cause label.
        full = wrapper.get_full_message()
        for expected in ("Wrapper error", "Original error", "Причина:"):
            assert expected in full

    def test_explanation_error_specifics(self):
        """Test ExplanationError specific functionality."""
        err = ExplanationError("Explanation failed", exception_type="ValueError")
        assert err.exception_type == "ValueError"

        full = err.get_full_message()
        for expected in ("Explanation failed", "ValueError", "Тип исключения:"):
            assert expected in full

    def test_formatting_error_specifics(self):
        """Test FormattingError specific functionality."""
        err = FormattingError("Formatting failed", formatter_type="console")
        assert err.formatter_type == "console"

        full = err.get_full_message()
        for expected in ("Formatting failed", "console", "Тип форматтера:"):
            assert expected in full

    def test_configuration_error_specifics(self):
        """Test ConfigurationError specific functionality."""
        err = ConfigurationError("Invalid config", config_field="language", config_value="invalid")
        assert err.config_field == "language"
        assert err.config_value == "invalid"

        full = err.get_full_message()
        for expected in ("Invalid config", "language", "invalid", "Поле:", "Значение:"):
            assert expected in full

    def test_pattern_error_specifics(self):
        """Test PatternError specific functionality."""
        err = PatternError("Pattern failed", pattern_type="TypeError")
        assert err.pattern_type == "TypeError"

        full = err.get_full_message()
        for expected in ("Pattern failed", "TypeError", "Тип паттерна:"):
            assert expected in full

    def test_safe_utility_error_specifics(self):
        """Test SafeUtilityError specific functionality."""
        err = SafeUtilityError("Utility failed", utility_name="safe_get")
        assert err.utility_name == "safe_get"

        full = err.get_full_message()
        for expected in ("Utility failed", "safe_get", "Утилита:"):
            assert expected in full
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
class TestErrorRecovery:
    """Test error recovery and graceful degradation."""

    def test_explainer_initialization_recovery(self):
        """Test ErrorExplainer recovery from initialization errors."""
        # Test with invalid config - should raise ConfigurationError
        # (the config constructor itself may raise before ErrorExplainer runs)
        with pytest.raises(ConfigurationError):
            invalid_config = ExplainerConfig(language="invalid")
            ErrorExplainer(invalid_config)

    def test_explanation_fallback_mechanism(self):
        """Test fallback explanation when pattern matching fails."""
        explainer = ErrorExplainer()

        # Create a custom exception that won't match any pattern
        # (locally defined, so no registered pattern can know about it)
        class UnknownCustomError(Exception):
            pass

        unknown_exception = UnknownCustomError("Unknown error")
        explanation = explainer.explain(unknown_exception)

        # Should create fallback explanation with all fields populated
        assert explanation.error_type == "UnknownCustomError"
        assert "Произошла ошибка типа UnknownCustomError" in explanation.simple_explanation
        assert explanation.fix_tip is not None
        assert explanation.code_example is not None

    def test_emergency_explanation_creation(self):
        """Test emergency explanation when all else fails."""
        explainer = ErrorExplainer()

        # Simulate a scenario where even fallback fails by creating problematic exception
        # whose __str__ raises, so any str(exc) inside the explainer blows up
        class ProblematicException(Exception):
            def __str__(self):
                raise RuntimeError("Cannot convert to string")

        problematic = ProblematicException()

        # Should still create some explanation without crashing
        explanation = explainer.explain(problematic)
        assert explanation is not None
        assert explanation.simple_explanation is not None
        assert explanation.fix_tip is not None

    def test_formatter_error_recovery(self):
        """Test recovery from formatter errors."""
        # Test with invalid formatter type
        with pytest.raises(FormattingError):
            get_formatter("invalid_type")

    def test_explain_error_graceful_degradation(self):
        """Test explain_error function graceful degradation."""
        # Test with various problematic scenarios
        test_cases = [
            ValueError("Test error"),
            Exception(""),  # Empty message
            RuntimeError("Very long error message " + "x" * 1000),
        ]

        for test_exception in test_cases:
            # Should not raise exceptions, should produce output
            output_buffer = io.StringIO()
            with redirect_stdout(output_buffer):
                explain_error(test_exception)

            output = output_buffer.getvalue()
            assert len(output) > 0  # Should produce some output

    def test_explain_error_parameter_validation(self):
        """Test explain_error parameter validation with custom exceptions."""
        # Test invalid exception parameter (validation messages are Russian)
        with pytest.raises(TypeError) as exc_info:
            explain_error("not an exception")
        assert "должен быть экземпляром Exception" in str(exc_info.value)

        # Test invalid language parameter
        with pytest.raises(ValueError) as exc_info:
            explain_error(ValueError("test"), language="invalid")
        assert "должен быть одним из" in str(exc_info.value)

        # Test invalid format_type parameter
        with pytest.raises(ValueError) as exc_info:
            explain_error(ValueError("test"), format_type="invalid")
        assert "должен быть одним из" in str(exc_info.value)
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
class TestSafeUtilityErrorHandling:
    """Test error handling in safe utility functions."""

    def test_safe_get_error_handling(self):
        """Test safe_get error handling with custom exceptions."""
        # None is rejected as a collection, with the utility name attached.
        with pytest.raises(SafeUtilityError) as caught:
            safe_get(None, 0)
        assert "не может быть None" in str(caught.value)
        assert caught.value.utility_name == "safe_get"

        # A number is not a supported collection type.
        with pytest.raises(SafeUtilityError) as caught:
            safe_get(123, 0)  # Number is not a valid collection
        assert "Неподдерживаемый тип коллекции" in str(caught.value)

        # A list index has to be numeric.
        with pytest.raises(SafeUtilityError) as caught:
            safe_get([1, 2, 3], "invalid_index")
        assert "индекс должен быть числом" in str(caught.value)

    def test_safe_divide_error_handling(self):
        """Test safe_divide error handling with custom exceptions."""
        # Non-numeric dividend.
        with pytest.raises(SafeUtilityError) as caught:
            safe_divide("not_a_number", 2)
        assert "должно быть числом" in str(caught.value)
        assert caught.value.utility_name == "safe_divide"

        # Non-numeric divisor.
        with pytest.raises(SafeUtilityError) as caught:
            safe_divide(10, "not_a_number")
        assert "должен быть числом" in str(caught.value)

        # Non-numeric default value (third argument).
        with pytest.raises(SafeUtilityError) as caught:
            safe_divide(10, 2, "not_a_number")
        assert "должно быть числом" in str(caught.value)
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
class TestModelValidationErrors:
    """Test validation errors in data models."""

    def test_error_pattern_validation(self):
        """Test ErrorPattern validation with custom exceptions."""
        # Test with empty explanation — constructor must raise PatternError
        with pytest.raises(PatternError) as exc_info:
            ErrorPattern(
                error_type=ValueError,
                error_keywords=["test"],
                explanation="",  # Empty explanation
                tip="Test tip",
                example="test code",
                common_causes=["test cause"]
            )
        assert "explanation cannot be empty" in str(exc_info.value)

        # Test with empty tip
        with pytest.raises(PatternError) as exc_info:
            ErrorPattern(
                error_type=ValueError,
                error_keywords=["test"],
                explanation="Test explanation",
                tip="",  # Empty tip
                example="test code",
                common_causes=["test cause"]
            )
        assert "tip cannot be empty" in str(exc_info.value)

    def test_error_explanation_validation(self):
        """Test ErrorExplanation validation with custom exceptions."""
        # Test with None original_error — validation happens in the constructor
        with pytest.raises(ExplanationError) as exc_info:
            ErrorExplanation(
                original_error=None,
                error_type="ValueError",
                simple_explanation="Test explanation",
                fix_tip="Test tip",
                code_example="test code"
            )
        assert "original_error cannot be None" in str(exc_info.value)

        # Test with empty simple_explanation
        # (note: original_error is a plain string here, which the model accepts)
        with pytest.raises(ExplanationError) as exc_info:
            ErrorExplanation(
                original_error="Test error",
                error_type="ValueError",
                simple_explanation="",  # Empty explanation
                fix_tip="Test tip",
                code_example="test code"
            )
        assert "simple_explanation cannot be empty" in str(exc_info.value)

    def test_explainer_config_validation(self):
        """Test ExplainerConfig validation with custom exceptions."""
        # Test with invalid language — error carries field name and bad value
        with pytest.raises(ConfigurationError) as exc_info:
            ExplainerConfig(language="invalid")
        assert "language must be" in str(exc_info.value)
        assert exc_info.value.config_field == "language"
        assert exc_info.value.config_value == "invalid"

        # Test with invalid format_type
        with pytest.raises(ConfigurationError) as exc_info:
            ExplainerConfig(format_type="invalid")
        assert "format_type must be" in str(exc_info.value)
        assert exc_info.value.config_field == "format_type"

        # Test with invalid max_explanation_length (must be positive)
        with pytest.raises(ConfigurationError) as exc_info:
            ExplainerConfig(max_explanation_length=-1)
        assert "max_explanation_length must be positive" in str(exc_info.value)
        assert exc_info.value.config_field == "max_explanation_length"
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
class TestErrorHandlingIntegration:
    """Test integration of error handling across the system."""

    def test_end_to_end_error_handling(self):
        """Test complete error handling flow from exception to output."""
        # A normal, well-formed exception should flow straight through.
        exc = ValueError("Test value error")

        sink = io.StringIO()
        with redirect_stdout(sink):
            explain_error(exc)
        text = sink.getvalue()

        assert len(text) > 0
        assert "ValueError" in text
        assert "Test value error" in text

    def test_error_handling_with_different_formatters(self):
        """Test error handling with different output formatters."""
        exc = TypeError("Test type error")

        # Every supported formatter must produce non-empty output.
        for format_type in ['console', 'plain', 'json']:
            sink = io.StringIO()
            with redirect_stdout(sink):
                explain_error(exc, format_type=format_type)
            text = sink.getvalue()

            assert len(text) > 0
            if format_type == 'json':
                # JSON output should be valid JSON structure
                import json
                try:
                    json.loads(text)
                except json.JSONDecodeError:
                    pytest.fail(f"Invalid JSON output for format_type={format_type}")

    def test_exception_hierarchy_catching(self):
        """Test that FishertoolsError can catch all custom exceptions."""
        samples = [
            ExplanationError("Test explanation error"),
            FormattingError("Test formatting error"),
            ConfigurationError("Test config error"),
            PatternError("Test pattern error"),
            SafeUtilityError("Test utility error")
        ]

        for exc in samples:
            # Every subclass must be an instance of the common base.
            assert isinstance(exc, FishertoolsError)

            # And must be catchable via that base.
            try:
                raise exc
            except FishertoolsError as caught:
                assert caught == exc
            except Exception:
                pytest.fail(f"Exception {type(exc).__name__} not caught by FishertoolsError")
|