fishertools-0.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fishertools/__init__.py +82 -0
- fishertools/config/__init__.py +24 -0
- fishertools/config/manager.py +247 -0
- fishertools/config/models.py +96 -0
- fishertools/config/parser.py +265 -0
- fishertools/decorators.py +93 -0
- fishertools/documentation/__init__.py +38 -0
- fishertools/documentation/api.py +242 -0
- fishertools/documentation/generator.py +502 -0
- fishertools/documentation/models.py +126 -0
- fishertools/documentation/visual.py +583 -0
- fishertools/errors/__init__.py +29 -0
- fishertools/errors/exceptions.py +191 -0
- fishertools/errors/explainer.py +303 -0
- fishertools/errors/formatters.py +386 -0
- fishertools/errors/models.py +228 -0
- fishertools/errors/patterns.py +119 -0
- fishertools/errors/recovery.py +467 -0
- fishertools/examples/__init__.py +22 -0
- fishertools/examples/models.py +118 -0
- fishertools/examples/repository.py +770 -0
- fishertools/helpers.py +116 -0
- fishertools/integration.py +451 -0
- fishertools/learn/__init__.py +18 -0
- fishertools/learn/examples.py +550 -0
- fishertools/learn/tips.py +281 -0
- fishertools/learning/__init__.py +32 -0
- fishertools/learning/core.py +349 -0
- fishertools/learning/models.py +112 -0
- fishertools/learning/progress.py +314 -0
- fishertools/learning/session.py +500 -0
- fishertools/learning/tutorial.py +626 -0
- fishertools/legacy/__init__.py +76 -0
- fishertools/legacy/deprecated.py +261 -0
- fishertools/legacy/deprecation.py +149 -0
- fishertools/safe/__init__.py +16 -0
- fishertools/safe/collections.py +242 -0
- fishertools/safe/files.py +240 -0
- fishertools/safe/strings.py +15 -0
- fishertools/utils.py +57 -0
- fishertools-0.2.1.dist-info/METADATA +256 -0
- fishertools-0.2.1.dist-info/RECORD +81 -0
- fishertools-0.2.1.dist-info/WHEEL +5 -0
- fishertools-0.2.1.dist-info/licenses/LICENSE +21 -0
- fishertools-0.2.1.dist-info/top_level.txt +2 -0
- tests/__init__.py +6 -0
- tests/conftest.py +25 -0
- tests/test_config/__init__.py +3 -0
- tests/test_config/test_basic_config.py +57 -0
- tests/test_config/test_config_error_handling.py +287 -0
- tests/test_config/test_config_properties.py +435 -0
- tests/test_documentation/__init__.py +3 -0
- tests/test_documentation/test_documentation_properties.py +253 -0
- tests/test_documentation/test_visual_documentation_properties.py +444 -0
- tests/test_errors/__init__.py +3 -0
- tests/test_errors/test_api.py +301 -0
- tests/test_errors/test_error_handling.py +354 -0
- tests/test_errors/test_explainer.py +173 -0
- tests/test_errors/test_formatters.py +338 -0
- tests/test_errors/test_models.py +248 -0
- tests/test_errors/test_patterns.py +270 -0
- tests/test_examples/__init__.py +3 -0
- tests/test_examples/test_example_repository_properties.py +204 -0
- tests/test_examples/test_specific_examples.py +303 -0
- tests/test_integration.py +298 -0
- tests/test_integration_enhancements.py +462 -0
- tests/test_learn/__init__.py +3 -0
- tests/test_learn/test_examples.py +221 -0
- tests/test_learn/test_tips.py +285 -0
- tests/test_learning/__init__.py +3 -0
- tests/test_learning/test_interactive_learning_properties.py +337 -0
- tests/test_learning/test_learning_system_properties.py +194 -0
- tests/test_learning/test_progress_tracking_properties.py +279 -0
- tests/test_legacy/__init__.py +3 -0
- tests/test_legacy/test_backward_compatibility.py +236 -0
- tests/test_legacy/test_deprecation_warnings.py +208 -0
- tests/test_safe/__init__.py +3 -0
- tests/test_safe/test_collections_properties.py +189 -0
- tests/test_safe/test_files.py +104 -0
- tests/test_structure.py +58 -0
- tests/test_structure_enhancements.py +115 -0
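
The listing above can be reproduced locally. As a minimal sketch, assuming the wheel fishertools-0.2.1-py3-none-any.whl has already been downloaded (for example with `pip download fishertools==0.2.1 --no-deps`), the archive members can be listed with the standard zipfile module:

# Sketch only: inspect the downloaded wheel (a wheel is a zip archive).
# The local filename is an assumption based on the release shown above.
import zipfile

with zipfile.ZipFile("fishertools-0.2.1-py3-none-any.whl") as whl:
    for name in whl.namelist():
        print(name)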
tests/test_legacy/test_backward_compatibility.py
@@ -0,0 +1,236 @@
"""
Property-based tests for backward compatibility

Feature: fishertools-refactor, Property 1: Backward Compatibility Preservation
For any retained function from the legacy library, calling it with the same inputs
should produce the same outputs as in the previous version.

**Validates: Requirements 1.4**
"""

import pytest
from hypothesis import given, strategies as st, assume
import json
import tempfile
import os
from pathlib import Path

# Import both original and legacy versions for comparison
import fishertools.utils as original_utils
import fishertools.helpers as original_helpers
import fishertools.decorators as original_decorators
import fishertools.legacy as legacy


class TestBackwardCompatibilityPreservation:
    """
    Property 1: Backward Compatibility Preservation
    For any retained function from the legacy library, calling it with the same inputs
    should produce the same outputs as in the previous version
    """

    @given(
        data=st.dictionaries(
            st.text(min_size=1, max_size=10),
            st.one_of(st.text(), st.integers(), st.floats(allow_nan=False))
        ),
        indent=st.integers(min_value=0, max_value=8)
    )
    def test_json_operations_compatibility(self, data, indent):
        """Test that JSON read/write operations maintain identical behavior"""
        assume(len(data) > 0)  # Ensure we have some data

        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
            temp_path = f.name

        try:
            # Test write_json compatibility
            original_utils.write_json(data, temp_path, indent)
            with open(temp_path, 'r', encoding='utf-8') as f:
                original_content = f.read()

            # Clear file and write with legacy version
            os.remove(temp_path)
            legacy.write_json(data, temp_path, indent)
            with open(temp_path, 'r', encoding='utf-8') as f:
                legacy_content = f.read()

            assert original_content == legacy_content, "write_json output differs"

            # Test read_json compatibility
            original_result = original_utils.read_json(temp_path)
            legacy_result = legacy.read_json(temp_path)

            assert original_result == legacy_result, "read_json output differs"

        finally:
            if os.path.exists(temp_path):
                os.remove(temp_path)

    @given(st.text(min_size=1, max_size=100))
    def test_string_operations_compatibility(self, text):
        """Test that string operations maintain identical behavior"""
        # Test clean_string
        original_cleaned = original_helpers.clean_string(text)
        legacy_cleaned = legacy.clean_string(text)
        assert original_cleaned == legacy_cleaned, "clean_string output differs"

        # Test validate_email (if text looks like email)
        if '@' in text and '.' in text:
            original_valid = original_helpers.validate_email(text)
            legacy_valid = legacy.validate_email(text)
            assert original_valid == legacy_valid, "validate_email output differs"

    @given(
        text=st.text(min_size=1, max_size=50),
        algorithm=st.sampled_from(['md5', 'sha1', 'sha256', 'sha512'])
    )
    def test_hash_string_compatibility(self, text, algorithm):
        """Test that hash_string maintains identical behavior"""
        original_hash = original_helpers.hash_string(text, algorithm)
        legacy_hash = legacy.hash_string(text, algorithm)
        assert original_hash == legacy_hash, "hash_string output differs"

    @given(
        length=st.integers(min_value=1, max_value=50),
        include_symbols=st.booleans()
    )
    def test_generate_password_compatibility(self, length, include_symbols):
        """Test that generate_password maintains identical character sets"""
        # We can't test exact output since it's random, but we can test properties
        original_pwd = original_helpers.generate_password(length, include_symbols)
        legacy_pwd = legacy.generate_password(length, include_symbols)

        # Both should have same length
        assert len(original_pwd) == len(legacy_pwd) == length

        # Both should use same character sets
        import string
        expected_chars = string.ascii_letters + string.digits
        if include_symbols:
            expected_chars += "!@#$%^&*"

        for char in original_pwd:
            assert char in expected_chars, f"Original password contains unexpected char: {char}"

        for char in legacy_pwd:
            assert char in expected_chars, f"Legacy password contains unexpected char: {char}"

    @given(
        lst=st.lists(st.integers(), min_size=1, max_size=20),
        chunk_size=st.integers(min_value=1, max_value=10)
    )
    def test_chunk_list_compatibility(self, lst, chunk_size):
        """Test that chunk_list maintains identical behavior"""
        original_chunks = original_helpers.chunk_list(lst, chunk_size)
        legacy_chunks = legacy.chunk_list(lst, chunk_size)
        assert original_chunks == legacy_chunks, "chunk_list output differs"

    @given(
        dicts=st.lists(
            st.dictionaries(st.text(min_size=1, max_size=5), st.integers()),
            min_size=1, max_size=5
        )
    )
    def test_merge_dicts_compatibility(self, dicts):
        """Test that merge_dicts maintains identical behavior"""
        original_merged = original_helpers.merge_dicts(*dicts)
        legacy_merged = legacy.merge_dicts(*dicts)
        assert original_merged == legacy_merged, "merge_dicts output differs"

    @given(
        nested_dict=st.dictionaries(
            st.text(min_size=1, max_size=5),
            st.one_of(
                st.integers(),
                st.dictionaries(st.text(min_size=1, max_size=3), st.integers())
            )
        ),
        sep=st.sampled_from(['.', '_', '-'])
    )
    def test_flatten_dict_compatibility(self, nested_dict, sep):
        """Test that flatten_dict maintains identical behavior"""
        assume(len(nested_dict) > 0)

        original_flat = original_utils.flatten_dict(nested_dict, sep=sep)
        legacy_flat = legacy.flatten_dict(nested_dict, sep=sep)
        assert original_flat == legacy_flat, "flatten_dict output differs"

    def test_timestamp_compatibility(self):
        """Test that timestamp format is identical"""
        # Since timestamp uses current time, we test format compatibility
        import time

        # Mock time to ensure identical output
        fixed_time = 1640995200.0  # 2022-01-01 00:00:00
        original_time_strftime = time.strftime

        def mock_strftime(fmt):
            return original_time_strftime(fmt, time.gmtime(fixed_time))

        time.strftime = mock_strftime

        try:
            # Import fresh to get mocked time
            import importlib
            importlib.reload(original_utils)
            from fishertools.legacy.deprecated import timestamp as legacy_timestamp

            original_ts = original_utils.timestamp()
            legacy_ts = legacy_timestamp()
            assert original_ts == legacy_ts, "timestamp format differs"
        finally:
            time.strftime = original_time_strftime

    @given(
        config_data=st.dictionaries(
            st.text(min_size=1, max_size=5),
            st.one_of(
                st.integers(),
                st.text(),
                st.dictionaries(st.text(min_size=1, max_size=3), st.integers())
            )
        ),
        key=st.text(min_size=1, max_size=10),
        default_value=st.one_of(st.none(), st.integers(), st.text())
    )
    def test_quick_config_compatibility(self, config_data, key, default_value):
        """Test that QuickConfig maintains identical behavior"""
        assume(len(config_data) > 0)

        original_config = original_helpers.QuickConfig(config_data)
        legacy_config = legacy.QuickConfig(config_data)

        # Test get method
        original_result = original_config.get(key, default_value)
        legacy_result = legacy_config.get(key, default_value)
        assert original_result == legacy_result, "QuickConfig.get output differs"

        # Test to_dict method
        assert original_config.to_dict() == legacy_config.to_dict(), "QuickConfig.to_dict output differs"

    def test_simple_logger_compatibility(self):
        """Test that SimpleLogger maintains identical behavior"""
        import io
        import sys
        from contextlib import redirect_stdout

        # Capture output from both loggers
        original_logger = original_helpers.SimpleLogger("Test")
        legacy_logger = legacy.SimpleLogger("Test")

        test_message = "Test message"

        # Test each log level
        for method_name in ['info', 'warning', 'error', 'debug']:
            original_output = io.StringIO()
            legacy_output = io.StringIO()

            with redirect_stdout(original_output):
                getattr(original_logger, method_name)(test_message)

            with redirect_stdout(legacy_output):
                getattr(legacy_logger, method_name)(test_message)

            assert original_output.getvalue() == legacy_output.getvalue(), \
                f"SimpleLogger.{method_name} output differs"
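
The class above compares each retained helper against its re-exported legacy counterpart on Hypothesis-generated inputs. As a minimal, self-contained sketch of that comparison pattern — the two `double_*` functions are hypothetical stand-ins, not part of fishertools:

# Sketch: two implementations that must agree on every generated input.
from hypothesis import given, settings, strategies as st

def double_original(x: int) -> int:
    return x * 2

def double_legacy(x: int) -> int:
    # stands in for the re-exported legacy alias in the real package
    return x * 2

@settings(max_examples=200)  # how many inputs Hypothesis generates per test run
@given(st.integers())
def test_double_compatibility(x):
    assert double_original(x) == double_legacy(x)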
tests/test_legacy/test_deprecation_warnings.py
@@ -0,0 +1,208 @@
"""
Property-based tests for deprecation warnings

Feature: fishertools-refactor, Property 2: Deprecation Warning Generation
For any deprecated function, calling it should generate a deprecation warning
while still executing the function.

**Validates: Requirements 1.5**
"""

import pytest
import warnings
from hypothesis import given, strategies as st
import tempfile
import os

from fishertools.legacy import (
    unsafe_file_reader,
    risky_divide,
    complex_list_operation,
    show_deprecation_info,
    list_deprecated_functions
)
from fishertools.legacy.deprecation import deprecated
import fishertools.legacy as legacy_module


class TestDeprecationWarningGeneration:
    """
    Property 2: Deprecation Warning Generation
    For any deprecated function, calling it should generate a deprecation warning
    while still executing the function
    """

    def test_deprecated_functions_generate_warnings(self):
        """Test that all deprecated functions generate warnings when called"""
        deprecated_funcs = list_deprecated_functions(legacy_module)

        # Ensure we have deprecated functions to test
        assert len(deprecated_funcs) > 0, "No deprecated functions found for testing"

        for func_name in deprecated_funcs:
            func = getattr(legacy_module, func_name)

            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")

                try:
                    # Call function with appropriate test arguments
                    if func_name == 'unsafe_file_reader':
                        # Create a temporary file for testing
                        with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
                            f.write("test content")
                            temp_path = f.name

                        try:
                            result = func(temp_path)
                            assert result == "test content", f"{func_name} didn't execute correctly"
                        finally:
                            os.unlink(temp_path)

                    elif func_name == 'risky_divide':
                        result = func(10.0, 2.0)
                        assert result == 5.0, f"{func_name} didn't execute correctly"

                    elif func_name == 'complex_list_operation':
                        result = func([1, 2, 3, 4, 5])
                        assert result == [1, 3, 5], f"{func_name} didn't execute correctly"

                except Exception as e:
                    # Some deprecated functions might fail, but should still warn
                    pass

                # Check that deprecation warning was issued
                assert len(w) > 0, f"No warning generated for deprecated function {func_name}"

                # Check that it's a DeprecationWarning
                deprecation_warnings = [warning for warning in w if issubclass(warning.category, DeprecationWarning)]
                assert len(deprecation_warnings) > 0, f"No DeprecationWarning generated for {func_name}"

                # Check warning message contains function name
                warning_message = str(deprecation_warnings[0].message)
                assert func_name in warning_message or "устарела" in warning_message, \
                    f"Warning message doesn't indicate deprecation: {warning_message}"

    @given(
        reason=st.text(min_size=10, max_size=100),
        alternative=st.one_of(st.none(), st.text(min_size=5, max_size=50)),
        removal_version=st.one_of(st.none(), st.text(min_size=3, max_size=10))
    )
    def test_deprecated_decorator_generates_warnings(self, reason, alternative, removal_version):
        """Test that the @deprecated decorator generates appropriate warnings"""

        # Create a test function with the deprecated decorator
        @deprecated(reason=reason, alternative=alternative, removal_version=removal_version)
        def test_function(x):
            return x * 2

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")

            # Call the decorated function
            result = test_function(5)

            # Function should still work correctly
            assert result == 10, "Deprecated function didn't execute correctly"

            # Should generate exactly one warning
            assert len(w) == 1, f"Expected 1 warning, got {len(w)}"

            # Should be a DeprecationWarning
            assert issubclass(w[0].category, DeprecationWarning), \
                f"Expected DeprecationWarning, got {w[0].category}"

            # Warning message should contain key information
            warning_message = str(w[0].message)
            assert "test_function" in warning_message, "Warning should contain function name"
            assert "устарела" in warning_message, "Warning should indicate deprecation in Russian"

            if alternative:
                assert alternative in warning_message, "Warning should contain alternative suggestion"

            if removal_version:
                assert removal_version in warning_message, "Warning should contain removal version"

    def test_deprecation_info_retrieval(self):
        """Test that deprecation information can be retrieved from decorated functions"""
        deprecated_funcs = list_deprecated_functions(legacy_module)

        for func_name in deprecated_funcs:
            func = getattr(legacy_module, func_name)
            info = show_deprecation_info(func)

            # Should return non-empty info for deprecated functions
            assert isinstance(info, dict), f"Deprecation info should be a dict for {func_name}"
            assert len(info) > 0, f"Deprecation info should not be empty for {func_name}"

            # Should contain expected keys
            expected_keys = ['reason', 'alternative', 'removal_version']
            for key in expected_keys:
                assert key in info, f"Missing key '{key}' in deprecation info for {func_name}"

    def test_non_deprecated_functions_no_warnings(self):
        """Test that non-deprecated functions don't generate warnings"""
        # Test some retained functions that should not be deprecated
        from fishertools.legacy import read_json, write_json, clean_string

        non_deprecated_funcs = [read_json, write_json, clean_string]

        for func in non_deprecated_funcs:
            info = show_deprecation_info(func)
            assert len(info) == 0, f"Non-deprecated function {func.__name__} has deprecation info"

    @given(
        a=st.floats(min_value=-1000, max_value=1000, allow_nan=False, allow_infinity=False),
        b=st.floats(min_value=0.1, max_value=1000, allow_nan=False, allow_infinity=False)  # Avoid division by zero
    )
    def test_risky_divide_warning_and_execution(self, a, b):
        """Test that risky_divide generates warning but still performs division"""
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")

            result = risky_divide(a, b)

            # Should generate deprecation warning
            assert len(w) > 0, "risky_divide should generate a warning"
            assert any(issubclass(warning.category, DeprecationWarning) for warning in w), \
                "Should generate DeprecationWarning"

            # Should still perform the calculation correctly
            expected = a / b
            assert abs(result - expected) < 1e-10, f"Expected {expected}, got {result}"

    @given(
        lst=st.lists(st.one_of(st.integers(), st.none()), min_size=0, max_size=20)
    )
    def test_complex_list_operation_warning_and_execution(self, lst):
        """Test that complex_list_operation generates warning but still works"""
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")

            result = complex_list_operation(lst)

            # Should generate deprecation warning
            assert len(w) > 0, "complex_list_operation should generate a warning"
            assert any(issubclass(warning.category, DeprecationWarning) for warning in w), \
                "Should generate DeprecationWarning"

            # Should still perform the operation correctly
            expected = [x for i, x in enumerate(lst) if i % 2 == 0 and x is not None]
            assert result == expected, f"Expected {expected}, got {result}"

    def test_warning_message_contains_migration_guidance(self):
        """Test that deprecation warnings contain helpful migration guidance"""
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")

            # Call a deprecated function
            risky_divide(10, 2)

            # Check warning message content
            assert len(w) > 0, "Should generate warning"
            warning_message = str(w[0].message)

            # Should contain migration guidance
            assert "safe_divide" in warning_message, "Should suggest safe alternative"
            assert "1.0.0" in warning_message, "Should mention removal version"
            assert "migration" in warning_message.lower(), "Should mention migration guide"
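
The tests above pin down the interface of the @deprecated decorator: reason, alternative and removal_version keyword arguments, exactly one DeprecationWarning per call, and the wrapped function still executing. The following is a minimal sketch of a decorator with that interface, not the package's implementation (whose messages are in Russian, hence the "устарела" checks above):

# Sketch only: illustrates the decorator contract exercised by the tests.
import functools
import warnings

def deprecated_sketch(reason, alternative=None, removal_version=None):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            message = f"{func.__name__} is deprecated: {reason}"
            if alternative:
                message += f" Use {alternative} instead."
            if removal_version:
                message += f" It will be removed in version {removal_version}."
            # One DeprecationWarning per call, pointing at the caller's frame
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator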
tests/test_safe/test_collections_properties.py
@@ -0,0 +1,189 @@
"""
Property-based tests for safe collection operations.

These tests validate Property 9: Safe Utility Error Prevention
Requirements: 4.1, 4.2
"""

import pytest
from hypothesis import given, strategies as st, assume
from fishertools.safe.collections import safe_get, safe_divide, safe_max, safe_min, safe_sum
from fishertools.errors.exceptions import SafeUtilityError


class TestSafeUtilitiesProperties:
    """Property-based tests for safe utilities."""

    @given(
        collection=st.one_of(
            st.lists(st.integers()),
            st.tuples(st.integers()),
            st.dictionaries(st.text(), st.integers()),
            st.text()
        ),
        index=st.one_of(st.integers(), st.text()),
        default=st.integers()
    )
    def test_safe_get_never_raises_index_key_error(self, collection, index, default):
        """
        **Property 9: Safe Utility Error Prevention**
        **Validates: Requirements 4.1, 4.2**

        For any collection and index, safe_get should never raise IndexError or KeyError,
        instead returning the default value or raising a helpful SafeUtilityError for invalid types.
        """
        try:
            result = safe_get(collection, index, default)
            # If no exception was raised, the result should be either:
            # 1. The actual value from the collection, or
            # 2. The default value
            assert result is not None or default is None
        except SafeUtilityError as e:
            # These are acceptable - they should contain helpful Russian messages
            error_message = str(e)
            assert len(error_message) > 0
            # Should not be the original Python error messages
            assert "list index out of range" not in error_message
            assert "key error" not in error_message.lower()
        except (IndexError, KeyError):
            # These should never be raised
            pytest.fail("safe_get raised IndexError or KeyError - should return default instead")

    @given(
        a=st.one_of(st.integers(), st.floats(allow_nan=False, allow_infinity=False, min_value=-1e10, max_value=1e10)),
        b=st.one_of(st.integers(), st.floats(allow_nan=False, allow_infinity=False, min_value=-1e10, max_value=1e10)),
        default=st.one_of(st.integers(), st.floats(allow_nan=False, allow_infinity=False))
    )
    def test_safe_divide_never_raises_zero_division_error(self, a, b, default):
        """
        **Property 9: Safe Utility Error Prevention**
        **Validates: Requirements 4.1, 4.2**

        For any numbers a and b, safe_divide should never raise ZeroDivisionError,
        instead returning the default value when b is zero.
        """
        try:
            result = safe_divide(a, b, default)
            if b == 0:
                # When dividing by zero, should return default
                assert result == default
            else:
                # When not dividing by zero, should return actual division
                expected = a / b
                # Handle cases where division might result in very large numbers
                if abs(expected) > 1e15:
                    # For very large results, just check that we got a finite number
                    assert not (result != result)  # Check for NaN
                    assert abs(result) < float('inf')  # Check for infinity
                else:
                    # For normal results, check precision
                    assert abs(result - expected) < 1e-10
        except (TypeError, ValueError) as e:
            # These are acceptable for invalid input types
            error_message = str(e)
            assert len(error_message) > 0
            # Should contain helpful Russian messages
            assert any(word in error_message for word in ["должно", "должен", "получен"])
        except ZeroDivisionError:
            # This should never be raised
            pytest.fail("safe_divide raised ZeroDivisionError - should return default instead")

    @given(
        collection=st.lists(st.integers()),
        default=st.integers()
    )
    def test_safe_max_never_raises_value_error(self, collection, default):
        """
        **Property 9: Safe Utility Error Prevention**
        **Validates: Requirements 4.1, 4.2**

        For any collection, safe_max should never raise ValueError for empty sequences,
        instead returning the default value.
        """
        try:
            result = safe_max(collection, default)
            if len(collection) == 0:
                assert result == default
            else:
                assert result == max(collection)
        except (TypeError, ValueError) as e:
            # Only acceptable for invalid input types, not empty collections
            error_message = str(e)
            assert "empty sequence" not in error_message.lower()
            assert len(error_message) > 0

    @given(
        collection=st.lists(st.integers()),
        default=st.integers()
    )
    def test_safe_min_never_raises_value_error(self, collection, default):
        """
        **Property 9: Safe Utility Error Prevention**
        **Validates: Requirements 4.1, 4.2**

        For any collection, safe_min should never raise ValueError for empty sequences,
        instead returning the default value.
        """
        try:
            result = safe_min(collection, default)
            if len(collection) == 0:
                assert result == default
            else:
                assert result == min(collection)
        except (TypeError, ValueError) as e:
            # Only acceptable for invalid input types, not empty collections
            error_message = str(e)
            assert "empty sequence" not in error_message.lower()
            assert len(error_message) > 0

    @given(
        collection=st.lists(st.integers()),
        default=st.integers()
    )
    def test_safe_sum_never_raises_type_error_for_empty(self, collection, default):
        """
        **Property 9: Safe Utility Error Prevention**
        **Validates: Requirements 4.1, 4.2**

        For any collection, safe_sum should handle empty collections gracefully
        and provide helpful error messages for type mismatches.
        """
        try:
            result = safe_sum(collection, default)
            if len(collection) == 0:
                assert result == default
            else:
                assert result == sum(collection)
        except SafeUtilityError as e:
            # Should provide helpful Russian error messages
            error_message = str(e)
            assert len(error_message) > 0
            # Should not be the original Python error messages
            assert "unsupported operand" not in error_message.lower()

    @given(invalid_input=st.one_of(st.none(), st.booleans(), st.complex_numbers()))
    def test_safe_utilities_provide_helpful_error_messages(self, invalid_input):
        """
        **Property 9: Safe Utility Error Prevention**
        **Validates: Requirements 4.1, 4.2**

        For any invalid input types, safe utilities should provide helpful error messages
        in Russian rather than cryptic Python exceptions.
        """
        # Test safe_get with invalid collection
        with pytest.raises(SafeUtilityError) as exc_info:
            safe_get(invalid_input, 0)

        error_message = str(exc_info.value)
        assert len(error_message) > 0
        # Should contain Russian words indicating helpful explanation
        assert any(word in error_message for word in ["не может", "должна", "должен", "Неподдерживаемый"])

        # Test safe_divide with invalid numbers
        if not isinstance(invalid_input, (int, float)):
            with pytest.raises(SafeUtilityError) as exc_info:
                safe_divide(invalid_input, 1)

            error_message = str(exc_info.value)
            assert len(error_message) > 0
            assert any(word in error_message for word in ["должно", "должен", "получен"])
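
Taken together, these properties imply a contract for safe_get: return the default instead of raising IndexError or KeyError, and raise a descriptive SafeUtilityError (with a Russian explanation) for unsupported collection types. A rough sketch of that contract, not the package's implementation:

# Sketch only: illustrates the behavior the properties above require of safe_get.
def safe_get_sketch(collection, index, default=None):
    if isinstance(collection, dict):
        # Missing keys yield the default rather than KeyError
        return collection.get(index, default)
    if isinstance(collection, (list, tuple, str)):
        try:
            return collection[index]
        except (IndexError, TypeError):
            # Out-of-range or mistyped index yields the default
            return default
    # The real package raises SafeUtilityError with an explanatory Russian message here
    raise TypeError(f"Unsupported collection type: {type(collection).__name__}")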