mapFolding-0.3.7-py3-none-any.whl → mapFolding-0.3.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/__init__.py +38 -0
- mapFolding/basecamp.py +55 -0
- mapFolding/beDRY.py +364 -0
- mapFolding/oeis.py +329 -0
- mapFolding/someAssemblyRequired/makeJob.py +62 -0
- mapFolding/someAssemblyRequired/synthesizeModuleJAX.py +29 -0
- someAssemblyRequired/synthesizeModuleJob.py → mapFolding/someAssemblyRequired/synthesizeModuleJobNumba.py +81 -27
- mapFolding/someAssemblyRequired/synthesizeModulesNumba.py +506 -0
- mapFolding/syntheticModules/__init__.py +3 -0
- syntheticModules/Initialize.py → mapFolding/syntheticModules/numba_countInitialize.py +5 -4
- syntheticModules/Parallel.py → mapFolding/syntheticModules/numba_countParallel.py +10 -5
- syntheticModules/Sequential.py → mapFolding/syntheticModules/numba_countSequential.py +5 -4
- mapFolding/syntheticModules/numba_doTheNeedful.py +33 -0
- mapFolding/theDao.py +214 -0
- mapFolding/theSSOT.py +269 -0
- mapFolding-0.3.9.dist-info/LICENSE +407 -0
- {mapFolding-0.3.7.dist-info → mapFolding-0.3.9.dist-info}/METADATA +9 -5
- mapFolding-0.3.9.dist-info/RECORD +40 -0
- mapFolding-0.3.9.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/conftest.py +224 -0
- tests/conftest_tmpRegistry.py +62 -0
- tests/conftest_uniformTests.py +53 -0
- tests/test_oeis.py +200 -0
- tests/test_other.py +258 -0
- tests/test_tasks.py +44 -0
- tests/test_types.py +5 -0
- benchmarks/benchmarking.py +0 -67
- citations/updateCitation.py +0 -238
- mapFolding-0.3.7.dist-info/RECORD +0 -25
- mapFolding-0.3.7.dist-info/top_level.txt +0 -5
- someAssemblyRequired/makeJob.py +0 -34
- someAssemblyRequired/synthesizeModules.py +0 -216
- syntheticModules/__init__.py +0 -4
- {reference → mapFolding/reference}/flattened.py +0 -0
- {reference → mapFolding/reference}/hunterNumba.py +0 -0
- {reference → mapFolding/reference}/irvineJavaPort.py +0 -0
- {reference → mapFolding/reference}/jax.py +0 -0
- {reference → mapFolding/reference}/lunnan.py +0 -0
- {reference → mapFolding/reference}/lunnanNumpy.py +0 -0
- {reference → mapFolding/reference}/lunnanWhile.py +0 -0
- {reference → mapFolding/reference}/rotatedEntryPoint.py +0 -0
- {reference → mapFolding/reference}/total_countPlus1vsPlusN.py +0 -0
- {someAssemblyRequired → mapFolding/someAssemblyRequired}/__init__.py +0 -0
- {someAssemblyRequired → mapFolding/someAssemblyRequired}/getLLVMforNoReason.py +0 -0
- {mapFolding-0.3.7.dist-info → mapFolding-0.3.9.dist-info}/WHEEL +0 -0
- {mapFolding-0.3.7.dist-info → mapFolding-0.3.9.dist-info}/entry_points.txt +0 -0
tests/conftest.py
ADDED
@@ -0,0 +1,224 @@
"""SSOT for Pytest"""

# TODO learn how to run tests and coverage analysis without `env = ["NUMBA_DISABLE_JIT=1"]`

from tests.conftest_tmpRegistry import (
    pathCacheTesting,
    pathDataSamples,
    pathFilenameFoldsTotalTesting,
    pathTempTesting,
    setupTeardownTestData,
)
from tests.conftest_uniformTests import (
    uniformTestMessage,
    standardizedEqualTo,
    standardizedSystemExit,
)
from mapFolding import *
from mapFolding import basecamp
from mapFolding import getAlgorithmCallable, getDispatcherCallable
from mapFolding.beDRY import *
from mapFolding.oeis import _getFilenameOEISbFile, _getOEISidValues
from mapFolding.oeis import *
from Z0Z_tools.pytestForYourUse import PytestFor_defineConcurrencyLimit, PytestFor_intInnit, PytestFor_oopsieKwargsie
from typing import Any, Callable, ContextManager, Dict, Generator, List, Optional, Sequence, Set, Tuple, Type, Union
import pathlib
import pytest
import random
import unittest.mock

def makeDictionaryFoldsTotalKnown() -> Dict[Tuple[int,...], int]:
    """Returns a dictionary mapping dimension tuples to their known folding totals."""
    dictionaryMapDimensionsToFoldsTotalKnown: Dict[Tuple[int, ...], int] = {}

    for settings in settingsOEIS.values():
        sequence = settings['valuesKnown']

        for n, foldingsTotal in sequence.items():
            dimensions = settings['getMapShape'](n)
            dimensions.sort()
            dictionaryMapDimensionsToFoldsTotalKnown[tuple(dimensions)] = foldingsTotal

    # Are we in a place that has jobs?
    pathJobDEFAULT = getPathJobDEFAULT()
    if pathJobDEFAULT.exists():
        # Are there foldsTotal files?
        for pathFilenameFoldsTotal in pathJobDEFAULT.rglob('*.foldsTotal'):
            if pathFilenameFoldsTotal.is_file():
                try:
                    listDimensions = eval(pathFilenameFoldsTotal.stem)
                except Exception:
                    continue
                # Are the dimensions in the dictionary?
                if isinstance(listDimensions, list) and all(isinstance(dimension, int) for dimension in listDimensions):
                    listDimensions.sort()
                    if tuple(listDimensions) in dictionaryMapDimensionsToFoldsTotalKnown:
                        continue
                    # Are the contents a reasonably large integer?
                    try:
                        foldsTotal = pathFilenameFoldsTotal.read_text()
                    except Exception:
                        continue
                    # Why did I sincerely believe this would only be three lines of code?
                    if foldsTotal.isdigit():
                        foldsTotalInteger = int(foldsTotal)
                        if foldsTotalInteger > 85109616 * 10**3:
                            # You made it this far, so fuck it: put it in the dictionary
                            dictionaryMapDimensionsToFoldsTotalKnown[tuple(listDimensions)] = foldsTotalInteger
                            dictionaryMapDimensionsToFoldsTotalKnown[tuple(listDimensions)] = foldsTotalInteger
    # The sunk-costs fallacy claims another victim!

    return dictionaryMapDimensionsToFoldsTotalKnown

"""
Section: Fixtures"""

@pytest.fixture(autouse=True)
def setupWarningsAsErrors() -> Generator[None, Any, None]:
    """Convert all warnings to errors for all tests."""
    import warnings
    warnings.filterwarnings("error")
    yield
    warnings.resetwarnings()

@pytest.fixture
def foldsTotalKnown() -> Dict[Tuple[int,...], int]:
    """Returns a dictionary mapping dimension tuples to their known folding totals.
    NOTE I am not convinced this is the best way to do this.
    Advantage: I call `makeDictionaryFoldsTotalKnown()` from modules other than test modules.
    Preference: I _think_ I would prefer a SSOT function available to any module
    similar to `foldsTotalKnown = getFoldsTotalKnown(listDimensions)`."""
    return makeDictionaryFoldsTotalKnown()

@pytest.fixture
def listDimensionsTestCountFolds(oeisID: str) -> List[int]:
    """For each `oeisID` from the `pytest.fixture`, returns `listDimensions` from `valuesTestValidation`
    if `validateListDimensions` approves. Each `listDimensions` is suitable for testing counts."""
    while True:
        n = random.choice(settingsOEIS[oeisID]['valuesTestValidation'])
        if n < 2:
            continue
        listDimensionsCandidate = settingsOEIS[oeisID]['getMapShape'](n)

        try:
            return validateListDimensions(listDimensionsCandidate)
        except (ValueError, NotImplementedError):
            pass

@pytest.fixture
def listDimensionsTestFunctionality(oeisID_1random: str) -> List[int]:
    """To test functionality, get one `listDimensions` from `valuesTestValidation` if
    `validateListDimensions` approves. The algorithm can count the folds of the returned
    `listDimensions` in a short enough time suitable for testing."""
    while True:
        n = random.choice(settingsOEIS[oeisID_1random]['valuesTestValidation'])
        if n < 2:
            continue
        listDimensionsCandidate = settingsOEIS[oeisID_1random]['getMapShape'](n)

        try:
            return validateListDimensions(listDimensionsCandidate)
        except (ValueError, NotImplementedError):
            pass

@pytest.fixture
def listDimensionsTestParallelization(oeisID: str) -> List[int]:
    """For each `oeisID` from the `pytest.fixture`, returns `listDimensions` from `valuesTestParallelization`"""
    n = random.choice(settingsOEIS[oeisID]['valuesTestParallelization'])
    return settingsOEIS[oeisID]['getMapShape'](n)

@pytest.fixture
def mockBenchmarkTimer() -> Generator[unittest.mock.MagicMock | unittest.mock.AsyncMock, Any, None]:
    """Mock time.perf_counter_ns for consistent benchmark timing."""
    with unittest.mock.patch('time.perf_counter_ns') as mockTimer:
        mockTimer.side_effect = [0, 1e9]  # Start and end times for 1 second
        yield mockTimer

@pytest.fixture
def mockFoldingFunction() -> Callable[..., Callable[..., None]]:
    """Creates a mock function that simulates _countFolds behavior."""
    def make_mock(foldsValue: int, listDimensions: List[int]) -> Callable[..., None]:
        mock_array = makeDataContainer(2)
        mock_array[0] = foldsValue
        mock_array[-1] = getLeavesTotal(listDimensions)

        def mock_countFolds(**keywordArguments: Any) -> None:
            keywordArguments['foldGroups'][:] = mock_array
            return None

        return mock_countFolds
    return make_mock

@pytest.fixture
def mockDispatcher() -> Callable[[Any], ContextManager[Any]]:
    """Context manager for mocking dispatcher callable."""
    def wrapper(mockFunction: Any) -> ContextManager[Any]:
        dispatcherCallable = getDispatcherCallable()
        return unittest.mock.patch(
            f"{dispatcherCallable.__module__}.{dispatcherCallable.__name__}",
            side_effect=mockFunction
        )
    return wrapper

@pytest.fixture(params=oeisIDsImplemented)
def oeisID(request: pytest.FixtureRequest) -> Any:
    return request.param

@pytest.fixture
def oeisID_1random() -> str:
    """Return one random valid OEIS ID."""
    return random.choice(oeisIDsImplemented)

@pytest.fixture
def useAlgorithmDirectly() -> Generator[None, Any, None]:
    """Temporarily patches getDispatcherCallable to return the algorithm source directly."""
    original_dispatcher = basecamp.getDispatcherCallable

    # Patch the function at module level
    basecamp.getDispatcherCallable = getAlgorithmCallable

    yield

    # Restore original function
    basecamp.getDispatcherCallable = original_dispatcher

"""
Section: Prototype test structures before moving to uniformTests.py"""

def prototypeCacheTest(
    expected: Any,
    setupCacheFile: Optional[Callable[[pathlib.Path, str], None]],
    oeisID: str,
    pathCache: pathlib.Path
) -> None:
    """Template for tests involving OEIS cache operations.

    Parameters
        expected: Expected value or exception from _getOEISidValues
        setupCacheFile: Function to prepare the cache file before test
        oeisID: OEIS ID to test
        pathCache: Temporary cache directory path
    """
    pathFilenameCache = pathCache / _getFilenameOEISbFile(oeisID)

    # Setup cache file if provided
    if setupCacheFile:
        setupCacheFile(pathFilenameCache, oeisID)

    # Run test
    try:
        actual: Any = _getOEISidValues(oeisID)
        messageActual = actual
    except Exception as actualError:
        actual = type(actualError)
        messageActual = type(actualError).__name__

    # Compare results
    if isinstance(expected, type) and issubclass(expected, Exception):
        messageExpected = expected.__name__
        assert isinstance(actual, expected), uniformTestMessage(
            messageExpected, messageActual, "_getOEISidValues", oeisID)
    else:
        messageExpected = expected
        assert actual == expected, uniformTestMessage(
            messageExpected, messageActual, "_getOEISidValues", oeisID)
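For orientation only, here is a minimal, hypothetical test that consumes the fixtures above. It is not part of the package (the real consumers are tests/test_oeis.py and tests/test_other.py); it simply illustrates the relationship between `makeDictionaryFoldsTotalKnown` and `listDimensionsTestFunctionality`:

from tests.conftest import *

def test_exampleUsingFixtures(foldsTotalKnown: Dict[Tuple[int, ...], int], listDimensionsTestFunctionality: List[int]) -> None:
    # Hypothetical sketch: any validated map shape drawn from valuesTestValidation
    # should already have an entry in the known-folds-totals dictionary fixture.
    assert tuple(sorted(listDimensionsTestFunctionality)) in foldsTotalKnown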
tests/conftest_tmpRegistry.py
ADDED
@@ -0,0 +1,62 @@
from typing import Any, Generator, Set
import pathlib
import pytest
import shutil
import uuid

# SSOT for test data paths
pathDataSamples = pathlib.Path("tests/dataSamples")
pathTempRoot = pathDataSamples / "tmp"

# The registrar maintains the register of temp files
registerOfTempFiles: Set[pathlib.Path] = set()

def addTempFileToRegister(path: pathlib.Path) -> None:
    """The registrar adds a temp file to the register."""
    registerOfTempFiles.add(path)

def cleanupTempFileRegister() -> None:
    """The registrar cleans up temp files in the register."""
    for pathTemp in sorted(registerOfTempFiles, reverse=True):
        try:
            if pathTemp.is_file():
                pathTemp.unlink(missing_ok=True)
            elif pathTemp.is_dir():
                shutil.rmtree(pathTemp, ignore_errors=True)
        except Exception as ERRORmessage:
            print(f"Warning: Failed to clean up {pathTemp}: {ERRORmessage}")
    registerOfTempFiles.clear()

@pytest.fixture(scope="session", autouse=True)
def setupTeardownTestData() -> Generator[None, None, None]:
    """Auto-fixture to setup test data directories and cleanup after."""
    pathDataSamples.mkdir(exist_ok=True)
    pathTempRoot.mkdir(exist_ok=True)
    yield
    cleanupTempFileRegister()

@pytest.fixture
def pathTempTesting(request: pytest.FixtureRequest) -> pathlib.Path:
    """Create a unique temp directory for each test function."""
    # TODO I got rid of this shit. how the fuck is it back?
    # Sanitize test name for filesystem compatibility
    sanitizedName = request.node.name.replace('[', '_').replace(']', '_').replace('/', '_')
    uniqueDirectory = f"{sanitizedName}_{uuid.uuid4()}"
    pathTemp = pathTempRoot / uniqueDirectory
    pathTemp.mkdir(parents=True, exist_ok=True)

    addTempFileToRegister(pathTemp)
    return pathTemp

@pytest.fixture
def pathCacheTesting(pathTempTesting: pathlib.Path) -> Generator[pathlib.Path, Any, None]:
    """Temporarily replace the OEIS cache directory with a test directory."""
    from mapFolding import oeis as there_must_be_a_better_way
    pathCacheOriginal = there_must_be_a_better_way._pathCache
    there_must_be_a_better_way._pathCache = pathTempTesting
    yield pathTempTesting
    there_must_be_a_better_way._pathCache = pathCacheOriginal

@pytest.fixture
def pathFilenameFoldsTotalTesting(pathTempTesting: pathlib.Path) -> pathlib.Path:
    return pathTempTesting.joinpath("foldsTotalTest.txt")
tests/conftest_uniformTests.py
ADDED
@@ -0,0 +1,53 @@
from typing import Any, Callable, Sequence, Type, Union
import pytest

def uniformTestMessage(expected: Any, actual: Any, functionName: str, *arguments: Any) -> str:
    """Format assertion message for any test comparison."""
    return (f"\nTesting: `{functionName}({', '.join(str(parameter) for parameter in arguments)})`\n"
            f"Expected: {expected}\n"
            f"Got: {actual}")

def standardizedEqualTo(expected: Any, functionTarget: Callable, *arguments: Any) -> None:
    """Template for tests expecting an error."""
    if type(expected) is Type[Exception]:
        messageExpected = expected.__name__
    else:
        messageExpected = expected

    try:
        messageActual = actual = functionTarget(*arguments)
    except Exception as actualError:
        messageActual = type(actualError).__name__
        actual = type(actualError)

    assert actual == expected, uniformTestMessage(messageExpected, messageActual, functionTarget.__name__, *arguments)

def standardizedSystemExit(expected: Union[str, int, Sequence[int]], functionTarget: Callable, *arguments: Any) -> None:
    """Template for tests expecting SystemExit.

    Parameters
        expected: Exit code expectation:
            - "error": any non-zero exit code
            - "nonError": specifically zero exit code
            - int: exact exit code match
            - Sequence[int]: exit code must be one of these values
        functionTarget: The function to test
        arguments: Arguments to pass to the function
    """
    with pytest.raises(SystemExit) as exitInfo:
        functionTarget(*arguments)

    exitCode = exitInfo.value.code

    if expected == "error":
        assert exitCode != 0, \
            f"Expected error exit (non-zero) but got code {exitCode}"
    elif expected == "nonError":
        assert exitCode == 0, \
            f"Expected non-error exit (0) but got code {exitCode}"
    elif isinstance(expected, (list, tuple)):
        assert exitCode in expected, \
            f"Expected exit code to be one of {expected} but got {exitCode}"
    else:
        assert exitCode == expected, \
            f"Expected exit code {expected} but got {exitCode}"
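Before the test module below, a short hypothetical sketch of how these two templates are meant to be called; the genuine call sites follow in tests/test_oeis.py. The example functions here are illustrative stand-ins, not package code:

import sys
from tests.conftest_uniformTests import standardizedEqualTo, standardizedSystemExit

standardizedEqualTo(4, pow, 2, 2)                     # compare a returned value
standardizedEqualTo(ZeroDivisionError, divmod, 1, 0)  # compare a raised exception type

def exitWithErrorCode() -> None:
    sys.exit(2)

standardizedSystemExit("error", exitWithErrorCode)    # any non-zero exit code passes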
tests/test_oeis.py
ADDED
@@ -0,0 +1,200 @@
from contextlib import redirect_stdout
from datetime import datetime, timedelta
from mapFolding.oeis import _getFilenameOEISbFile, _getOEISidValues, _parseBFileOEIS, _validateOEISid
from tests.conftest import *
from typing import Optional, NoReturn, Tuple, Union
import io
import os
import pathlib
import pytest
import random
import re as regex
import unittest
import unittest.mock
import urllib
from urllib.error import URLError
import urllib.request

def test_algorithmSourceSequential(oeisID: str, useAlgorithmDirectly: None) -> None:
    for n in settingsOEIS[oeisID]['valuesTestValidation']:
        standardizedEqualTo(settingsOEIS[oeisID]['valuesKnown'][n], oeisIDfor_n, oeisID, n)

def test_aOFn_calculate_value(oeisID: str) -> None:
    for n in settingsOEIS[oeisID]['valuesTestValidation']:
        standardizedEqualTo(settingsOEIS[oeisID]['valuesKnown'][n], oeisIDfor_n, oeisID, n)

@pytest.mark.parametrize("badID", ["A999999", " A999999 ", "A999999extra"])
def test__validateOEISid_invalid_id(badID: str) -> None:
    standardizedEqualTo(KeyError, _validateOEISid, badID)

def test__validateOEISid_partially_valid(oeisID_1random: str) -> None:
    standardizedEqualTo(KeyError, _validateOEISid, f"{oeisID_1random}extra")

def test__validateOEISid_valid_id(oeisID: str) -> None:
    standardizedEqualTo(oeisID, _validateOEISid, oeisID)

def test__validateOEISid_valid_id_case_insensitive(oeisID: str) -> None:
    standardizedEqualTo(oeisID.upper(), _validateOEISid, oeisID.lower())
    standardizedEqualTo(oeisID.upper(), _validateOEISid, oeisID.upper())
    standardizedEqualTo(oeisID.upper(), _validateOEISid, oeisID.swapcase())

parameters_test_aOFn_invalid_n = [
    # (2, "ok"), # test the test template
    (-random.randint(1, 100), "randomNegative"),
    ("foo", "string"),
    (1.5, "float")
]
badValues, badValuesIDs = zip(*parameters_test_aOFn_invalid_n)
@pytest.mark.parametrize("badN", badValues, ids=badValuesIDs)
def test_aOFn_invalid_n(oeisID_1random: str, badN: Any) -> None:
    """Check that negative or non-integer n raises ValueError."""
    standardizedEqualTo(ValueError, oeisIDfor_n, oeisID_1random, badN)

def test_aOFn_zeroDim_A001418() -> None:
    standardizedEqualTo(ArithmeticError, oeisIDfor_n, 'A001418', 0)

# ===== OEIS Cache Tests =====
@pytest.mark.parametrize("cacheExists", [True, False])
@unittest.mock.patch('pathlib.Path.exists')
@unittest.mock.patch('pathlib.Path.unlink')
def test_clearOEIScache(mock_unlink: unittest.mock.MagicMock, mock_exists: unittest.mock.MagicMock, cacheExists: bool) -> None:
    """Test OEIS cache clearing with both existing and non-existing cache."""
    mock_exists.return_value = cacheExists
    clearOEIScache()

    if cacheExists:
        assert mock_unlink.call_count == len(settingsOEIS)
        mock_unlink.assert_has_calls([unittest.mock.call(missing_ok=True)] * len(settingsOEIS))
    else:
        mock_exists.assert_called_once()
        mock_unlink.assert_not_called()

@pytest.mark.parametrize("scenarioCache", ["miss", "expired", "invalid"])
def testCacheScenarios(pathCacheTesting: pathlib.Path, oeisID_1random: str, scenarioCache: str) -> None:
    """Test cache scenarios: missing file, expired file, and invalid file."""

    def setupCacheExpired(pathCache: pathlib.Path, oeisID: str) -> None:
        pathCache.write_text("# Old cache content")
        oldModificationTime = datetime.now() - timedelta(days=30)
        os.utime(pathCache, times=(oldModificationTime.timestamp(), oldModificationTime.timestamp()))

    def setupCacheInvalid(pathCache: pathlib.Path, oeisID: str) -> None:
        pathCache.write_text("Invalid content")

    if scenarioCache == "miss":
        prototypeCacheTest(settingsOEIS[oeisID_1random]['valuesKnown'], None, oeisID_1random, pathCacheTesting)
    elif scenarioCache == "expired":
        prototypeCacheTest(settingsOEIS[oeisID_1random]['valuesKnown'], setupCacheExpired, oeisID_1random, pathCacheTesting)
    else:
        prototypeCacheTest(settingsOEIS[oeisID_1random]['valuesKnown'], setupCacheInvalid, oeisID_1random, pathCacheTesting)

def testInvalidFileContent(pathCacheTesting: pathlib.Path, oeisID_1random: str) -> None:
    pathFilenameCache = pathCacheTesting / _getFilenameOEISbFile(oeisID=oeisID_1random)

    # Write invalid content to cache
    pathFilenameCache.write_text("# A999999\n1 1\n2 2\n")
    modificationTimeOriginal = pathFilenameCache.stat().st_mtime

    # Function should detect invalid content, fetch fresh data, and update cache
    OEISsequence = _getOEISidValues(oeisID_1random)

    # Verify the function succeeded
    assert OEISsequence is not None
    # Verify cache was updated (modification time changed)
    assert pathFilenameCache.stat().st_mtime > modificationTimeOriginal
    # Verify cache now contains correct sequence ID
    assert f"# {oeisID_1random}" in pathFilenameCache.read_text()

def testParseContentErrors() -> None:
    """Test invalid content parsing."""
    standardizedEqualTo(ValueError, _parseBFileOEIS, "Invalid content\n1 2\n", 'A001415')

def testExtraComments(pathCacheTesting: pathlib.Path, oeisID_1random: str) -> None:
    pathFilenameCache = pathCacheTesting / _getFilenameOEISbFile(oeisID=oeisID_1random)

    # Write content with extra comment lines
    contentWithExtraComments = f"""# {oeisID_1random}
# Normal place for comment line 1
# Abnormal comment line
1 2
2 4
3 6
# Another comment in the middle
4 8
5 10"""
    pathFilenameCache.write_text(contentWithExtraComments)

    OEISsequence = _getOEISidValues(oeisID_1random)
    # Verify sequence values are correct despite extra comments
    standardizedEqualTo(2, lambda d: d[1], OEISsequence)  # First value
    standardizedEqualTo(8, lambda d: d[4], OEISsequence)  # Value after mid-sequence comment
    standardizedEqualTo(10, lambda d: d[5], OEISsequence)  # Last value

def testNetworkError(monkeypatch: pytest.MonkeyPatch, pathCacheTesting: pathlib.Path) -> None:
    """Test network error handling."""
    def mockUrlopen(*args: Any, **kwargs: Any) -> NoReturn:
        raise URLError("Network error")

    monkeypatch.setattr(urllib.request, 'urlopen', mockUrlopen)
    standardizedEqualTo(URLError, _getOEISidValues, next(iter(settingsOEIS)))

# ===== Command Line Interface Tests =====
def testHelpText() -> None:
    """Test that help text is complete and examples are valid."""
    outputStream = io.StringIO()
    with redirect_stdout(outputStream):
        getOEISids()

    helpText = outputStream.getvalue()

    # Verify content
    for oeisID in oeisIDsImplemented:
        assert oeisID in helpText
        assert settingsOEIS[oeisID]['description'] in helpText

    # Extract and verify examples
    cliMatch = regex.search(r'OEIS_for_n (\w+) (\d+)', helpText)
    pythonMatch = regex.search(r"oeisIDfor_n\('(\w+)', (\d+)\)", helpText)

    assert cliMatch and pythonMatch, "Help text missing examples"
    oeisID, n = pythonMatch.groups()
    n = int(n)

    # Verify CLI and Python examples use same values
    assert cliMatch.groups() == (oeisID, str(n)), "CLI and Python examples inconsistent"

    # Verify the example works
    expectedValue = oeisIDfor_n(oeisID, n)

    # Test CLI execution of the example
    with unittest.mock.patch('sys.argv', ['OEIS_for_n', oeisID, str(n)]):
        outputStream = io.StringIO()
        with redirect_stdout(outputStream):
            OEIS_for_n()
    standardizedEqualTo(expectedValue, lambda: int(outputStream.getvalue().strip().split()[0]))

def testCLI_InvalidInputs() -> None:
    """Test CLI error handling."""
    testCases = [
        (['OEIS_for_n'], "missing arguments"),
        (['OEIS_for_n', 'A999999', '1'], "invalid OEIS ID"),
        (['OEIS_for_n', 'A001415', '-1'], "negative n"),
        (['OEIS_for_n', 'A001415', 'abc'], "non-integer n"),
    ]

    for arguments, testID in testCases:
        with unittest.mock.patch('sys.argv', arguments):
            standardizedSystemExit("error", OEIS_for_n)

def testCLI_HelpFlag() -> None:
    """Verify --help output contains required information."""
    with unittest.mock.patch('sys.argv', ['OEIS_for_n', '--help']):
        outputStream = io.StringIO()
        with redirect_stdout(outputStream):
            standardizedSystemExit("nonError", OEIS_for_n)

    helpOutput = outputStream.getvalue()
    assert "Available OEIS sequences:" in helpOutput
    assert "Usage examples:" in helpOutput
    assert all(oeisID in helpOutput for oeisID in oeisIDsImplemented)