mapFolding 0.3.8__py3-none-any.whl → 0.3.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/__init__.py +56 -0
- mapFolding/basecamp.py +55 -0
- mapFolding/beDRY.py +376 -0
- mapFolding/oeis.py +339 -0
- mapFolding/someAssemblyRequired/__init__.py +2 -0
- {someAssemblyRequired → mapFolding/someAssemblyRequired}/makeJob.py +4 -5
- mapFolding/someAssemblyRequired/synthesizeJobNumba.py +383 -0
- mapFolding/someAssemblyRequired/synthesizeModuleJAX.py +29 -0
- {someAssemblyRequired → mapFolding/someAssemblyRequired}/synthesizeModulesNumba.py +186 -99
- syntheticModules/numbaInitialize.py → mapFolding/syntheticModules/numba_countInitialize.py +6 -9
- syntheticModules/numbaParallel.py → mapFolding/syntheticModules/numba_countParallel.py +4 -4
- syntheticModules/numbaSequential.py → mapFolding/syntheticModules/numba_countSequential.py +5 -5
- mapFolding/syntheticModules/numba_doTheNeedful.py +30 -0
- mapFolding/theDao.py +213 -0
- mapFolding/theSSOT.py +251 -0
- mapFolding/theSSOTnumba.py +115 -0
- mapFolding-0.3.10.dist-info/LICENSE +407 -0
- {mapFolding-0.3.8.dist-info → mapFolding-0.3.10.dist-info}/METADATA +9 -11
- mapFolding-0.3.10.dist-info/RECORD +40 -0
- mapFolding-0.3.10.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/conftest.py +183 -0
- tests/conftest_tmpRegistry.py +62 -0
- tests/conftest_uniformTests.py +53 -0
- tests/test_oeis.py +141 -0
- tests/test_other.py +259 -0
- tests/test_tasks.py +44 -0
- tests/test_types.py +5 -0
- benchmarks/benchmarking.py +0 -67
- citations/constants.py +0 -3
- citations/updateCitation.py +0 -354
- mapFolding-0.3.8.dist-info/RECORD +0 -26
- mapFolding-0.3.8.dist-info/top_level.txt +0 -5
- someAssemblyRequired/__init__.py +0 -1
- someAssemblyRequired/synthesizeModuleJobNumba.py +0 -212
- syntheticModules/__init__.py +0 -3
- {reference → mapFolding/reference}/flattened.py +0 -0
- {reference → mapFolding/reference}/hunterNumba.py +0 -0
- {reference → mapFolding/reference}/irvineJavaPort.py +0 -0
- {reference → mapFolding/reference}/jax.py +0 -0
- {reference → mapFolding/reference}/lunnan.py +0 -0
- {reference → mapFolding/reference}/lunnanNumpy.py +0 -0
- {reference → mapFolding/reference}/lunnanWhile.py +0 -0
- {reference → mapFolding/reference}/rotatedEntryPoint.py +0 -0
- {reference → mapFolding/reference}/total_countPlus1vsPlusN.py +0 -0
- {someAssemblyRequired → mapFolding/someAssemblyRequired}/getLLVMforNoReason.py +0 -0
- {mapFolding-0.3.8.dist-info → mapFolding-0.3.10.dist-info}/WHEEL +0 -0
- {mapFolding-0.3.8.dist-info → mapFolding-0.3.10.dist-info}/entry_points.txt +0 -0
tests/conftest.py
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
1
|
+
"""SSOT for Pytest"""
|
|
2
|
+
|
|
3
|
+
# TODO learn how to run tests and coverage analysis without `env = ["NUMBA_DISABLE_JIT=1"]`
|
|
4
|
+
|
|
5
|
+
from tests.conftest_tmpRegistry import (
|
|
6
|
+
pathCacheTesting,
|
|
7
|
+
pathDataSamples,
|
|
8
|
+
pathFilenameFoldsTotalTesting,
|
|
9
|
+
pathTempTesting,
|
|
10
|
+
setupTeardownTestData,
|
|
11
|
+
)
|
|
12
|
+
from tests.conftest_uniformTests import (
|
|
13
|
+
uniformTestMessage,
|
|
14
|
+
standardizedEqualTo,
|
|
15
|
+
standardizedSystemExit,
|
|
16
|
+
)
|
|
17
|
+
from mapFolding import *
|
|
18
|
+
from mapFolding import basecamp
|
|
19
|
+
from mapFolding import getAlgorithmCallable, getDispatcherCallable
|
|
20
|
+
from mapFolding.beDRY import *
|
|
21
|
+
from mapFolding.oeis import _getFilenameOEISbFile, _getOEISidValues, _getOEISidInformation
|
|
22
|
+
from mapFolding.oeis import *
|
|
23
|
+
from Z0Z_tools.pytestForYourUse import PytestFor_defineConcurrencyLimit, PytestFor_intInnit, PytestFor_oopsieKwargsie
|
|
24
|
+
from typing import Any, Callable, ContextManager, Dict, Generator, List, Optional, Sequence, Set, Tuple, Type, Union
|
|
25
|
+
import pathlib
|
|
26
|
+
import pytest
|
|
27
|
+
import random
|
|
28
|
+
import unittest.mock
|
|
29
|
+
|
|
30
|
+
def makeDictionaryFoldsTotalKnown() -> Dict[Tuple[int,...], int]:
    """Returns a dictionary mapping dimension tuples to their known folding totals.

    Seeds the dictionary from each OEIS sequence's `valuesKnown` table, then
    opportunistically merges in results from `*.foldsTotal` files found under the
    default job directory (if one exists on this machine).

    Returns:
        Dictionary keyed by sorted dimension tuples, valued by folds totals.
    """
    # Local import: only this function needs literal parsing.
    from ast import literal_eval

    dictionaryMapDimensionsToFoldsTotalKnown: Dict[Tuple[int, ...], int] = {}

    for settings in settingsOEIS.values():
        sequence = settings['valuesKnown']

        for n, foldingsTotal in sequence.items():
            dimensions = settings['getMapShape'](n)
            # Keys are always sorted so equivalent maps collide on one entry.
            dimensions.sort()
            dictionaryMapDimensionsToFoldsTotalKnown[tuple(dimensions)] = foldingsTotal

    # Are we in a place that has jobs?
    pathJobDEFAULT = getPathJobRootDEFAULT()
    if pathJobDEFAULT.exists():
        # Are there foldsTotal files?
        for pathFilenameFoldsTotal in pathJobDEFAULT.rglob('*.foldsTotal'):
            if pathFilenameFoldsTotal.is_file():
                try:
                    # SECURITY: filenames are untrusted input; `ast.literal_eval`
                    # parses the same `[2, 3]`-style stems as `eval` did, without
                    # executing arbitrary code.
                    listDimensions = literal_eval(pathFilenameFoldsTotal.stem)
                except Exception:
                    continue
                # Are the dimensions in the dictionary?
                if isinstance(listDimensions, list) and all(isinstance(dimension, int) for dimension in listDimensions):
                    listDimensions.sort()
                    if tuple(listDimensions) in dictionaryMapDimensionsToFoldsTotalKnown:
                        continue
                    # Are the contents a reasonably large integer?
                    try:
                        foldsTotal = pathFilenameFoldsTotal.read_text()
                    except Exception:
                        continue
                    if foldsTotal.isdigit():
                        foldsTotalInteger = int(foldsTotal)
                        # Heuristic threshold: only trust file contents that are
                        # large enough to plausibly be a real folds total.
                        if foldsTotalInteger > 85109616 * 10**3:
                            # NOTE(review): the original assigned this entry on two
                            # consecutive lines; a single guarded assignment is kept.
                            dictionaryMapDimensionsToFoldsTotalKnown[tuple(listDimensions)] = foldsTotalInteger

    return dictionaryMapDimensionsToFoldsTotalKnown
|
|
72
|
+
|
|
73
|
+
"""
|
|
74
|
+
Section: Fixtures"""
|
|
75
|
+
|
|
76
|
+
@pytest.fixture(autouse=True)
def setupWarningsAsErrors() -> Generator[None, Any, None]:
    """Escalate every warning to an error for the duration of each test, then
    restore the default warning filters."""
    import warnings

    warnings.filterwarnings("error")
    yield
    warnings.resetwarnings()
|
|
83
|
+
|
|
84
|
+
@pytest.fixture
def foldsTotalKnown() -> Dict[Tuple[int,...], int]:
    """Known folds totals keyed by sorted dimension tuples.

    NOTE Non-test modules can call `makeDictionaryFoldsTotalKnown()` directly;
    a shared SSOT lookup such as `getFoldsTotalKnown(listDimensions)` might
    serve both audiences better than this fixture."""
    return makeDictionaryFoldsTotalKnown()
|
|
92
|
+
|
|
93
|
+
@pytest.fixture
def listDimensionsTestCountFolds(oeisID: str) -> List[int]:
    """For each `oeisID` from the `pytest.fixture`, returns `listDimensions` from `valuesTestValidation`
    if `validateListDimensions` approves. Each `listDimensions` is suitable for testing counts.

    Raises:
        ValueError: when no candidate n yields a valid `listDimensions`.
    """
    # Shuffle a copy so the pick stays random but each candidate is tried at
    # most once: the original `while True` + `random.choice` loop could spin
    # forever when no candidate validates.
    candidates = list(settingsOEIS[oeisID]['valuesTestValidation'])
    random.shuffle(candidates)
    for n in candidates:
        if n < 2:
            continue
        listDimensionsCandidate = settingsOEIS[oeisID]['getMapShape'](n)

        try:
            return validateListDimensions(listDimensionsCandidate)
        except (ValueError, NotImplementedError):
            continue
    raise ValueError(f"No valid listDimensions found for {oeisID}")
|
|
107
|
+
|
|
108
|
+
@pytest.fixture
def listDimensionsTestFunctionality(oeisID_1random: str) -> List[int]:
    """To test functionality, get one `listDimensions` from `valuesTestValidation` if
    `validateListDimensions` approves. The algorithm can count the folds of the returned
    `listDimensions` in a short enough time suitable for testing.

    Raises:
        ValueError: when no candidate n yields a valid `listDimensions`.
    """
    # Try each candidate at most once (shuffled for randomness) instead of
    # sampling with replacement forever: the original `while True` loop could
    # never terminate when every candidate fails validation.
    candidates = list(settingsOEIS[oeisID_1random]['valuesTestValidation'])
    random.shuffle(candidates)
    for n in candidates:
        if n < 2:
            continue
        listDimensionsCandidate = settingsOEIS[oeisID_1random]['getMapShape'](n)

        try:
            return validateListDimensions(listDimensionsCandidate)
        except (ValueError, NotImplementedError):
            continue
    raise ValueError(f"No valid listDimensions found for {oeisID_1random}")
|
|
123
|
+
|
|
124
|
+
@pytest.fixture
def listDimensionsTestParallelization(oeisID: str) -> List[int]:
    """For each `oeisID` from the `pytest.fixture`, returns `listDimensions` from `valuesTestParallelization`."""
    valuesAvailable = settingsOEIS[oeisID]['valuesTestParallelization']
    n = random.choice(valuesAvailable)
    return settingsOEIS[oeisID]['getMapShape'](n)
|
|
129
|
+
|
|
130
|
+
@pytest.fixture
def mockBenchmarkTimer() -> Generator[unittest.mock.MagicMock | unittest.mock.AsyncMock, Any, None]:
    """Patch `time.perf_counter_ns` so benchmarks always measure exactly one second."""
    with unittest.mock.patch('time.perf_counter_ns') as patchedTimer:
        # First call returns 0 ns, second returns 1e9 ns: a 1-second interval.
        patchedTimer.side_effect = [0, 1e9]
        yield patchedTimer
|
|
136
|
+
|
|
137
|
+
@pytest.fixture
def mockFoldingFunction() -> Callable[..., Callable[..., None]]:
    """Factory fixture: builds a stand-in for `_countFolds` that writes a
    prepared result array into the `foldGroups` keyword argument instead of
    computing anything."""
    def make_mock(foldsValue: int, listDimensions: List[int]) -> Callable[..., None]:
        # Pre-build the result container the real function would have filled.
        preparedArray = makeDataContainer(2)
        preparedArray[0] = foldsValue
        preparedArray[-1] = getLeavesTotal(listDimensions)

        def mock_countFolds(**keywordArguments: Any) -> None:
            # Overwrite the caller-supplied buffer in place, like the real thing.
            keywordArguments['foldGroups'][:] = preparedArray

        return mock_countFolds

    return make_mock
|
|
151
|
+
|
|
152
|
+
@pytest.fixture
def mockDispatcher() -> Callable[[Any], ContextManager[Any]]:
    """Factory fixture: wrap a replacement callable in a patch over the
    currently configured dispatcher."""
    def wrapper(mockFunction: Any) -> ContextManager[Any]:
        dispatcherCallable = getDispatcherCallable()
        targetQualifiedName = f"{dispatcherCallable.__module__}.{dispatcherCallable.__name__}"
        return unittest.mock.patch(targetQualifiedName, side_effect=mockFunction)
    return wrapper
|
|
162
|
+
|
|
163
|
+
@pytest.fixture(params=oeisIDsImplemented)
def oeisID(request: pytest.FixtureRequest) -> Any:
    """Parametrized fixture: yields every implemented OEIS ID in turn."""
    return request.param
|
|
166
|
+
|
|
167
|
+
@pytest.fixture
def oeisID_1random() -> str:
    """Return one random valid OEIS ID."""
    chosenID = random.choice(oeisIDsImplemented)
    return chosenID
|
|
171
|
+
|
|
172
|
+
@pytest.fixture
def useAlgorithmDirectly() -> Generator[None, Any, None]:
    """Temporarily patches getDispatcherCallable to return the algorithm source directly.

    The restore is wrapped in try/finally so the patch cannot leak: without it,
    an exception thrown into the generator (e.g. during dependent-fixture
    teardown) would leave `basecamp.getDispatcherCallable` permanently replaced
    for every later test.
    """
    original_dispatcher = basecamp.getDispatcherCallable

    # Patch the function at module level
    basecamp.getDispatcherCallable = getAlgorithmCallable
    try:
        yield
    finally:
        # Restore original function even if the test body raised
        basecamp.getDispatcherCallable = original_dispatcher
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
from typing import Any, Generator, Set
|
|
2
|
+
import pathlib
|
|
3
|
+
import pytest
|
|
4
|
+
import shutil
|
|
5
|
+
import uuid
|
|
6
|
+
|
|
7
|
+
# SSOT for test data paths
|
|
8
|
+
pathDataSamples = pathlib.Path("tests/dataSamples")
|
|
9
|
+
pathTempRoot = pathDataSamples / "tmp"
|
|
10
|
+
|
|
11
|
+
# The registrar maintains the register of temp files
|
|
12
|
+
registerOfTempFiles: Set[pathlib.Path] = set()
|
|
13
|
+
|
|
14
|
+
def addTempFileToRegister(path: pathlib.Path) -> None:
    """The registrar records a temp path for end-of-session cleanup."""
    registerOfTempFiles.add(path)
|
|
17
|
+
|
|
18
|
+
def cleanupTempFileRegister() -> None:
    """The registrar removes every registered temp path, deepest paths first,
    then empties the register. Failures are reported, never raised."""
    # Reverse-sorted order deletes children before their parent directories.
    for pathRegistered in sorted(registerOfTempFiles, reverse=True):
        try:
            if pathRegistered.is_file():
                pathRegistered.unlink(missing_ok=True)
            elif pathRegistered.is_dir():
                shutil.rmtree(pathRegistered, ignore_errors=True)
        except Exception as ERRORmessage:
            print(f"Warning: Failed to clean up {pathRegistered}: {ERRORmessage}")
    registerOfTempFiles.clear()
|
|
29
|
+
|
|
30
|
+
@pytest.fixture(scope="session", autouse=True)
def setupTeardownTestData() -> Generator[None, None, None]:
    """Session-wide auto-fixture: ensure the test data directories exist before
    any test runs, and purge registered temp files after the session."""
    for pathRequired in (pathDataSamples, pathTempRoot):
        pathRequired.mkdir(exist_ok=True)
    yield
    cleanupTempFileRegister()
|
|
37
|
+
|
|
38
|
+
@pytest.fixture
def pathTempTesting(request: pytest.FixtureRequest) -> pathlib.Path:
    """Create a unique temp directory for each test function and register it
    for end-of-session cleanup."""
    # Map filesystem-hostile characters from the test id to underscores in one pass.
    sanitizedName = request.node.name.translate(str.maketrans('[]/', '___'))
    pathTemp = pathTempRoot / f"{sanitizedName}_{uuid.uuid4()}"
    pathTemp.mkdir(parents=True, exist_ok=True)

    addTempFileToRegister(pathTemp)
    return pathTemp
|
|
50
|
+
|
|
51
|
+
@pytest.fixture
def pathCacheTesting(pathTempTesting: pathlib.Path) -> Generator[pathlib.Path, Any, None]:
    """Temporarily replace the OEIS cache directory with a test directory.

    try/finally guarantees the real cache path is restored even when an
    exception is thrown into the generator; otherwise one failing test could
    leave every subsequent test writing into a deleted temp directory.
    """
    from mapFolding import oeis as there_must_be_a_better_way
    pathCacheOriginal = there_must_be_a_better_way._pathCache
    there_must_be_a_better_way._pathCache = pathTempTesting
    try:
        yield pathTempTesting
    finally:
        there_must_be_a_better_way._pathCache = pathCacheOriginal
|
|
59
|
+
|
|
60
|
+
@pytest.fixture
def pathFilenameFoldsTotalTesting(pathTempTesting: pathlib.Path) -> pathlib.Path:
    """Path, inside the per-test temp directory, for a foldsTotal output file."""
    return pathTempTesting / "foldsTotalTest.txt"
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
from typing import Any, Callable, Sequence, Type, Union
|
|
2
|
+
import pytest
|
|
3
|
+
|
|
4
|
+
def uniformTestMessage(expected: Any, actual: Any, functionName: str, *arguments: Any) -> str:
    """Build the uniform assertion message used by every test comparison."""
    argumentListing = ', '.join(str(parameter) for parameter in arguments)
    return f"\nTesting: `{functionName}({argumentListing})`\nExpected: {expected}\nGot: {actual}"
|
|
9
|
+
|
|
10
|
+
def standardizedEqualTo(expected: Any, functionTarget: Callable, *arguments: Any) -> None:
    """Template for tests comparing a return value, or an expected exception type.

    Parameters
        expected: either the exact value `functionTarget(*arguments)` should
            return, or an Exception subclass it should raise.
        functionTarget: the callable under test.
        arguments: positional arguments forwarded to `functionTarget`.
    """
    # BUG FIX: the original compared `type(expected) is Type[Exception]`, which
    # is never True (a class is never identical to a typing subscription), so
    # exception expectations never produced the readable `__name__` message.
    if isinstance(expected, type) and issubclass(expected, Exception):
        messageExpected = expected.__name__
    else:
        messageExpected = expected

    try:
        messageActual = actual = functionTarget(*arguments)
    except Exception as actualError:
        messageActual = type(actualError).__name__
        actual = type(actualError)

    assert actual == expected, uniformTestMessage(messageExpected, messageActual, functionTarget.__name__, *arguments)
|
|
24
|
+
|
|
25
|
+
def standardizedSystemExit(expected: Union[str, int, Sequence[int]], functionTarget: Callable, *arguments: Any) -> None:
    """Template for tests expecting SystemExit.

    Parameters
        expected: Exit code expectation:
            - "error": any non-zero exit code
            - "nonError": specifically zero exit code
            - int: exact exit code match
            - Sequence[int]: exit code must be one of these values
        functionTarget: The function to test
        arguments: Arguments to pass to the function
    """
    with pytest.raises(SystemExit) as exceptionInfo:
        functionTarget(*arguments)

    exitCode = exceptionInfo.value.code

    # Dispatch on the kind of expectation, most specific sentinel first.
    if expected == "error":
        assert exitCode != 0, f"Expected error exit (non-zero) but got code {exitCode}"
    elif expected == "nonError":
        assert exitCode == 0, f"Expected non-error exit (0) but got code {exitCode}"
    elif isinstance(expected, (list, tuple)):
        assert exitCode in expected, f"Expected exit code to be one of {expected} but got {exitCode}"
    else:
        assert exitCode == expected, f"Expected exit code {expected} but got {exitCode}"
|
tests/test_oeis.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
from contextlib import redirect_stdout
|
|
2
|
+
from datetime import datetime, timedelta
|
|
3
|
+
from mapFolding.oeis import _getFilenameOEISbFile, _getOEISidValues, _parseBFileOEIS, _validateOEISid, _getOEISidInformation
|
|
4
|
+
from tests.conftest import *
|
|
5
|
+
from typing import NoReturn
|
|
6
|
+
from urllib.error import URLError
|
|
7
|
+
import io
|
|
8
|
+
import os
|
|
9
|
+
import pathlib
|
|
10
|
+
import pytest
|
|
11
|
+
import random
|
|
12
|
+
import re as regex
|
|
13
|
+
import unittest
|
|
14
|
+
import unittest.mock
|
|
15
|
+
import urllib
|
|
16
|
+
import urllib.request
|
|
17
|
+
|
|
18
|
+
def test_algorithmSourceSequential(oeisID: str, useAlgorithmDirectly: None) -> None:
    """With the dispatcher bypassed, the raw algorithm reproduces known values."""
    valuesKnown = settingsOEIS[oeisID]['valuesKnown']
    for n in settingsOEIS[oeisID]['valuesTestValidation']:
        standardizedEqualTo(valuesKnown[n], oeisIDfor_n, oeisID, n)
|
|
21
|
+
|
|
22
|
+
def test_aOFn_calculate_value(oeisID: str) -> None:
    """`oeisIDfor_n` reproduces every known value in the validation set."""
    valuesKnown = settingsOEIS[oeisID]['valuesKnown']
    for n in settingsOEIS[oeisID]['valuesTestValidation']:
        standardizedEqualTo(valuesKnown[n], oeisIDfor_n, oeisID, n)
|
|
25
|
+
|
|
26
|
+
@pytest.mark.parametrize("badID", ["A999999", " A999999 ", "A999999extra"])
def test__validateOEISid_invalid_id(badID: str) -> None:
    """Unknown or malformed OEIS IDs raise KeyError."""
    standardizedEqualTo(KeyError, _validateOEISid, badID)
|
|
29
|
+
|
|
30
|
+
def test__validateOEISid_partially_valid(oeisID_1random: str) -> None:
    """A valid OEIS ID with trailing junk is still rejected."""
    corruptedID = f"{oeisID_1random}extra"
    standardizedEqualTo(KeyError, _validateOEISid, corruptedID)
|
|
32
|
+
|
|
33
|
+
def test__validateOEISid_valid_id(oeisID: str) -> None:
    """A known-good OEIS ID validates to itself."""
    standardizedEqualTo(oeisID, _validateOEISid, oeisID)
|
|
35
|
+
|
|
36
|
+
def test__validateOEISid_valid_id_case_insensitive(oeisID: str) -> None:
    """Validation is case-insensitive and always normalizes to upper case."""
    for variantID in (oeisID.lower(), oeisID.upper(), oeisID.swapcase()):
        standardizedEqualTo(oeisID.upper(), _validateOEISid, variantID)
|
|
40
|
+
|
|
41
|
+
# Bad inputs for oeisIDfor_n, as (value, pytest-id) pairs.
parameters_test_aOFn_invalid_n = [
    # (2, "ok"), # test the test template
    (-random.randint(1, 100), "randomNegative"),
    ("foo", "string"),
    (1.5, "float")
]
# Unzip the pairs into the parallel tuples pytest.mark.parametrize expects.
badValues, badValuesIDs = zip(*parameters_test_aOFn_invalid_n)
@pytest.mark.parametrize("badN", badValues, ids=badValuesIDs)
def test_aOFn_invalid_n(oeisID_1random: str, badN: Any) -> None:
    """Check that negative or non-integer n raises ValueError."""
    standardizedEqualTo(ValueError, oeisIDfor_n, oeisID_1random, badN)
|
|
52
|
+
|
|
53
|
+
def test_aOFn_zeroDim_A001418() -> None:
    """n=0 is undefined for A001418 and must raise ArithmeticError."""
    standardizedEqualTo(ArithmeticError, oeisIDfor_n, 'A001418', 0)
|
|
55
|
+
|
|
56
|
+
# ===== OEIS Cache Tests =====
|
|
57
|
+
@pytest.mark.parametrize("cacheExists", [True, False])
@unittest.mock.patch('pathlib.Path.exists')
@unittest.mock.patch('pathlib.Path.unlink')
def test_clearOEIScache(mock_unlink: unittest.mock.MagicMock, mock_exists: unittest.mock.MagicMock, cacheExists: bool) -> None:
    """Test OEIS cache clearing with both existing and non-existing cache."""
    mock_exists.return_value = cacheExists
    clearOEIScache()

    if not cacheExists:
        # Nothing to delete: one existence probe, zero unlinks.
        mock_exists.assert_called_once()
        mock_unlink.assert_not_called()
    else:
        # Each OEIS ID has two cache files
        countCallsExpected = len(settingsOEIS) * 2
        assert mock_unlink.call_count == countCallsExpected
        mock_unlink.assert_has_calls([unittest.mock.call(missing_ok=True)] * countCallsExpected)
|
|
73
|
+
|
|
74
|
+
def testNetworkError(monkeypatch: pytest.MonkeyPatch, pathCacheTesting: pathlib.Path) -> None:
    """A failing `urlopen` surfaces as URLError from the OEIS fetch path."""
    def raiseURLError(*args: Any, **kwargs: Any) -> NoReturn:
        raise URLError("Network error")

    monkeypatch.setattr(urllib.request, 'urlopen', raiseURLError)
    anyOEISid = next(iter(settingsOEIS))
    standardizedEqualTo(URLError, _getOEISidValues, anyOEISid)
|
|
81
|
+
|
|
82
|
+
# ===== Command Line Interface Tests =====
|
|
83
|
+
def testHelpText() -> None:
    """Test that help text is complete and examples are valid."""
    # Capture everything getOEISids prints.
    outputStream = io.StringIO()
    with redirect_stdout(outputStream):
        getOEISids()

    helpText = outputStream.getvalue()

    # Verify content: every implemented ID and its description must appear.
    for oeisID in oeisIDsImplemented:
        assert oeisID in helpText
        assert settingsOEIS[oeisID]['description'] in helpText

    # Extract and verify examples

    cliMatch = regex.search(r'OEIS_for_n (\w+) (\d+)', helpText)
    pythonMatch = regex.search(r"oeisIDfor_n\('(\w+)', (\d+)\)", helpText)

    assert cliMatch and pythonMatch, "Help text missing examples"
    # NOTE: this local `oeisID` shadows the loop variable above — intentional reuse.
    oeisID, n = pythonMatch.groups()
    n = int(n)

    # Verify CLI and Python examples use same values
    assert cliMatch.groups() == (oeisID, str(n)), "CLI and Python examples inconsistent"

    # Verify the example works
    expectedValue = oeisIDfor_n(oeisID, n)

    # Test CLI execution of the example
    with unittest.mock.patch('sys.argv', ['OEIS_for_n', oeisID, str(n)]):
        outputStream = io.StringIO()
        with redirect_stdout(outputStream):
            OEIS_for_n()
        # The lambda defers parsing the captured stdout until the comparison runs.
        standardizedEqualTo(expectedValue, lambda: int(outputStream.getvalue().strip().split()[0]))
|
|
117
|
+
|
|
118
|
+
def testCLI_InvalidInputs() -> None:
    """Test CLI error handling."""
    casesInvalid = [
        (['OEIS_for_n'], "missing arguments"),
        (['OEIS_for_n', 'A999999', '1'], "invalid OEIS ID"),
        (['OEIS_for_n', 'A001415', '-1'], "negative n"),
        (['OEIS_for_n', 'A001415', 'abc'], "non-integer n"),
    ]

    for argumentsCLI, _descriptionCase in casesInvalid:
        with unittest.mock.patch('sys.argv', argumentsCLI):
            standardizedSystemExit("error", OEIS_for_n)
|
|
130
|
+
|
|
131
|
+
def testCLI_HelpFlag() -> None:
    """Verify --help output contains required information."""
    with unittest.mock.patch('sys.argv', ['OEIS_for_n', '--help']):
        capturedStream = io.StringIO()
        with redirect_stdout(capturedStream):
            # --help exits with status 0.
            standardizedSystemExit("nonError", OEIS_for_n)

        helpOutput = capturedStream.getvalue()
    assert "Available OEIS sequences:" in helpOutput
    assert "Usage examples:" in helpOutput
    assert all(oeisID in helpOutput for oeisID in oeisIDsImplemented)
|