mapFolding-0.3.8-py3-none-any.whl → mapFolding-0.3.9-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- mapFolding/__init__.py +38 -0
- mapFolding/basecamp.py +55 -0
- mapFolding/beDRY.py +364 -0
- mapFolding/oeis.py +329 -0
- {someAssemblyRequired → mapFolding/someAssemblyRequired}/makeJob.py +3 -3
- mapFolding/someAssemblyRequired/synthesizeModuleJAX.py +29 -0
- {someAssemblyRequired → mapFolding/someAssemblyRequired}/synthesizeModulesNumba.py +149 -89
- mapFolding/syntheticModules/__init__.py +3 -0
- syntheticModules/numbaInitialize.py → mapFolding/syntheticModules/numba_countInitialize.py +5 -7
- syntheticModules/numbaParallel.py → mapFolding/syntheticModules/numba_countParallel.py +5 -4
- syntheticModules/numbaSequential.py → mapFolding/syntheticModules/numba_countSequential.py +5 -4
- mapFolding/syntheticModules/numba_doTheNeedful.py +33 -0
- mapFolding/theDao.py +214 -0
- mapFolding/theSSOT.py +269 -0
- mapFolding-0.3.9.dist-info/LICENSE +407 -0
- {mapFolding-0.3.8.dist-info → mapFolding-0.3.9.dist-info}/METADATA +9 -5
- mapFolding-0.3.9.dist-info/RECORD +40 -0
- mapFolding-0.3.9.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/conftest.py +224 -0
- tests/conftest_tmpRegistry.py +62 -0
- tests/conftest_uniformTests.py +53 -0
- tests/test_oeis.py +200 -0
- tests/test_other.py +258 -0
- tests/test_tasks.py +44 -0
- tests/test_types.py +5 -0
- benchmarks/benchmarking.py +0 -67
- citations/constants.py +0 -3
- citations/updateCitation.py +0 -354
- mapFolding-0.3.8.dist-info/RECORD +0 -26
- mapFolding-0.3.8.dist-info/top_level.txt +0 -5
- syntheticModules/__init__.py +0 -3
- {reference → mapFolding/reference}/flattened.py +0 -0
- {reference → mapFolding/reference}/hunterNumba.py +0 -0
- {reference → mapFolding/reference}/irvineJavaPort.py +0 -0
- {reference → mapFolding/reference}/jax.py +0 -0
- {reference → mapFolding/reference}/lunnan.py +0 -0
- {reference → mapFolding/reference}/lunnanNumpy.py +0 -0
- {reference → mapFolding/reference}/lunnanWhile.py +0 -0
- {reference → mapFolding/reference}/rotatedEntryPoint.py +0 -0
- {reference → mapFolding/reference}/total_countPlus1vsPlusN.py +0 -0
- {someAssemblyRequired → mapFolding/someAssemblyRequired}/__init__.py +0 -0
- {someAssemblyRequired → mapFolding/someAssemblyRequired}/getLLVMforNoReason.py +0 -0
- {someAssemblyRequired → mapFolding/someAssemblyRequired}/synthesizeModuleJobNumba.py +0 -0
- {mapFolding-0.3.8.dist-info → mapFolding-0.3.9.dist-info}/WHEEL +0 -0
- {mapFolding-0.3.8.dist-info → mapFolding-0.3.9.dist-info}/entry_points.txt +0 -0
tests/test_other.py
ADDED
@@ -0,0 +1,258 @@
+ from contextlib import redirect_stdout
+ from tests.conftest import *
+ from typing import Dict, List, Optional, Any, Tuple, Literal, Callable, Generator
+ from Z0Z_tools import intInnit
+ import io
+ import itertools
+ import numba
+ import numpy
+ import pathlib
+ import pytest
+ import random
+ import sys
+ import unittest.mock  # not in the released file; test_saveFoldsTotal_fallback below needs it unless conftest re-exports `unittest`
+
+ @pytest.mark.parametrize("listDimensions,expected_intInnit,expected_parseListDimensions,expected_validateListDimensions,expected_getLeavesTotal", [
+     (None, ValueError, ValueError, ValueError, ValueError), # None instead of list
+     (['a'], ValueError, ValueError, ValueError, ValueError), # string
+     ([-4, 2], [-4, 2], ValueError, ValueError, ValueError), # negative
+     ([-3], [-3], ValueError, ValueError, ValueError), # negative
+     ([0, 0], [0, 0], [0, 0], NotImplementedError, 0), # no positive dimensions
+     ([0, 5, 6], [0, 5, 6], [0, 5, 6], [5, 6], 30), # zeros ignored
+     ([0], [0], [0], NotImplementedError, 0), # edge case
+     ([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 120), # sequential
+     ([1, sys.maxsize], [1, sys.maxsize], [1, sys.maxsize], [1, sys.maxsize], sys.maxsize), # maxint
+     ([7.5], ValueError, ValueError, ValueError, ValueError), # float
+     ([1] * 1000, [1] * 1000, [1] * 1000, [1] * 1000, 1), # long list
+     ([11], [11], [11], NotImplementedError, 11), # single dimension
+     ([13, 0, 17], [13, 0, 17], [13, 0, 17], [13, 17], 221), # zeros handled
+     ([2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], 16), # repeated dimensions
+     ([2, 3, 4], [2, 3, 4], [2, 3, 4], [2, 3, 4], 24),
+     ([2, 3], [2, 3], [2, 3], [2, 3], 6),
+     ([2] * 11, [2] * 11, [2] * 11, [2] * 11, 2048), # power of 2
+     ([3, 2], [3, 2], [3, 2], [2, 3], 6), # validateListDimensions returns a sorted copy
+     ([3] * 5, [3] * 5, [3] * 5, [3, 3, 3, 3, 3], 243), # power of 3
+     ([None], TypeError, TypeError, TypeError, TypeError), # None
+     ([True], TypeError, TypeError, TypeError, TypeError), # bool
+     ([[17, 39]], TypeError, TypeError, TypeError, TypeError), # nested
+     ([], ValueError, ValueError, ValueError, ValueError), # empty
+     ([complex(1, 1)], ValueError, ValueError, ValueError, ValueError), # complex number
+     ([float('inf')], ValueError, ValueError, ValueError, ValueError), # infinity
+     ([float('nan')], ValueError, ValueError, ValueError, ValueError), # NaN
+     ([sys.maxsize - 1, 1], [sys.maxsize - 1, 1], [sys.maxsize - 1, 1], [1, sys.maxsize - 1], sys.maxsize - 1), # near maxint
+     ([sys.maxsize // 2, sys.maxsize // 2, 2], [sys.maxsize // 2, sys.maxsize // 2, 2], [sys.maxsize // 2, sys.maxsize // 2, 2], [2, sys.maxsize // 2, sys.maxsize // 2], OverflowError), # overflow protection
+     ([sys.maxsize, sys.maxsize], [sys.maxsize, sys.maxsize], [sys.maxsize, sys.maxsize], [sys.maxsize, sys.maxsize], OverflowError), # overflow protection
+     (range(3, 7), [3, 4, 5, 6], [3, 4, 5, 6], [3, 4, 5, 6], 360), # range sequence type
+     (tuple([3, 5, 7]), [3, 5, 7], [3, 5, 7], [3, 5, 7], 105), # tuple sequence type
+ ])
+ def test_listDimensionsAsParameter(listDimensions: Any, expected_intInnit: List[int] | type[Exception], expected_parseListDimensions: List[int] | type[Exception], expected_validateListDimensions: List[int] | type[Exception], expected_getLeavesTotal: int | type[Exception]) -> None:
+     """Test intInnit, parseDimensions, validateListDimensions, and getLeavesTotal with the same inputs."""
+     standardizedEqualTo(expected_intInnit, intInnit, listDimensions)
+     standardizedEqualTo(expected_parseListDimensions, parseDimensions, listDimensions)
+     standardizedEqualTo(expected_validateListDimensions, validateListDimensions, listDimensions)
+     standardizedEqualTo(expected_getLeavesTotal, getLeavesTotal, listDimensions)
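Every assertion above funnels through standardizedEqualTo, which arrives via the star import from tests/conftest.py and is not part of this diff. A minimal sketch of the contract these calls rely on; the body is an assumption inferred from usage, not the released helper:

    def standardizedEqualTo(expected, functionTarget, *arguments):
        # Assumed contract: an exception class means "expect a raise";
        # anything else is compared against the return value.
        if isinstance(expected, type) and issubclass(expected, BaseException):
            with pytest.raises(expected):
                functionTarget(*arguments)
        else:
            assert functionTarget(*arguments) == expected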
+
+ def test_getLeavesTotal_edge_cases() -> None:
+     """Test edge cases for getLeavesTotal."""
+     # Order independence
+     standardizedEqualTo(getLeavesTotal([2, 3, 4]), getLeavesTotal, [4, 2, 3])
+
+     # Immutability
+     listOriginal = [2, 3]
+     standardizedEqualTo(6, getLeavesTotal, listOriginal)
+     standardizedEqualTo([2, 3], lambda x: x, listOriginal) # check that the list wasn't modified
+
+ @pytest.mark.parametrize("foldsValue,writeFoldsTarget", [
+     (756839, "foldsTotalTest.txt"), # direct file
+     (2640919, "foldsTotalTest.txt"), # direct file
+     (7715177, None), # directory: the default filename is used
+ ])
+ def test_countFolds_writeFoldsTotal(
+     listDimensionsTestFunctionality: List[int],
+     pathTempTesting: pathlib.Path,
+     mockFoldingFunction: Callable[..., Callable[..., None]],
+     mockDispatcher: Callable[[Callable[..., None]], Any],
+     foldsValue: int,
+     writeFoldsTarget: Optional[str]
+ ) -> None:
+     """Test writing the folds total to either a file or a directory."""
+     # For the directory case, use the directory path directly
+     if writeFoldsTarget is None:
+         pathWriteTarget = pathTempTesting
+         filenameFoldsTotalExpected = getFilenameFoldsTotal(listDimensionsTestFunctionality)
+     else:
+         pathWriteTarget = pathTempTesting / writeFoldsTarget
+         filenameFoldsTotalExpected = writeFoldsTarget
+
+     foldsTotalExpected = foldsValue * getLeavesTotal(listDimensionsTestFunctionality)
+     mock_countFolds = mockFoldingFunction(foldsValue, listDimensionsTestFunctionality)
+
+     with mockDispatcher(mock_countFolds):
+         returned = countFolds(listDimensionsTestFunctionality, pathLikeWriteFoldsTotal=pathWriteTarget)
+
+     standardizedEqualTo(str(foldsTotalExpected), lambda: (pathTempTesting / filenameFoldsTotalExpected).read_text())
+
+ @pytest.mark.parametrize("nameOfTest,callablePytest", PytestFor_intInnit())
+ def testIntInnit(nameOfTest: str, callablePytest: Callable[[], None]) -> None:
+     callablePytest()
+
+ @pytest.mark.parametrize("nameOfTest,callablePytest", PytestFor_oopsieKwargsie())
+ def testOopsieKwargsie(nameOfTest: str, callablePytest: Callable[[], None]) -> None:
+     callablePytest()
+
+ @pytest.mark.parametrize("CPUlimit, expectedLimit", [
+     (None, numba.get_num_threads()),
+     (False, numba.get_num_threads()),
+     (True, 1),
+     (4, 4),
+     (0.5, max(1, numba.get_num_threads() // 2)),
+     (-0.5, max(1, numba.get_num_threads() // 2)),
+     (-2, max(1, numba.get_num_threads() - 2)),
+     (0, numba.get_num_threads()),
+     (1, 1),
+ ])
+ def test_setCPUlimit(CPUlimit: None | bool | int | float, expectedLimit: int) -> None:
+     standardizedEqualTo(expectedLimit, setCPUlimit, CPUlimit)
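The table above fixes the semantics of setCPUlimit: None, False, and 0 leave every Numba thread available; True pins execution to one thread; a positive integer is taken literally; a float with magnitude below 1 is a fraction of the available threads (sign ignored); a negative integer is subtracted from the total. A sketch of that mapping, my reading of the table rather than the released implementation:

    import numba

    def setCPUlimitSketch(CPUlimit=None) -> int:
        threadsAvailable = numba.get_num_threads()
        if CPUlimit is True:
            return 1                                              # True: single thread
        if CPUlimit is None or CPUlimit is False or CPUlimit == 0:
            return threadsAvailable                               # no limit
        if isinstance(CPUlimit, float) and abs(CPUlimit) < 1:
            return max(1, int(threadsAvailable * abs(CPUlimit)))  # fraction of the threads
        if CPUlimit < 0:
            return max(1, threadsAvailable + int(CPUlimit))       # subtract from the total
        return int(CPUlimit)                                      # literal thread count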
+
+ def test_makeConnectionGraph_nonNegative(listDimensionsTestFunctionality: List[int]) -> None:
+     connectionGraph = makeConnectionGraph(listDimensionsTestFunctionality)
+     assert numpy.all(connectionGraph >= 0), "All values in the connection graph should be non-negative."
+
+ # @pytest.mark.parametrize("datatype", ['int16', 'uint64'])
+ # def test_makeConnectionGraph_datatype(listDimensionsTestFunctionality: List[int], datatype) -> None:
+ #     connectionGraph = makeConnectionGraph(listDimensionsTestFunctionality, datatype=datatype)
+ #     assert connectionGraph.dtype == datatype, f"Expected datatype {datatype}, but got {connectionGraph.dtype}."
+
+ """5 parameters
+ listDimensionsTestFunctionality
+
+ computationDivisions
+     None
+     random: int, first included: 2, first excluded: leavesTotal
+     maximum
+     cpu
+
+ CPUlimit
+     None
+     True
+     False
+     0
+     1
+     -1
+     random: 0 < float < 1
+     random: -1 < float < 0
+     random: int, first included: 2, first excluded: (min(leavesTotal, 16) - 1)
+     random: int, first included: -1 * (min(leavesTotal, 16) - 1), first excluded: -1
+
+ datatypeMedium
+     None
+     numpy.int64
+     numpy.intc
+     numpy.uint16
+
+ datatypeLarge
+     None
+     numpy.int64
+     numpy.intp
+     numpy.uint32
+ """
+
+ @pytest.fixture
+ def parameterIterator() -> Callable[[List[int]], Generator[Dict[str, Any], None, None]]:
+     """Generate random combinations of parameters for outfitCountFolds testing."""
+     parameterSets: Dict[str, List[Any]] = {
+         'computationDivisions': [
+             None,
+             'maximum',
+             'cpu',
+         ],
+         'CPUlimit': [
+             None, True, False, 0, 1, -1,
+         ],
+         'datatypeMedium': [
+             None,
+             numpy.int64,
+             numpy.intc,
+             numpy.uint16
+         ],
+         'datatypeLarge': [
+             None,
+             numpy.int64,
+             numpy.intp,
+             numpy.uint32
+         ]
+     }
+
+     def makeParametersDynamic(listDimensions: List[int]) -> Dict[str, List[Any]]:
+         """Add context-dependent parameter values."""
+         parametersDynamic = parameterSets.copy()
+         leavesTotal = getLeavesTotal(listDimensions)
+         concurrencyLimit = min(leavesTotal, 16)
+
+         # Add dynamic computationDivisions values
+         dynamicDivisions = [random.randint(2, leavesTotal - 1) for iterator in range(3)]
+         parametersDynamic['computationDivisions'] = parametersDynamic['computationDivisions'] + dynamicDivisions
+
+         # Add dynamic CPUlimit values
+         parameterDynamicCPU = [
+             random.random(), # 0 to 1
+             -random.random(), # -1 to 0
+         ]
+         parameterDynamicCPU.extend(
+             [random.randint(2, concurrencyLimit - 1) for iterator in range(2)]
+         )
+         parameterDynamicCPU.extend(
+             [random.randint(-concurrencyLimit + 1, -2) for iterator in range(2)]
+         )
+         parametersDynamic['CPUlimit'] = parametersDynamic['CPUlimit'] + parameterDynamicCPU
+
+         return parametersDynamic
+
+     def generateCombinations(listDimensions: List[int]) -> Generator[Dict[str, Any], None, None]:
+         parametersDynamic = makeParametersDynamic(listDimensions)
+         parameterKeys = list(parametersDynamic.keys())
+         parameterValues = [parametersDynamic[key] for key in parameterKeys]
+
+         # Shuffle each parameter list
+         for valueList in parameterValues:
+             random.shuffle(valueList)
+
+         # Use zip_longest to iterate, filling with None when shorter lists are exhausted
+         for combination in itertools.zip_longest(*parameterValues, fillvalue=None):
+             yield dict(zip(parameterKeys, combination))
+
+     return generateCombinations
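The fixture yields a factory rather than parametrized values, so a consuming test drives the generator itself. A hypothetical consumer; outfitCountFolds is named in the fixture docstring, but its call signature here is a guess:

    def test_outfitCountFolds_randomCombinations(parameterIterator, listDimensionsTestFunctionality):
        for parameterCombination in parameterIterator(listDimensionsTestFunctionality):
            # each dict maps every parameter name to one sampled value;
            # zip_longest pads exhausted lists with None, i.e. "use the default"
            outfitCountFolds(listDimensionsTestFunctionality, **parameterCombination)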
+
+ # TODO refactor due to changes
+ # def test_pathJobDEFAULT_colab() -> None:
+ #     """Test that pathJobDEFAULT is set correctly when running in Google Colab."""
+ #     # Mock sys.modules to simulate running in Colab
+ #     with unittest.mock.patch.dict('sys.modules', {'google.colab': unittest.mock.MagicMock()}):
+ #         # Force reload of theSSOT to trigger the Colab path logic
+ #         import importlib
+ #         import mapFolding.theSSOT
+ #         importlib.reload(mapFolding.theSSOT)
+
+ #         # Check that the path was set to the Colab-specific value
+ #         assert mapFolding.theSSOT.pathJobDEFAULT == pathlib.Path("/content/drive/MyDrive") / "jobs"
+
+ #     # Reload one more time to restore the original state
+ #     importlib.reload(mapFolding.theSSOT)
+
+ def test_saveFoldsTotal_fallback(pathTempTesting: pathlib.Path) -> None:
+     foldsTotal = 123
+     pathFilename = pathTempTesting / "foldsTotal.txt"
+     with unittest.mock.patch("pathlib.Path.write_text", side_effect=OSError("Simulated write failure")):
+         with unittest.mock.patch("os.getcwd", return_value=str(pathTempTesting)):
+             capturedOutput = io.StringIO()
+             with redirect_stdout(capturedOutput):
+                 saveFoldsTotal(pathFilename, foldsTotal)
+     fallbackFiles = list(pathTempTesting.glob("foldsTotalYO_*.txt"))
+     assert len(fallbackFiles) == 1, "Fallback file was not created upon write failure."
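The test pins down only the observable contract: when the primary write fails, saveFoldsTotal must not lose the count, and must instead drop a foldsTotalYO_*.txt file in the current working directory. Note that the test patches pathlib.Path.write_text globally, so the fallback path cannot itself go through write_text. A sketch consistent with those constraints; the unique-suffix scheme is my assumption:

    import os
    import pathlib
    import time

    def saveFoldsTotalSketch(pathFilename, foldsTotal: int) -> None:
        try:
            pathlib.Path(pathFilename).write_text(str(foldsTotal))
        except OSError as ERRORmessage:
            print(ERRORmessage)  # reported on stdout, which the test captures
            pathFallback = pathlib.Path(os.getcwd()) / f"foldsTotalYO_{time.time_ns()}.txt"
            with open(pathFallback, 'w') as writeStream:  # open(), because write_text is patched out
                writeStream.write(str(foldsTotal))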
+
+ def test_makeDataContainer_default_datatype() -> None:
+     """Test that makeDataContainer falls back to the package-default dtype when no datatype is specified."""
+     testShape = (3, 4)
+     container = makeDataContainer(testShape)
+     assert container.dtype == hackSSOTdtype('dtypeFoldsTotal'), f"Expected the default datatype, but got {container.dtype}"
+     assert container.shape == testShape, f"Expected shape {testShape}, but got {container.shape}"
tests/test_tasks.py
ADDED
@@ -0,0 +1,44 @@
+ from tests.conftest import *
+ import pytest
+ from typing import Any, Callable, Dict, List, Literal, Tuple  # Callable added: used below but absent from the released import
+
+ # TODO add a test. `C` = number of logical cores available. `n = C + 1`. Ensure that `[2,n]` is computed correctly.
+ # Or, probably smarter: limit the number of cores, then run a test with C+1. A sketch follows this comment.
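A sketch of the test that the TODO describes, assuming countFolds accepts a CPU limit after computationDivisions as in other call sites, and that foldsTotalKnown covers the shape (both assumptions):

    def test_moreTasksThanCores(foldsTotalKnown: Dict[Tuple[int, ...], int]) -> None:
        concurrencyLimit = 2               # pin C instead of reading the host's core count
        n = concurrencyLimit + 1           # one more task than there are cores
        listDimensions = [2, n]
        standardizedEqualTo(foldsTotalKnown[tuple(listDimensions)], countFolds, listDimensions, None, 'maximum', concurrencyLimit)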
+
+ def test_algorithmSourceParallel(listDimensionsTestParallelization: List[int], foldsTotalKnown: Dict[Tuple[int, ...], int], useAlgorithmDirectly: None) -> None:
+     standardizedEqualTo(foldsTotalKnown[tuple(listDimensionsTestParallelization)], countFolds, listDimensionsTestParallelization, None, 'maximum')
+
+ def test_countFoldsComputationDivisionsInvalid(listDimensionsTestFunctionality: List[int]) -> None:
+     standardizedEqualTo(ValueError, countFolds, listDimensionsTestFunctionality, None, {"wrong": "value"})
+
+ def test_countFoldsComputationDivisionsMaximum(listDimensionsTestParallelization: List[int], foldsTotalKnown: Dict[Tuple[int, ...], int]) -> None:
+     standardizedEqualTo(foldsTotalKnown[tuple(listDimensionsTestParallelization)], countFolds, listDimensionsTestParallelization, None, 'maximum')
+
+ @pytest.mark.parametrize("nameOfTest,callablePytest", PytestFor_defineConcurrencyLimit())
+ def test_defineConcurrencyLimit(nameOfTest: str, callablePytest: Callable[[], None]) -> None:
+     callablePytest()
+
+ # @pytest.mark.parametrize("CPUlimitParameter", [{"invalid": True}, ["weird"]])
+ # def test_countFolds_cpuLimitOopsie(listDimensionsTestFunctionality: List[int], CPUlimitParameter: Dict[str, bool] | List[str]) -> None:
+ #     standardizedEqualTo((AttributeError or ValueError), countFolds, listDimensionsTestFunctionality, None, 'cpu', CPUlimitParameter)
+
+ @pytest.mark.parametrize("computationDivisions, concurrencyLimit, listDimensions, expectedTaskDivisions", [
+     (None, 4, [9, 11], 0),
+     ("maximum", 4, [7, 11], 77),
+     ("cpu", 4, [3, 7], 4),
+     (["invalid"], 4, [19, 23], ValueError),
+     (20, 4, [3, 5], ValueError),
+ ])
+ def test_getTaskDivisions(computationDivisions: Any, concurrencyLimit: int, listDimensions: List[int], expectedTaskDivisions: int | type[ValueError]) -> None:
+     standardizedEqualTo(expectedTaskDivisions, getTaskDivisions, computationDivisions, concurrencyLimit, None, listDimensions)
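Read off the table, the contract is: None disables task division (0); 'maximum' divides by leavesTotal (7 * 11 = 77); 'cpu' divides by the concurrency limit; and anything unrecognized, including an integer above leavesTotal (20 > 15 for [3, 5]), raises ValueError. A sketch of that contract; the third argument, passed as None above, is ignored here, which is an assumption:

    def getTaskDivisionsSketch(computationDivisions, concurrencyLimit, CPUlimit, listDimensions):
        leavesTotal = getLeavesTotal(listDimensions)
        if computationDivisions is None:
            return 0
        if computationDivisions == 'maximum':
            return leavesTotal
        if computationDivisions == 'cpu':
            return min(concurrencyLimit, leavesTotal)
        if isinstance(computationDivisions, int) and 0 < computationDivisions <= leavesTotal:
            return computationDivisions
        raise ValueError(f"computationDivisions, {computationDivisions!r}, is not valid.")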
+
+ @pytest.mark.parametrize("expected,parameter", [
+     (2, "2"), # numeric string is parsed
+     (ValueError, [4]), # list
+     (ValueError, (2,)), # tuple
+     (ValueError, {2}), # set
+     (ValueError, {"cores": 2}), # dict
+ ])
+ def test_setCPUlimitMalformedParameter(expected: int | type[ValueError], parameter: Any) -> None:
+     """Test that invalid CPUlimit types are properly handled."""
+     standardizedEqualTo(expected, setCPUlimit, parameter)
tests/test_types.py
ADDED
benchmarks/benchmarking.py
DELETED
@@ -1,67 +0,0 @@
- """An incompetent benchmarking module for mapFolding."""
- from typing import Callable
- import multiprocessing
- import numpy
- import pathlib
- import time
-
- pathRecordedBenchmarks = pathlib.Path('mapFolding/benchmarks/marks')
- pathRecordedBenchmarks.mkdir(parents=True, exist_ok=True)
- pathFilenameRecordedBenchmarks = pathRecordedBenchmarks / "benchmarks.npy"
-
- def recordBenchmarks():
-     """Decorator factory: wrap a function so each call's wall time is recorded in the benchmark file."""
-     def AzeemTheWrapper(functionTarget: Callable):
-         def djZeph(*arguments, **keywordArguments):
-             timeStart = time.perf_counter_ns()
-             returnValueTarget = functionTarget(*arguments, **keywordArguments)
-             timeElapsed = (time.perf_counter_ns() - timeStart) / 1e9
-
-             # Extract mapShape from the keyword arguments
-             mapShape = keywordArguments['mapShape']
-             # mapShape = tuple(arguments)[2]
-             # leavesTotal = tuple(arguments[3])[4]
-
-             # Store benchmark data in a single structured-array file
-             benchmarkEntry = numpy.array([(timeElapsed, mapShape)], dtype=[('time', 'f8'), ('mapShape', 'O')])
-             # benchmarkEntry = numpy.array([(timeElapsed, leavesTotal)], dtype=[('time', 'f8'), ('leaves', 'O')])
-
-             if pathFilenameRecordedBenchmarks.exists():
-                 arrayExisting = numpy.load(str(pathFilenameRecordedBenchmarks), allow_pickle=True)
-                 arrayBenchmark = numpy.concatenate([arrayExisting, benchmarkEntry])
-             else:
-                 arrayBenchmark = benchmarkEntry
-
-             numpy.save(str(pathFilenameRecordedBenchmarks), arrayBenchmark)
-             return returnValueTarget
-
-         return djZeph
-     return AzeemTheWrapper
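Since recordBenchmarks is a decorator factory, it was applied with a call, and the wrapped function had to receive mapShape as a keyword argument; each invocation then re-loads, extends, and re-saves the entire structured array. A hypothetical application:

    @recordBenchmarks()
    def countFoldsTimed(connectionGraph, mapShape=None):  # hypothetical target
        ...  # the wrapper reads keywordArguments['mapShape'], so mapShape must be passed by keyword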
-
- def runBenchmarks(benchmarkIterations: int = 30) -> None:
-     """Run benchmark iterations.
-
-     Parameters:
-         benchmarkIterations (30): Number of benchmark iterations to run
-     """
-     # TODO warmUp (False): Whether to perform one warm-up iteration
-
-     import itertools
-     from tqdm.auto import tqdm
-     from mapFolding.oeis import settingsOEIS, oeisIDfor_n
-     from concurrent.futures import ProcessPoolExecutor, as_completed
-     max_workers = 6
-
-     listParametersOEIS = [(oeisIdentifier, dimensionValue) for oeisIdentifier, settings in settingsOEIS.items() for dimensionValue in settings['valuesBenchmark']]
-     # for (oeisIdentifier, dimensionValue), iterationIndex in tqdm(itertools.product(listParametersOEIS, range(benchmarkIterations)), total=len(listParametersOEIS) * benchmarkIterations):
-     #     oeisIDfor_n(oeisIdentifier, dimensionValue)
-     listCartesianProduct = list(itertools.product(listParametersOEIS, range(benchmarkIterations)))
-     with ProcessPoolExecutor(max_workers) as concurrencyManager:
-         listConcurrency = [concurrencyManager.submit(oeisIDfor_n, *parameters[0]) for parameters in listCartesianProduct]
-         for _complete in tqdm(as_completed(listConcurrency), total=len(listCartesianProduct)):
-             pass
-
- if __name__ == '__main__':
-     multiprocessing.set_start_method('spawn')
-     pathFilenameRecordedBenchmarks.unlink(missing_ok=True)
-     runBenchmarks(30)
citations/constants.py
DELETED