mapFolding 0.2.3-py3-none-any.whl → 0.2.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/babbage.py +9 -4
- mapFolding/beDRY.py +0 -3
- mapFolding/benchmarks/benchmarking.py +3 -2
- mapFolding/countInitialize.py +44 -0
- mapFolding/countParallel.py +49 -0
- mapFolding/countSequential.py +43 -0
- mapFolding/importSelector.py +12 -0
- mapFolding/inlineAfunction.py +124 -0
- mapFolding/lovelace.py +92 -96
- mapFolding/startHere.py +0 -11
- mapFolding/theSSOT.py +13 -5
- {mapFolding-0.2.3.dist-info → mapFolding-0.2.4.dist-info}/METADATA +3 -1
- {mapFolding-0.2.3.dist-info → mapFolding-0.2.4.dist-info}/RECORD +19 -14
- tests/conftest.py +8 -1
- tests/test_other.py +164 -94
- tests/test_temporary.py +25 -0
- mapFolding/benchmarks/test_benchmarks.py +0 -74
- {mapFolding-0.2.3.dist-info → mapFolding-0.2.4.dist-info}/WHEEL +0 -0
- {mapFolding-0.2.3.dist-info → mapFolding-0.2.4.dist-info}/entry_points.txt +0 -0
- {mapFolding-0.2.3.dist-info → mapFolding-0.2.4.dist-info}/top_level.txt +0 -0
mapFolding/babbage.py
CHANGED
@@ -1,4 +1,5 @@
-from mapFolding.
+from mapFolding.importSelector import countSequential, countParallel, countInitialize
+from mapFolding import indexThe
 from numpy import integer
 from numpy.typing import NDArray
 from typing import Any, Tuple
@@ -25,6 +26,10 @@ def _countFolds(connectionGraph: NDArray[integer[Any]], foldsSubTotals: NDArray[
     and just a few dozen-jillion other things.

     """
-    #
-
-
+    # print("babbage")
+    countInitialize(connectionGraph=connectionGraph, gapsWhere=gapsWhere, my=my, the=the, track=track)
+
+    if the[indexThe.taskDivisions.value] > 0:
+        countParallel(connectionGraph=connectionGraph, foldsSubTotals=foldsSubTotals, gapsWherePARALLEL=gapsWhere, myPARALLEL=my, the=the, trackPARALLEL=track)
+    else:
+        countSequential(connectionGraph=connectionGraph, foldsSubTotals=foldsSubTotals, gapsWhere=gapsWhere, my=my, the=the, track=track)
mapFolding/beDRY.py
CHANGED
@@ -230,9 +230,6 @@ def parseDimensions(dimensions: Sequence[int], parameterName: str = 'unnamed par
             raise ValueError(f"Dimension {dimension} must be non-negative")
         listNonNegative.append(dimension)

-    if not listNonNegative:
-        raise ValueError("At least one dimension must be non-negative")
-
    return listNonNegative

 def setCPUlimit(CPUlimit: Union[bool, float, int, None]) -> int:
mapFolding/benchmarks/benchmarking.py
CHANGED
@@ -1,5 +1,6 @@
-
+"""An incompetent benchmarking module for mapFolding."""
 from typing import Callable
+import multiprocessing
 import numpy
 import pathlib
 import time
@@ -57,7 +58,7 @@ def runBenchmarks(benchmarkIterations: int = 30) -> None:
     listCartesianProduct = list(itertools.product(listParametersOEIS, range(benchmarkIterations)))
     with ProcessPoolExecutor(max_workers) as concurrencyManager:
         listConcurrency = [concurrencyManager.submit(oeisIDfor_n, *parameters[0]) for parameters in listCartesianProduct]
-        for
+        for _complete in tqdm(as_completed(listConcurrency), total=len(listCartesianProduct)):
             pass

 if __name__ == '__main__':
mapFolding/countInitialize.py
ADDED
@@ -0,0 +1,44 @@
+import numba
+
+@numba.jit((numba.int64[:, :, ::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[:, ::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def countInitialize(connectionGraph, gapsWhere, my, the, track):
+    while my[6] > 0:
+        if my[6] <= 1 or track[1, 0] == 1:
+            my[1] = 0
+            my[3] = track[3, my[6] - 1]
+            my[0] = 1
+            while my[0] <= the[0]:
+                if connectionGraph[my[0], my[6], my[6]] == my[6]:
+                    my[1] += 1
+                else:
+                    my[7] = connectionGraph[my[0], my[6], my[6]]
+                    while my[7] != my[6]:
+                        gapsWhere[my[3]] = my[7]
+                        if track[2, my[7]] == 0:
+                            my[3] += 1
+                        track[2, my[7]] += 1
+                        my[7] = connectionGraph[my[0], my[6], track[1, my[7]]]
+                my[0] += 1
+            if my[1] == the[0]:
+                my[4] = 0
+                while my[4] < my[6]:
+                    gapsWhere[my[3]] = my[4]
+                    my[3] += 1
+                    my[4] += 1
+            my[5] = my[2]
+            while my[5] < my[3]:
+                gapsWhere[my[2]] = gapsWhere[my[5]]
+                if track[2, gapsWhere[my[5]]] == the[0] - my[1]:
+                    my[2] += 1
+                track[2, gapsWhere[my[5]]] = 0
+                my[5] += 1
+        if my[6] > 0:
+            my[2] -= 1
+            track[0, my[6]] = gapsWhere[my[2]]
+            track[1, my[6]] = track[1, track[0, my[6]]]
+            track[1, track[0, my[6]]] = my[6]
+            track[0, track[1, my[6]]] = my[6]
+            track[3, my[6]] = my[2]
+            my[6] += 1
+        if my[2] > 0:
+            return
mapFolding/countParallel.py
ADDED
@@ -0,0 +1,49 @@
+import numba
+
+@numba.jit((numba.int64[:, :, ::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[:, ::1]), parallel=True, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def countParallel(connectionGraph, foldsSubTotals, gapsWherePARALLEL, myPARALLEL, the, trackPARALLEL):
+    for indexSherpa in numba.prange(the[2]):
+        gapsWhere = gapsWherePARALLEL.copy()
+        my = myPARALLEL.copy()
+        my[8] = indexSherpa
+        track = trackPARALLEL.copy()
+        while my[6] > 0:
+            if my[6] <= 1 or track[1, 0] == 1:
+                if my[6] > the[1]:
+                    foldsSubTotals[my[8]] += the[1]
+                else:
+                    my[1] = 0
+                    my[3] = track[3, my[6] - 1]
+                    my[0] = 1
+                    while my[0] <= the[0]:
+                        if connectionGraph[my[0], my[6], my[6]] == my[6]:
+                            my[1] += 1
+                        else:
+                            my[7] = connectionGraph[my[0], my[6], my[6]]
+                            while my[7] != my[6]:
+                                if my[6] != the[2] or my[7] % the[2] == my[8]:
+                                    gapsWhere[my[3]] = my[7]
+                                    if track[2, my[7]] == 0:
+                                        my[3] += 1
+                                    track[2, my[7]] += 1
+                                my[7] = connectionGraph[my[0], my[6], track[1, my[7]]]
+                        my[0] += 1
+                    my[5] = my[2]
+                    while my[5] < my[3]:
+                        gapsWhere[my[2]] = gapsWhere[my[5]]
+                        if track[2, gapsWhere[my[5]]] == the[0] - my[1]:
+                            my[2] += 1
+                        track[2, gapsWhere[my[5]]] = 0
+                        my[5] += 1
+            while my[6] > 0 and my[2] == track[3, my[6] - 1]:
+                my[6] -= 1
+                track[1, track[0, my[6]]] = track[1, my[6]]
+                track[0, track[1, my[6]]] = track[0, my[6]]
+            if my[6] > 0:
+                my[2] -= 1
+                track[0, my[6]] = gapsWhere[my[2]]
+                track[1, my[6]] = track[1, track[0, my[6]]]
+                track[1, track[0, my[6]]] = my[6]
+                track[0, track[1, my[6]]] = my[6]
+                track[3, my[6]] = my[2]
+                my[6] += 1
mapFolding/countSequential.py
ADDED
@@ -0,0 +1,43 @@
+import numba
+
+@numba.jit((numba.int64[:, :, ::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[:, ::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def countSequential(connectionGraph, foldsSubTotals, gapsWhere, my, the, track):
+    while my[6] > 0:
+        if my[6] <= 1 or track[1, 0] == 1:
+            if my[6] > the[1]:
+                foldsSubTotals[my[8]] += the[1]
+            else:
+                my[1] = 0
+                my[3] = track[3, my[6] - 1]
+                my[0] = 1
+                while my[0] <= the[0]:
+                    if connectionGraph[my[0], my[6], my[6]] == my[6]:
+                        my[1] += 1
+                    else:
+                        my[7] = connectionGraph[my[0], my[6], my[6]]
+                        while my[7] != my[6]:
+                            gapsWhere[my[3]] = my[7]
+                            if track[2, my[7]] == 0:
+                                my[3] += 1
+                            track[2, my[7]] += 1
+                            my[7] = connectionGraph[my[0], my[6], track[1, my[7]]]
+                    my[0] += 1
+                my[5] = my[2]
+                while my[5] < my[3]:
+                    gapsWhere[my[2]] = gapsWhere[my[5]]
+                    if track[2, gapsWhere[my[5]]] == the[0] - my[1]:
+                        my[2] += 1
+                    track[2, gapsWhere[my[5]]] = 0
+                    my[5] += 1
+        while my[6] > 0 and my[2] == track[3, my[6] - 1]:
+            my[6] -= 1
+            track[1, track[0, my[6]]] = track[1, my[6]]
+            track[0, track[1, my[6]]] = track[0, my[6]]
+        if my[6] > 0:
+            my[2] -= 1
+            track[0, my[6]] = gapsWhere[my[2]]
+            track[1, my[6]] = track[1, track[0, my[6]]]
+            track[1, track[0, my[6]]] = my[6]
+            track[0, track[1, my[6]]] = my[6]
+            track[3, my[6]] = my[2]
+            my[6] += 1
mapFolding/importSelector.py
ADDED
@@ -0,0 +1,12 @@
+# useLovelace = True
+useLovelace = False
+
+if useLovelace:
+    from mapFolding.lovelace import countSequential
+    from mapFolding.lovelace import countParallel
+    from mapFolding.lovelace import countInitialize
+
+else:
+    from mapFolding.countSequential import countSequential
+    from mapFolding.countParallel import countParallel
+    from mapFolding.countInitialize import countInitialize
mapFolding/inlineAfunction.py
ADDED
@@ -0,0 +1,124 @@
+from mapFolding import indexMy, indexThe, indexTrack
+import ast
+import pathlib
+
+dictionaryEnumValues = {}
+for enumIndex in [indexMy, indexThe, indexTrack]:
+    for memberName, memberValue in enumIndex._member_map_.items():
+        dictionaryEnumValues[f"{enumIndex.__name__}.{memberName}.value"] = memberValue.value
+
+class RecursiveInliner(ast.NodeTransformer):
+    def __init__(self, dictionaryFunctions, dictionaryEnumValues):
+        self.dictionaryFunctions = dictionaryFunctions
+        self.dictionaryEnumValues = dictionaryEnumValues
+        self.processed = set() # Track processed functions to avoid infinite recursion
+
+    def inline_function_body(self, functionName):
+        if functionName in self.processed:
+            return None
+
+        self.processed.add(functionName)
+        inlineDefinition = self.dictionaryFunctions[functionName]
+        # Recursively process the function body
+        for node in ast.walk(inlineDefinition):
+            self.visit(node)
+        return inlineDefinition
+
+    def visit_Attribute(self, node):
+        # Substitute enum identifiers (e.g., indexMy.leaf1ndex.value)
+        if isinstance(node.value, ast.Attribute) and isinstance(node.value.value, ast.Name):
+            enumPath = f"{node.value.value.id}.{node.value.attr}.{node.attr}"
+            if enumPath in self.dictionaryEnumValues:
+                return ast.Constant(value=self.dictionaryEnumValues[enumPath])
+        return self.generic_visit(node)
+
+    def visit_Call(self, node):
+        callNode = self.generic_visit(node)
+        if isinstance(callNode, ast.Call) and isinstance(callNode.func, ast.Name) and callNode.func.id in self.dictionaryFunctions:
+            inlineDefinition = self.inline_function_body(callNode.func.id)
+            if inlineDefinition and inlineDefinition.body:
+                lastStmt = inlineDefinition.body[-1]
+                if isinstance(lastStmt, ast.Return) and lastStmt.value is not None:
+                    return self.visit(lastStmt.value)
+                elif isinstance(lastStmt, ast.Expr) and lastStmt.value is not None:
+                    return self.visit(lastStmt.value)
+            return None
+        return callNode
+
+    def visit_Expr(self, node):
+        if isinstance(node.value, ast.Call):
+            if isinstance(node.value.func, ast.Name) and node.value.func.id in self.dictionaryFunctions:
+                inlineDefinition = self.inline_function_body(node.value.func.id)
+                if inlineDefinition:
+                    return [self.visit(stmt) for stmt in inlineDefinition.body]
+        return self.generic_visit(node)
+
+def find_required_imports(node):
+    """Find all modules that need to be imported based on AST analysis."""
+    requiredImports = set()
+
+    class ImportFinder(ast.NodeVisitor):
+        def visit_Name(self, node):
+            # Common modules we might need
+            if node.id in {'numba'}:
+                requiredImports.add(node.id)
+            self.generic_visit(node)
+
+        def visit_Decorator(self, node):
+            if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
+                if node.func.id == 'jit':
+                    requiredImports.add('numba')
+            self.generic_visit(node)
+
+    ImportFinder().visit(node)
+    return requiredImports
+
+def generate_imports(requiredImports):
+    """Generate import statements based on required modules."""
+    importStatements = []
+
+    # Map of module names to their import statements
+    importMapping = {
+        'numba': 'import numba',
+    }
+
+    for moduleName in sorted(requiredImports):
+        if moduleName in importMapping:
+            importStatements.append(importMapping[moduleName])
+
+    return '\n'.join(importStatements)
+
+def inline_functions(sourceCode, targetFunctionName, dictionaryEnumValues):
+    dictionaryParsed = ast.parse(sourceCode)
+    dictionaryFunctions = {
+        element.name: element
+        for element in dictionaryParsed.body
+        if isinstance(element, ast.FunctionDef)
+    }
+    nodeTarget = dictionaryFunctions[targetFunctionName]
+    nodeInliner = RecursiveInliner(dictionaryFunctions, dictionaryEnumValues)
+    nodeInlined = nodeInliner.visit(nodeTarget)
+    ast.fix_missing_locations(nodeInlined)
+
+    # Generate imports
+    requiredImports = find_required_imports(nodeInlined)
+    importStatements = generate_imports(requiredImports)
+
+    # Combine imports with inlined code
+    inlinedCode = importStatements + '\n\n' + ast.unparse(ast.Module(body=[nodeInlined], type_ignores=[]))
+    return inlinedCode
+
+pathFilenameSource = pathlib.Path("/apps/mapFolding/mapFolding/lovelace.py")
+codeSource = pathFilenameSource.read_text()
+
+listCallables = [
+    'countSequential',
+    'countParallel',
+    'countInitialize',
+]
+listPathFilenamesDestination = []
+for callableTarget in listCallables:
+    pathFilenameDestination = pathFilenameSource.with_stem(callableTarget)
+    codeInlined = inline_functions(codeSource, callableTarget, dictionaryEnumValues)
+    pathFilenameDestination.write_text(codeInlined)
+    listPathFilenamesDestination.append(pathFilenameDestination)
mapFolding/lovelace.py
CHANGED
@@ -1,100 +1,118 @@
 from mapFolding import indexMy, indexThe, indexTrack
-from numpy import integer
-from numpy.typing import NDArray
-from typing import Any
 import numba
-import numpy

-
+@numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def activeGapIncrement(my):
     my[indexMy.gap1ndex.value] += 1

-
+@numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def activeLeafGreaterThan0Condition(my):
     return my[indexMy.leaf1ndex.value] > 0

-
+@numba.jit((numba.int64[::1],numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def activeLeafGreaterThanLeavesTotalCondition(my, the):
     return my[indexMy.leaf1ndex.value] > the[indexThe.leavesTotal.value]

-
+@numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def activeLeafIsTheFirstLeafCondition(my):
     return my[indexMy.leaf1ndex.value] <= 1

-
-
-
-def allDimensionsAreUnconstrained(my: NDArray[integer[Any]], the: NDArray[integer[Any]]):
+@numba.jit((numba.int64[::1],numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def allDimensionsAreUnconstrained(my, the):
     return my[indexMy.dimensionsUnconstrained.value] == the[indexThe.dimensionsTotal.value]

-
+@numba.jit((numba.int64[::1],numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def backtrack(my, track):
     my[indexMy.leaf1ndex.value] -= 1
     track[indexTrack.leafBelow.value, track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]]] = track[indexTrack.leafBelow.value, my[indexMy.leaf1ndex.value]]
     track[indexTrack.leafAbove.value, track[indexTrack.leafBelow.value, my[indexMy.leaf1ndex.value]]] = track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]]

-
+@numba.jit((numba.int64[::1],numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def backtrackCondition(my, track):
     return my[indexMy.leaf1ndex.value] > 0 and my[indexMy.gap1ndex.value] == track[indexTrack.gapRangeStart.value, my[indexMy.leaf1ndex.value] - 1]

-
+@numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def gap1ndexCeilingIncrement(my):
+    my[indexMy.gap1ndexCeiling.value] += 1
+
+@numba.jit((numba.int64[::1],numba.int64[::1],numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def countGaps(gapsWhere, my, track):
     gapsWhere[my[indexMy.gap1ndexCeiling.value]] = my[indexMy.leafConnectee.value]
     if track[indexTrack.countDimensionsGapped.value, my[indexMy.leafConnectee.value]] == 0:
         gap1ndexCeilingIncrement(my=my)
     track[indexTrack.countDimensionsGapped.value, my[indexMy.leafConnectee.value]] += 1

-
+@numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def dimension1ndexIncrement(my):
     my[indexMy.dimension1ndex.value] += 1

-
+@numba.jit((numba.int64[:,:,::1], numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def dimensionsUnconstrainedCondition(connectionGraph, my):
     return connectionGraph[my[indexMy.dimension1ndex.value], my[indexMy.leaf1ndex.value], my[indexMy.leaf1ndex.value]] == my[indexMy.leaf1ndex.value]

-
+@numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def dimensionsUnconstrainedIncrement(my):
     my[indexMy.dimensionsUnconstrained.value] += 1

-
+@numba.jit((numba.int64[::1],numba.int64[::1],numba.int64[::1],numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def filterCommonGaps(gapsWhere, my, the, track):
     gapsWhere[my[indexMy.gap1ndex.value]] = gapsWhere[my[indexMy.indexMiniGap.value]]
     if track[indexTrack.countDimensionsGapped.value, gapsWhere[my[indexMy.indexMiniGap.value]]] == the[indexThe.dimensionsTotal.value] - my[indexMy.dimensionsUnconstrained.value]:
         activeGapIncrement(my=my)
     track[indexTrack.countDimensionsGapped.value, gapsWhere[my[indexMy.indexMiniGap.value]]] = 0

-
+@numba.jit((numba.int64[::1],numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def findGapsInitializeVariables(my, track):
     my[indexMy.dimensionsUnconstrained.value] = 0
     my[indexMy.gap1ndexCeiling.value] = track[indexTrack.gapRangeStart.value, my[indexMy.leaf1ndex.value] - 1]
     my[indexMy.dimension1ndex.value] = 1

-
+@numba.jit((numba.int64[::1],numba.int64[::1],numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def foldsSubTotalIncrement(foldsSubTotals, my, the):
     foldsSubTotals[my[indexMy.taskIndex.value]] += the[indexThe.leavesTotal.value]

-
-
-
-def indexMiniGapIncrement(my: NDArray[integer[Any]]):
+@numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def indexMiniGapIncrement(my):
     my[indexMy.indexMiniGap.value] += 1

-
+@numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def indexMiniGapInitialization(my):
     my[indexMy.indexMiniGap.value] = my[indexMy.gap1ndex.value]

-
+@numba.jit((numba.int64[::1],numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def insertUnconstrainedLeaf(gapsWhere, my):
     my[indexMy.indexLeaf.value] = 0
     while my[indexMy.indexLeaf.value] < my[indexMy.leaf1ndex.value]:
         gapsWhere[my[indexMy.gap1ndexCeiling.value]] = my[indexMy.indexLeaf.value]
         my[indexMy.gap1ndexCeiling.value] += 1
         my[indexMy.indexLeaf.value] += 1

-
+@numba.jit((numba.int64[:,::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def leafBelowSentinelIs1Condition(track):
     return track[indexTrack.leafBelow.value, 0] == 1

-
+@numba.jit((numba.int64[:,:,::1], numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def leafConnecteeInitialization(connectionGraph, my):
     my[indexMy.leafConnectee.value] = connectionGraph[my[indexMy.dimension1ndex.value], my[indexMy.leaf1ndex.value], my[indexMy.leaf1ndex.value]]

-
+@numba.jit((numba.int64[:,:,::1], numba.int64[::1],numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def leafConnecteeUpdate(connectionGraph, my, track):
     my[indexMy.leafConnectee.value] = connectionGraph[my[indexMy.dimension1ndex.value], my[indexMy.leaf1ndex.value], track[indexTrack.leafBelow.value, my[indexMy.leafConnectee.value]]]

-
+@numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def loopingLeavesConnectedToActiveLeaf(my):
     return my[indexMy.leafConnectee.value] != my[indexMy.leaf1ndex.value]

-
+@numba.jit((numba.int64[::1],numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def loopingTheDimensions(my, the):
     return my[indexMy.dimension1ndex.value] <= the[indexThe.dimensionsTotal.value]

-
+@numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def loopingToActiveGapCeiling(my):
     return my[indexMy.indexMiniGap.value] < my[indexMy.gap1ndexCeiling.value]

-
+@numba.jit((numba.int64[::1],numba.int64[::1],numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def placeLeaf(gapsWhere, my, track):
     my[indexMy.gap1ndex.value] -= 1
     track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]] = gapsWhere[my[indexMy.gap1ndex.value]]
     track[indexTrack.leafBelow.value, my[indexMy.leaf1ndex.value]] = track[indexTrack.leafBelow.value, track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]]]
@@ -103,20 +121,16 @@ def placeLeaf(gapsWhere: NDArray[integer[Any]], my: NDArray[integer[Any]], track
     track[indexTrack.gapRangeStart.value, my[indexMy.leaf1ndex.value]] = my[indexMy.gap1ndex.value]
     my[indexMy.leaf1ndex.value] += 1

-
+@numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def placeLeafCondition(my):
     return my[indexMy.leaf1ndex.value] > 0

-
-
+@numba.jit((numba.int64[::1],numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def thereAreComputationDivisionsYouMightSkip(my, the):
+    return my[indexMy.leaf1ndex.value] != the[indexThe.taskDivisions.value] or my[indexMy.leafConnectee.value] % the[indexThe.taskDivisions.value] == my[indexMy.taskIndex.value]

-
-
-        return True
-    if taskIndexCondition(my=my, the=the):
-        return True
-    return False
-
-def initialize(connectionGraph: NDArray[integer[Any]], gapsWhere: NDArray[integer[Any]], my: NDArray[integer[Any]], the: NDArray[integer[Any]], track: NDArray[integer[Any]]):
+@numba.jit((numba.int64[:,:,::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def countInitialize(connectionGraph, gapsWhere, my, the, track):
     while activeLeafGreaterThan0Condition(my=my):
         if activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track):
             findGapsInitializeVariables(my=my, track=track)
@@ -138,35 +152,10 @@ def initialize(connectionGraph: NDArray[intege
         if placeLeafCondition(my=my):
             placeLeaf(gapsWhere=gapsWhere, my=my, track=track)
         if my[indexMy.gap1ndex.value] > 0:
-
+            return

-
-
-        if activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track):
-            if activeLeafGreaterThanLeavesTotalCondition(my=my, the=the):
-                foldsSubTotalIncrement(foldsSubTotals=foldsSubTotals, my=my, the=the)
-            else:
-                findGapsInitializeVariables(my=my, track=track)
-                while loopingTheDimensions(my=my, the=the):
-                    if dimensionsUnconstrainedCondition(connectionGraph=connectionGraph, my=my):
-                        dimensionsUnconstrainedIncrement(my=my)
-                    else:
-                        leafConnecteeInitialization(connectionGraph=connectionGraph, my=my)
-                        while loopingLeavesConnectedToActiveLeaf(my=my):
-                            if thereAreComputationDivisionsYouMightSkip(my=my, the=the):
-                                countGaps(gapsWhere=gapsWhere, my=my, track=track)
-                            leafConnecteeUpdate(connectionGraph=connectionGraph, my=my, track=track)
-                    dimension1ndexIncrement(my=my)
-                indexMiniGapInitialization(my=my)
-                while loopingToActiveGapCeiling(my=my):
-                    filterCommonGaps(gapsWhere=gapsWhere, my=my, the=the, track=track)
-                    indexMiniGapIncrement(my=my)
-        while backtrackCondition(my=my, track=track):
-            backtrack(my=my, track=track)
-        if placeLeafCondition(my=my):
-            placeLeaf(gapsWhere=gapsWhere, my=my, track=track)
-
-def countSequential(connectionGraph: NDArray[integer[Any]], foldsSubTotals: NDArray[integer[Any]], gapsWhere: NDArray[integer[Any]], my: NDArray[integer[Any]], the: NDArray[integer[Any]], track: NDArray[integer[Any]]):
+@numba.jit((numba.int64[:,:,::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def countSequential(connectionGraph, foldsSubTotals, gapsWhere, my, the, track):
     while activeLeafGreaterThan0Condition(my=my):
         if activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track):
             if activeLeafGreaterThanLeavesTotalCondition(my=my, the=the):
@@ -191,27 +180,34 @@ def countSequential(connectionGraph: NDArray[integer[Any]], foldsSubTotals: NDAr
         if placeLeafCondition(my=my):
             placeLeaf(gapsWhere=gapsWhere, my=my, track=track)

-@numba.jit(
-def
-
-    stateGapsWhere = gapsWhere.copy()
-    stateMy = my.copy()
-    stateTrack = track.copy()
-
+@numba.jit((numba.int64[:,:,::1], numba.int64[::1], numba.int64[::1],numba.int64[::1],numba.int64[::1],numba.int64[:,::1]), parallel=True, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+def countParallel(connectionGraph, foldsSubTotals, gapsWherePARALLEL, myPARALLEL, the, trackPARALLEL):
     for indexSherpa in numba.prange(the[indexThe.taskDivisions.value]):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        gapsWhere = gapsWherePARALLEL.copy()
+        my = myPARALLEL.copy()
+        my[indexMy.taskIndex.value] = indexSherpa
+        track = trackPARALLEL.copy()
+        while activeLeafGreaterThan0Condition(my=my):
+            if activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track):
+                if activeLeafGreaterThanLeavesTotalCondition(my=my, the=the):
+                    foldsSubTotalIncrement(foldsSubTotals=foldsSubTotals, my=my, the=the)
+                else:
+                    findGapsInitializeVariables(my=my, track=track)
+                    while loopingTheDimensions(my=my, the=the):
+                        if dimensionsUnconstrainedCondition(connectionGraph=connectionGraph, my=my):
+                            dimensionsUnconstrainedIncrement(my=my)
+                        else:
+                            leafConnecteeInitialization(connectionGraph=connectionGraph, my=my)
+                            while loopingLeavesConnectedToActiveLeaf(my=my):
+                                if thereAreComputationDivisionsYouMightSkip(my=my, the=the):
+                                    countGaps(gapsWhere=gapsWhere, my=my, track=track)
+                                leafConnecteeUpdate(connectionGraph=connectionGraph, my=my, track=track)
+                        dimension1ndexIncrement(my=my)
+                    indexMiniGapInitialization(my=my)
+                    while loopingToActiveGapCeiling(my=my):
+                        filterCommonGaps(gapsWhere=gapsWhere, my=my, the=the, track=track)
+                        indexMiniGapIncrement(my=my)
+            while backtrackCondition(my=my, track=track):
+                backtrack(my=my, track=track)
+            if placeLeafCondition(my=my):
+                placeLeaf(gapsWhere=gapsWhere, my=my, track=track)
mapFolding/startHere.py
CHANGED
@@ -44,10 +44,8 @@ def countFolds(listDimensions: Sequence[int], writeFoldsTotal: Optional[Union[st
         pathFilenameFoldsTotal = pathFilenameFoldsTotal / filenameFoldsTotalDEFAULT
     pathFilenameFoldsTotal.parent.mkdir(parents=True, exist_ok=True)

-    # NOTE Don't import a module with a numba.jit function until you want the function to compile and to freeze all settings for that function.
     from mapFolding.babbage import _countFolds
     _countFolds(**stateUniversal)
-    # foldsSubTotals = benchmarkSherpa(**stateUniversal)

     foldsTotal = stateUniversal['foldsSubTotals'].sum().item()

@@ -59,12 +57,3 @@
     print(f"\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n\n{foldsTotal=}\n\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal")

     return foldsTotal
-
-# from numpy import integer
-# from numpy.typing import NDArray
-# from typing import Any, Tuple
-# from mapFolding.benchmarks.benchmarking import recordBenchmarks
-# @recordBenchmarks()
-# def benchmarkSherpa(connectionGraph: NDArray[integer[Any]], foldsSubTotals: NDArray[integer[Any]], gapsWhere: NDArray[integer[Any]], mapShape: Tuple[int, ...], my: NDArray[integer[Any]], the: NDArray[integer[Any]], track: NDArray[integer[Any]]):
-#     from mapFolding.babbage import _countFolds
-#     return _countFolds(connectionGraph, foldsSubTotals, gapsWhere, mapShape, my, the, track)
mapFolding/theSSOT.py
CHANGED
@@ -5,9 +5,17 @@ import numpy.typing
 import pathlib
 import sys

-
-
-
+datatypeModule = 'numpy'
+
+datatypeLarge = 'int64'
+datatypeDefault = datatypeLarge
+datatypeSmall = datatypeDefault
+
+make_dtype = lambda _datatype: eval(f"{datatypeModule}.{_datatype}")
+
+dtypeLarge = make_dtype(datatypeLarge)
+dtypeDefault = make_dtype(datatypeDefault)
+dtypeSmall = make_dtype(datatypeSmall)

 try:
     _pathModule = pathlib.Path(__file__).parent
@@ -59,9 +67,9 @@ class indexTrack(EnumIndices):

 class computationState(TypedDict):
     connectionGraph: numpy.typing.NDArray[numpy.integer[Any]]
-    foldsSubTotals: numpy.
+    foldsSubTotals: numpy.typing.NDArray[numpy.integer[Any]]
+    gapsWhere: numpy.typing.NDArray[numpy.integer[Any]]
     mapShape: Tuple[int, ...]
     my: numpy.typing.NDArray[numpy.integer[Any]]
-    gapsWhere: numpy.typing.NDArray[numpy.integer[Any]]
     the: numpy.typing.NDArray[numpy.integer[Any]]
     track: numpy.typing.NDArray[numpy.integer[Any]]
{mapFolding-0.2.3.dist-info → mapFolding-0.2.4.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: mapFolding
-Version: 0.2.
+Version: 0.2.4
 Summary: Algorithm(s) for counting distinct ways to fold a map (or a strip of stamps)
 Author-email: Hunter Hogan <HunterHogan@pm.me>
 Project-URL: homepage, https://github.com/hunterhogan/mapFolding
@@ -22,6 +22,8 @@ Requires-Dist: pytest; extra == "testing"
 Requires-Dist: pytest-cov; extra == "testing"
 Requires-Dist: pytest-env; extra == "testing"
 Requires-Dist: pytest-xdist; extra == "testing"
+Requires-Dist: pytest-order; extra == "testing"
+Requires-Dist: pytest-dependency; extra == "testing"

 # Algorithm(s) for counting distinct ways to fold a map (or a strip of stamps)

{mapFolding-0.2.3.dist-info → mapFolding-0.2.4.dist-info}/RECORD
CHANGED
@@ -1,14 +1,18 @@
 mapFolding/__init__.py,sha256=wnf2EzHR2unVha6-Y0gRoSPaE4PDdT4VngINa_dfT2E,337
-mapFolding/babbage.py,sha256=
-mapFolding/beDRY.py,sha256=
-mapFolding/
+mapFolding/babbage.py,sha256=51fO7lwcTsTvSMwzKW1G2nGslGoEQt19IgnqZi8znao,2222
+mapFolding/beDRY.py,sha256=XawGabR1vhzOfdA46HSXmisA5EmxisTKdA3D98KDeac,13699
+mapFolding/countInitialize.py,sha256=pIeH52OwDMfuHXT2T4BbPmMm6r7zJnGc-e0QVQCKyDc,1824
+mapFolding/countParallel.py,sha256=1sLGIlMj_xZ4bFkG1srOPcDUCrSKc1q3x2QN_8l_sgY,2451
+mapFolding/countSequential.py,sha256=QSXwK3o8YBcxNrir_wGMXgqp38hXYTJanYXFLxUPCPo,1993
+mapFolding/importSelector.py,sha256=OY_LuUrLW5SFV6qM1tSgI2Rnfi5Bj3Fhdrkryo0WycE,392
+mapFolding/inlineAfunction.py,sha256=KO2snTNSGX-4urRtTOYqAZBCsBCaMfr5bo6rNZR9MPA,5102
+mapFolding/lovelace.py,sha256=iu7anbA_TacIAjc4EKkeBVxIJKAMdrYgvR4evzMZ1WY,15193
 mapFolding/oeis.py,sha256=_-fLGc1ybZ2eFxoiBrSmojMexeg6ROxtrLaBF2BzMn4,12144
-mapFolding/startHere.py,sha256=
-mapFolding/theSSOT.py,sha256=
+mapFolding/startHere.py,sha256=or7QhxgMls2hvP_I2eTBP5tffLrc3SMiE5Gz_Ik2aJY,4328
+mapFolding/theSSOT.py,sha256=3Zty4rYWOqrwivuCaKA71R0HM4rjmvtkL_Bsn4ZhwFo,2318
 mapFolding/JAX/lunnanJAX.py,sha256=xMZloN47q-MVfjdYOM1hi9qR4OnLq7qALmGLMraevQs,14819
 mapFolding/JAX/taskJAX.py,sha256=yJNeH0rL6EhJ6ppnATHF0Zf81CDMC10bnPnimVxE1hc,20037
-mapFolding/benchmarks/benchmarking.py,sha256=
-mapFolding/benchmarks/test_benchmarks.py,sha256=c4ANeR3jgqpKXFoxDeZkmAHxSuenMwsjmrhKJ1_XPqY,3659
+mapFolding/benchmarks/benchmarking.py,sha256=HD_0NSvuabblg94ftDre6LFnXShTe8MYj3hIodW-zV0,3076
 mapFolding/reference/flattened.py,sha256=X9nvRzg7YDcpCtSDTL4YiidjshlX9rg2e6JVCY6i2u0,16547
 mapFolding/reference/hunterNumba.py,sha256=0giUyqAFzP-XKcq3Kz8wIWCK0BVFhjABVJ1s-w4Jhu0,7109
 mapFolding/reference/irvineJavaPort.py,sha256=Sj-63Z-OsGuDoEBXuxyjRrNmmyl0d7Yz_XuY7I47Oyg,4250
@@ -18,13 +22,14 @@ mapFolding/reference/lunnanWhile.py,sha256=7NY2IKO5XBgol0aWWF_Fi-7oTL9pvu_z6lB0T
 mapFolding/reference/rotatedEntryPoint.py,sha256=z0QyDQtnMvXNj5ntWzzJUQUMFm1-xHGLVhtYzwmczUI,11530
 mapFolding/reference/total_countPlus1vsPlusN.py,sha256=usenM8Yn_G1dqlPl7NKKkcnbohBZVZBXTQRm2S3_EDA,8106
 tests/__init__.py,sha256=eg9smg-6VblOr0kisM40CpGnuDtU2JgEEWGDTFVOlW8,57
-tests/conftest.py,sha256=
+tests/conftest.py,sha256=AWB3m_jxMlkmOmGvk2ApJEk2ro5v8gmmJDcyLwN1oow,13761
 tests/pythons_idiotic_namespace.py,sha256=oOLDBergQqqhGuRpsXUnFD-R_6AlJipNKYHw-kk_OKw,33
 tests/test_oeis.py,sha256=vxnwO-cSR68htkyMh9QMVv-lvxBo6qlwPg1Rbx4JylY,7963
-tests/test_other.py,sha256=
+tests/test_other.py,sha256=amhsy7VWzpuW_slBOTFPhC7e4o4k6Yp4xweNK1VHZnc,11906
 tests/test_tasks.py,sha256=Nwe4iuSjwGZvsw5CXCcic7tkBxgM5JX9mrGZMDYhAwE,1785
-
-mapFolding-0.2.
-mapFolding-0.2.
-mapFolding-0.2.
-mapFolding-0.2.
+tests/test_temporary.py,sha256=4FIEc9KGRpNsgU_eh8mXG49PSPqo8WLeZEyFI4Dpy3U,1127
+mapFolding-0.2.4.dist-info/METADATA,sha256=w1OgxNLylmuYfEKUsSIChm_8jLtjV63_OB04n8Btjm8,6543
+mapFolding-0.2.4.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+mapFolding-0.2.4.dist-info/entry_points.txt,sha256=F3OUeZR1XDTpoH7k3wXuRb3KF_kXTTeYhu5AGK1SiOQ,146
+mapFolding-0.2.4.dist-info/top_level.txt,sha256=1gP2vFaqPwHujGwb3UjtMlLEGN-943VSYFR7V4gDqW8,17
+mapFolding-0.2.4.dist-info/RECORD,,
tests/conftest.py
CHANGED
@@ -26,7 +26,6 @@ from mapFolding.oeis import _parseBFileOEIS
 from mapFolding.oeis import _validateOEISid
 from mapFolding.oeis import oeisIDsImplemented
 from mapFolding.oeis import settingsOEIS
-from mapFolding import *

 __all__ = [
     'OEIS_for_n',
@@ -135,6 +134,14 @@ def setupTeardownTestData() -> Generator[None, None, None]:
     yield
     cleanupTempFileRegister()

+@pytest.fixture(autouse=True)
+def setupWarningsAsErrors():
+    """Convert all warnings to errors for all tests."""
+    import warnings
+    warnings.filterwarnings("error")
+    yield
+    warnings.resetwarnings()
+
 @pytest.fixture
 def pathTempTesting(request: pytest.FixtureRequest) -> pathlib.Path:
     """Create a unique temp directory for each test function."""
tests/test_other.py
CHANGED
@@ -1,12 +1,14 @@
-
-from typing import List, Optional, Dict, Any, Union
+import pathlib
 from tests.conftest import *
 from tests.pythons_idiotic_namespace import *
+from typing import List, Optional
+import itertools
+import numba
+import numpy
 import pytest
+import random
 import sys
 import unittest.mock
-import numpy
-import numba

 @pytest.mark.parametrize("listDimensions,expected_intInnit,expected_parseListDimensions,expected_validateListDimensions,expected_getLeavesTotal", [
     (None, ValueError, ValueError, ValueError, ValueError), # None instead of list
@@ -65,7 +67,7 @@ def test_getLeavesTotal_edge_cases() -> None:
 ])
 def test_countFolds_writeFoldsTotal(
     listDimensionsTestFunctionality: List[int],
-    pathTempTesting: Path,
+    pathTempTesting: pathlib.Path,
     mockFoldingFunction,
     foldsValue: int,
     writeFoldsTarget: Optional[str]
@@ -97,18 +99,19 @@ def test_oopsieKwargsie() -> None:
     for testName, testFunction in makeTestSuiteOopsieKwargsie(oopsieKwargsie).items():
         testFunction()

-
-
-
-
-
-
-
-
-
-
-
-
+@pytest.mark.parametrize("CPUlimit, expectedLimit", [
+    (None, numba.config.NUMBA_DEFAULT_NUM_THREADS), # type: ignore
+    (False, numba.config.NUMBA_DEFAULT_NUM_THREADS), # type: ignore
+    (True, 1),
+    (4, 4),
+    (0.5, max(1, numba.config.NUMBA_DEFAULT_NUM_THREADS // 2)), # type: ignore
+    (-0.5, max(1, numba.config.NUMBA_DEFAULT_NUM_THREADS // 2)), # type: ignore
+    (-2, max(1, numba.config.NUMBA_DEFAULT_NUM_THREADS - 2)), # type: ignore
+    (0, numba.config.NUMBA_DEFAULT_NUM_THREADS), # type: ignore
+    (1, 1),
+])
+def test_setCPUlimit(CPUlimit, expectedLimit) -> None:
+    standardComparison(expectedLimit, setCPUlimit, CPUlimit)

 def test_makeConnectionGraph_nonNegative(listDimensionsTestFunctionality: List[int]) -> None:
     connectionGraph = makeConnectionGraph(listDimensionsTestFunctionality)
@@ -119,80 +122,147 @@ def test_makeConnectionGraph_datatype(listDimensionsTestFunctionality: List[int]
     connectionGraph = makeConnectionGraph(listDimensionsTestFunctionality, datatype=datatype)
     assert connectionGraph.dtype == datatype, f"Expected datatype {datatype}, but got {connectionGraph.dtype}."

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#
-
-
-
-
-#
-
-
-#
+
+"""5 parameters
+listDimensionsTestFunctionality
+
+computationDivisions
+    None
+    random: int, first included: 2, first excluded: leavesTotal
+    maximum
+    cpu
+
+CPUlimit
+    None
+    True
+    False
+    0
+    1
+    -1
+    random: 0 < float < 1
+    random: -1 < float < 0
+    random: int, first included: 2, first excluded: (min(leavesTotal, 16) - 1)
+    random: int, first included: -1 * (min(leavesTotal, 16) - 1), first excluded: -1
+
+datatypeDefault
+    None
+    numpy.int64
+    numpy.intc
+    numpy.uint16
+
+datatypeLarge
+    None
+    numpy.int64
+    numpy.intp
+    numpy.uint32
+
+"""
+
+@pytest.fixture
+def parameterIterator():
+    """Generate random combinations of parameters for outfitCountFolds testing."""
+    parameterSets = {
+        'computationDivisions': [
+            None,
+            'maximum',
+            'cpu',
+        ],
+        'CPUlimit': [
+            None, True, False, 0, 1, -1,
+        ],
+        'datatypeDefault': [
+            None,
+            numpy.int64,
+            numpy.intc,
+            numpy.uint16
+        ],
+        'datatypeLarge': [
+            None,
+            numpy.int64,
+            numpy.intp,
+            numpy.uint32
+        ]
+    }
+
+    def makeParametersDynamic(listDimensions):
+        """Add context-dependent parameter values."""
+        parametersDynamic = parameterSets.copy()
+        leavesTotal = getLeavesTotal(listDimensions)
+        concurrencyLimit = min(leavesTotal, 16)
+
+        # Add dynamic computationDivisions
+        parametersDynamic['computationDivisions'].extend(
+            [random.randint(2, leavesTotal-1) for iterator in range(3)]
+        )
+
+        # Add dynamic CPUlimit values
+        parameterDynamicCPU = [
+            random.random(), # 0 to 1
+            -random.random(), # -1 to 0
+        ]
+        parameterDynamicCPU.extend(
+            [random.randint(2, concurrencyLimit-1) for iterator in range(2)]
+        )
+        parameterDynamicCPU.extend(
+            [random.randint(-concurrencyLimit+1, -2) for iterator in range(2)]
+        )
+        parametersDynamic['CPUlimit'].extend(parameterDynamicCPU)
+
+        return parametersDynamic
+
+    def generateCombinations(listDimensions):
+        parametersDynamic = makeParametersDynamic(listDimensions)
+        parameterKeys = list(parametersDynamic.keys())
+        parameterValues = [parametersDynamic[key] for key in parameterKeys]
+
+        # Shuffle each parameter list
+        for valueList in parameterValues:
+            random.shuffle(valueList)
+
+        # Use zip_longest to iterate, filling with None when shorter lists are exhausted
+        for combination in itertools.zip_longest(*parameterValues, fillvalue=None):
+            yield dict(zip(parameterKeys, combination))
+
+    return generateCombinations
+
+def test_outfitCountFolds_basic(listDimensionsTestFunctionality, parameterIterator):
+    """Basic validation of outfitCountFolds return value structure."""
+    parameters = next(parameterIterator(listDimensionsTestFunctionality))
+
+    stateInitialized = outfitCountFolds(
+        listDimensionsTestFunctionality,
+        **{k: v for k, v in parameters.items() if v is not None}
+    )
+
+    # Basic structure tests
+    assert isinstance(stateInitialized, dict)
+    assert len(stateInitialized) == 7 # 6 ndarray + 1 tuple
+
+    # Check for specific keys
+    requiredKeys = set(computationState.__annotations__.keys())
+    assert set(stateInitialized.keys()) == requiredKeys
+
+    # Check types more carefully
+    for key, value in stateInitialized.items():
+        if key == 'mapShape':
+            assert isinstance(value, tuple)
+            assert all(isinstance(dim, int) for dim in value)
+        else:
+            assert isinstance(value, numpy.ndarray), f"{key} should be ndarray but is {type(value)}"
+            assert issubclass(value.dtype.type, numpy.integer), \
+                f"{key} should have integer dtype but has {value.dtype}"
+
+def test_pathJobDEFAULT_colab():
+    """Test that pathJobDEFAULT is set correctly when running in Google Colab."""
+    # Mock sys.modules to simulate running in Colab
+    with unittest.mock.patch.dict('sys.modules', {'google.colab': unittest.mock.MagicMock()}):
+        # Force reload of theSSOT to trigger Colab path logic
+        import importlib
+        import mapFolding.theSSOT
+        importlib.reload(mapFolding.theSSOT)

+        # Check that path was set to Colab-specific value
+        assert mapFolding.theSSOT.pathJobDEFAULT == pathlib.Path("/content/drive/MyDrive") / "jobs"
+
+        # Reload one more time to restore original state
+        importlib.reload(mapFolding.theSSOT)
tests/test_temporary.py
ADDED
@@ -0,0 +1,25 @@
+from tests.conftest import *
+from typing import Dict, List, Tuple
+import importlib
+import pytest
+
+@pytest.fixture(scope="session", autouse=True)
+def runSecondSetAfterAll(request: pytest.FixtureRequest):
+    """Run after all other tests complete."""
+    def toggleAndRerun():
+        import mapFolding.importSelector
+        import mapFolding.babbage
+        mapFolding.importSelector.useLovelace = not mapFolding.importSelector.useLovelace
+        importlib.reload(mapFolding.importSelector)
+        importlib.reload(mapFolding.babbage)
+
+    request.addfinalizer(toggleAndRerun)
+
+@pytest.mark.order(after="runSecondSetAfterAll")
+def test_myabilitytodealwithbs(oeisID: str):
+    for n in settingsOEIS[oeisID]['valuesTestValidation']:
+        standardComparison(settingsOEIS[oeisID]['valuesKnown'][n], oeisIDfor_n, oeisID, n)
+
+@pytest.mark.order(after="runSecondSetAfterAll")
+def test_eff_em_el(listDimensionsTest_countFolds: List[int], foldsTotalKnown: Dict[Tuple[int, ...], int]) -> None:
+    standardComparison(foldsTotalKnown[tuple(listDimensionsTest_countFolds)], countFolds, listDimensionsTest_countFolds, None, 'maximum')
mapFolding/benchmarks/test_benchmarks.py
REMOVED
@@ -1,74 +0,0 @@
-from ...tests.conftest import *
-from .benchmarking import recordBenchmarks, runBenchmarks
-import numpy
-import pathlib
-import pytest
-import unittest.mock
-from typing import List
-
-def test_recordBenchmarks_decorator(pathBenchmarksTesting: pathlib.Path,
-                                    listDimensionsTestFunctionality: List[int],
-                                    mockBenchmarkTimer: unittest.mock.MagicMock):
-    """Test that the decorator correctly records benchmark data."""
-    @recordBenchmarks()
-    def functionTest(listDimensions: List[int]) -> int:
-        return sum(listDimensions)
-
-    with mockBenchmarkTimer:
-        mockBenchmarkTimer.side_effect = [0, 1e9]
-        result = functionTest(listDimensionsTestFunctionality)
-
-    # Verify function still works normally
-    assert result == sum(listDimensionsTestFunctionality)
-
-    # Verify benchmark data was saved
-    arrayBenchmarks = numpy.load(str(pathBenchmarksTesting), allow_pickle=True)
-    assert len(arrayBenchmarks) == 1
-    assert arrayBenchmarks[0]['time'] == 1.0
-    assert tuple(arrayBenchmarks[0]['dimensions']) == tuple(listDimensionsTestFunctionality)
-
-def test_recordBenchmarks_multiple_calls(pathBenchmarksTesting: pathlib.Path,
-                                         listDimensionsTestFunctionality: List[int],
-                                         mockBenchmarkTimer: unittest.mock.MagicMock):
-    """Test that multiple function calls append to benchmark data."""
-    @recordBenchmarks()
-    def functionTest(listDimensions: List[int]) -> int:
-        return sum(listDimensions)
-
-    with mockBenchmarkTimer:
-        mockBenchmarkTimer.side_effect = [0, 1e9, 2e9, 4e9]
-        functionTest(listDimensionsTestFunctionality)
-        functionTest(listDimensionsTestFunctionality)
-
-    arrayBenchmarks = numpy.load(str(pathBenchmarksTesting), allow_pickle=True)
-    assert len(arrayBenchmarks) == 2
-    assert arrayBenchmarks[0]['time'] == 1.0
-    assert arrayBenchmarks[1]['time'] == 2.0
-
-# NOTE This test tries to collect benchmark data without ensuring that a function is decorated.
-# def test_runBenchmarks_integration(pathBenchmarksTesting: pathlib.Path, listDimensionsTestFunctionality: List[int]):
-#     """Test runBenchmarks creates valid benchmark data."""
-#     countIterations = 2
-#     runBenchmarks(countIterations)
-
-#     arrayBenchmarks = numpy.load(str(pathBenchmarksTesting), allow_pickle=True)
-#     assert len(arrayBenchmarks) > 0 # Should have recorded some benchmarks
-
-#     # Verify data structure integrity
-#     assert arrayBenchmarks.dtype.names == ('time', 'dimensions')
-#     assert all(isinstance(record['time'], float) for record in arrayBenchmarks)
-#     assert all(isinstance(record['dimensions'], tuple) for record in arrayBenchmarks)
-
-#     # Verify at least one benchmark entry matches our test dimensions
-#     assert any(tuple(listDimensionsTestFunctionality) == record['dimensions'] for record in arrayBenchmarks)
-
-# NOTE This test tries to collect benchmark data without ensuring that a function is decorated.
-# @pytest.mark.parametrize("countIterations", [1, 2])
-# def test_runBenchmarks_iterations(countIterations: int, pathBenchmarksTesting: pathlib.Path, listDimensionsTestFunctionality: List[int]):
-#     """Test runBenchmarks records data for each iteration."""
-#     runBenchmarks(countIterations)
-#     arrayBenchmarks = numpy.load(str(pathBenchmarksTesting), allow_pickle=True)
-
-#     # Should have at least countIterations entries for our test dimensions
-#     countMatches = sum(1 for record in arrayBenchmarks if tuple(listDimensionsTestFunctionality) == record['dimensions'])
-#     assert countMatches >= countIterations
{mapFolding-0.2.3.dist-info → mapFolding-0.2.4.dist-info}/WHEEL
File without changes
{mapFolding-0.2.3.dist-info → mapFolding-0.2.4.dist-info}/entry_points.txt
File without changes
{mapFolding-0.2.3.dist-info → mapFolding-0.2.4.dist-info}/top_level.txt
File without changes