mapFolding: mapFolding-0.3.10-py3-none-any.whl → mapFolding-0.3.12-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/__init__.py +10 -0
- mapFolding/someAssemblyRequired/__init__.py +0 -1
- mapFolding/someAssemblyRequired/makeJob.py +5 -5
- mapFolding/someAssemblyRequired/synthesizeNumba.py +637 -0
- mapFolding/someAssemblyRequired/synthesizeNumbaHardcoding.py +188 -0
- mapFolding/syntheticModules/numba_countInitialize.py +10 -6
- mapFolding/syntheticModules/numba_countParallel.py +14 -8
- mapFolding/syntheticModules/numba_countSequential.py +37 -32
- mapFolding/syntheticModules/numba_doTheNeedful.py +10 -20
- mapFolding/theDao.py +61 -45
- mapFolding/theSSOT.py +32 -8
- mapFolding/theSSOTnumba.py +2 -1
- {mapFolding-0.3.10.dist-info → mapFolding-0.3.12.dist-info}/METADATA +3 -1
- {mapFolding-0.3.10.dist-info → mapFolding-0.3.12.dist-info}/RECORD +18 -18
- mapFolding/someAssemblyRequired/synthesizeJobNumba.py +0 -383
- mapFolding/someAssemblyRequired/synthesizeModulesNumba.py +0 -533
- {mapFolding-0.3.10.dist-info → mapFolding-0.3.12.dist-info}/LICENSE +0 -0
- {mapFolding-0.3.10.dist-info → mapFolding-0.3.12.dist-info}/WHEEL +0 -0
- {mapFolding-0.3.10.dist-info → mapFolding-0.3.12.dist-info}/entry_points.txt +0 -0
- {mapFolding-0.3.10.dist-info → mapFolding-0.3.12.dist-info}/top_level.txt +0 -0
mapFolding/someAssemblyRequired/synthesizeNumbaHardcoding.py
ADDED
@@ -0,0 +1,188 @@
+from synthesizeNumba import *
+
+def makeNumbaOptimizedFlow(listCallablesInline: List[str], callableDispatcher: Optional[str] = None, algorithmSource: Optional[ModuleType] = None) -> None:
+    if not algorithmSource:
+        algorithmSource = getAlgorithmSource()
+
+    formatModuleNameDEFAULT = "numba_{callableTarget}"
+
+    # When I am a more competent programmer, I will make getPathFilenameWrite dependent on makeAstImport or vice versa,
+    # so the name of the physical file doesn't get out of whack with the name of the logical module.
+    def getPathFilenameWrite(callableTarget: str
+                            , pathWrite: Optional[pathlib.Path] = None
+                            , formatFilenameWrite: Optional[str] = None
+                            ) -> pathlib.Path:
+        if not pathWrite:
+            pathWrite = getPathSyntheticModules()
+        if not formatFilenameWrite:
+            formatFilenameWrite = formatModuleNameDEFAULT + '.py'
+
+        pathFilename = pathWrite / formatFilenameWrite.format(callableTarget=callableTarget)
+        return pathFilename
+
+    def makeAstImport(callableTarget: str
+                     , packageName: Optional[str] = None
+                     , subPackageName: Optional[str] = None
+                     , moduleName: Optional[str] = None
+                     , astNodeLogicalPathThingy: Optional[ast.AST] = None
+                     ) -> ast.ImportFrom:
+        """Creates import AST node for synthetic modules."""
+        if astNodeLogicalPathThingy is None:
+            if packageName is None:
+                packageName = myPackageNameIs
+            if subPackageName is None:
+                subPackageName = moduleOfSyntheticModules
+            if moduleName is None:
+                moduleName = formatModuleNameDEFAULT.format(callableTarget=callableTarget)
+            module=f'{packageName}.{subPackageName}.{moduleName}'
+        else:
+            module = str(astNodeLogicalPathThingy)
+        return ast.ImportFrom(
+            module=module,
+            names=[ast.alias(name=callableTarget, asname=None)],
+            level=0
+        )
+
+    listStuffYouOughtaKnow: List[youOughtaKnow] = []
+
+    for callableTarget in listCallablesInline:
+        pythonSource = inspect.getsource(algorithmSource)
+        parametersNumba = None
+        unpackArrays = False
+        match callableTarget:
+            case 'countParallel':
+                parametersNumba = parametersNumbaSuperJitParallel
+            case 'countSequential':
+                parametersNumba = parametersNumbaSuperJit
+                unpackArrays = True
+            case 'countInitialize':
+                parametersNumba = parametersNumbaDEFAULT
+        pythonSource = inlineOneCallable(pythonSource, callableTarget, parametersNumba, unpackArrays)
+        if not pythonSource:
+            raise Exception("Pylance, OMG! The sky is falling!")
+
+        pathFilename = getPathFilenameWrite(callableTarget)
+
+        listStuffYouOughtaKnow.append(youOughtaKnow(
+            callableSynthesized=callableTarget,
+            pathFilenameForMe=pathFilename,
+            astForCompetentProgrammers=makeAstImport(callableTarget)
+        ))
+        pythonSource = autoflake.fix_code(pythonSource, ['mapFolding', 'numba', 'numpy'])
+        pathFilename.write_text(pythonSource)
+
+    # Generate dispatcher if requested
+    if callableDispatcher:
+        pythonSource = inspect.getsource(algorithmSource)
+        pythonSource = makeDispatcherNumba(pythonSource, callableDispatcher, listStuffYouOughtaKnow)
+        if not pythonSource:
+            raise FREAKOUT
+
+        pathFilename = getPathFilenameWrite(callableDispatcher)
+
+        listStuffYouOughtaKnow.append(youOughtaKnow(
+            callableSynthesized=callableDispatcher,
+            pathFilenameForMe=pathFilename,
+            astForCompetentProgrammers=makeAstImport(callableDispatcher)
+        ))
+        pythonSource = autoflake.fix_code(pythonSource, ['mapFolding', 'numba', 'numpy'])
+        pathFilename.write_text(pythonSource)
+
+def writeJobNumba(listDimensions: Sequence[int], callableTarget: str, algorithmSource: ModuleType, parametersNumba: Optional[ParametersNumba]=None, pathFilenameWriteJob: Optional[Union[str, os.PathLike[str]]] = None, **keywordArguments: Optional[Any]) -> pathlib.Path:
+    """ Parameters: **keywordArguments: most especially for `computationDivisions` if you want to make a parallel job. Also `CPUlimit`. """
+    """Notes about the existing logic:
+    - the synthesized module must run well as a standalone interpreted Python script
+    - `writeJobNumba` synthesizes a parameter-specific module by starting with code synthesized by `makeNumbaOptimizedFlow`, which improves the optimization
+    - similarly, `writeJobNumba` should be a solid foundation for more optimizations, most especially compiling to a standalone executable, but the details of the next optimization step are unknown
+    - the minimum runtime (on my computer) to compute a value unknown to mathematicians is 26 hours; therefore, we want to ensure the value is seen by the user, but we must have ultra-light overhead.
+    - perf_counter is for testing. When I run a real job, I delete those lines
+    - avoid `with` statement
+    """
+    stateJob = makeStateJob(listDimensions, writeJob=False, **keywordArguments)
+    pythonSource = inspect.getsource(algorithmSource)
+    astModule = ast.parse(pythonSource)
+
+    allImports = UniversalImportTracker()
+
+    for statement in astModule.body:
+        if isinstance(statement, (ast.Import, ast.ImportFrom)):
+            allImports.addAst(statement)
+
+    FunctionDefTarget = next((node for node in astModule.body if isinstance(node, ast.FunctionDef) and node.name == callableTarget), None)
+    if not FunctionDefTarget: raise ValueError(f"I received `{callableTarget=}` and {algorithmSource.__name__=}, but I could not find that function in that source.")
+
+    for pirateScowl in FunctionDefTarget.args.args.copy():
+        match pirateScowl.arg:
+            case 'my':
+                FunctionDefTarget, allImports = evaluate_argIn_body(FunctionDefTarget, pirateScowl, stateJob[pirateScowl.arg], ['taskIndex', 'dimensionsTotal'], allImports)
+            case 'track':
+                FunctionDefTarget, allImports = evaluateArrayIn_body(FunctionDefTarget, pirateScowl, stateJob[pirateScowl.arg], allImports)
+            # TODO remove this after implementing `unrollWhileLoop`
+            case 'connectionGraph':
+                FunctionDefTarget, allImports = moveArrayTo_body(FunctionDefTarget, pirateScowl, stateJob[pirateScowl.arg], allImports)
+            case 'gapsWhere':
+                FunctionDefTarget, allImports = moveArrayTo_body(FunctionDefTarget, pirateScowl, stateJob[pirateScowl.arg], allImports)
+            case 'foldGroups':
+                FunctionDefTarget = removeIdentifierFrom_body(FunctionDefTarget, pirateScowl)
+
+    # Move function parameters to the function body,
+    # initialize identifiers with their state types and values,
+    # and replace static-valued identifiers with their values.
+    FunctionDefTarget, allImports = evaluateAnnAssignIn_body(FunctionDefTarget, allImports)
+    FunctionDefTarget = astNameToAstConstant(FunctionDefTarget, 'dimensionsTotal', int(stateJob['my'][indexMy.dimensionsTotal]))
+    FunctionDefTarget = astObjectToAstConstant(FunctionDefTarget, 'foldGroups[-1]', int(stateJob['foldGroups'][-1]))
+
+    FunctionDefTarget = unrollWhileLoop(FunctionDefTarget, 'indexDimension', stateJob['my'][indexMy.dimensionsTotal], stateJob['connectionGraph'])
+
+    FunctionDefTarget, allImports = addReturnJobNumba(FunctionDefTarget, stateJob, allImports)
+    FunctionDefTarget, allImports = makeDecoratorJobNumba(FunctionDefTarget, allImports, parametersNumba)
+
+    pathFilenameFoldsTotal = getPathFilenameFoldsTotal(stateJob['mapShape'])
+    # TODO consider: 1) launcher is a function, 2) if __name__ calls the launcher function, and 3) the launcher is "jitted", even just a light jit, then 4) `FunctionDefTarget` could be superJit.
+    astLauncher = makeLauncherJobNumba(FunctionDefTarget.name, pathFilenameFoldsTotal)
+
+    astImports = allImports.makeListAst()
+
+    astModule = ast.Module(body=cast(List[ast.stmt], astImports + [FunctionDefTarget] + [astLauncher]), type_ignores=[])
+    ast.fix_missing_locations(astModule)
+
+    pythonSource = ast.unparse(astModule)
+    pythonSource = autoflake.fix_code(pythonSource, ['mapFolding', 'numba', 'numpy'])
+
+    if pathFilenameWriteJob is None:
+        filename = getFilenameFoldsTotal(stateJob['mapShape'])
+        pathRoot = getPathJobRootDEFAULT()
+        pathFilenameWriteJob = pathlib.Path(pathRoot, pathlib.Path(filename).stem, pathlib.Path(filename).with_suffix('.py'))
+    else:
+        pathFilenameWriteJob = pathlib.Path(pathFilenameWriteJob)
+    pathFilenameWriteJob.parent.mkdir(parents=True, exist_ok=True)
+
+    pathFilenameWriteJob.write_text(pythonSource)
+    return pathFilenameWriteJob
+
+def mainBig():
+    setDatatypeModule('numpy', sourGrapes=True)
+    setDatatypeFoldsTotal('int64', sourGrapes=True)
+    setDatatypeElephino('uint8', sourGrapes=True)
+    setDatatypeLeavesTotal('uint8', sourGrapes=True)
+    listCallablesInline: List[str] = ['countInitialize', 'countParallel', 'countSequential']
+    Z0Z_setDatatypeModuleScalar('numba')
+    Z0Z_setDecoratorCallable('jit')
+    callableDispatcher = 'doTheNeedful'
+    makeNumbaOptimizedFlow(listCallablesInline, callableDispatcher)
+
+def mainSmall():
+    listDimensions = [6,6]
+    setDatatypeFoldsTotal('int64', sourGrapes=True)
+    setDatatypeElephino('uint8', sourGrapes=True)
+    setDatatypeLeavesTotal('uint8', sourGrapes=True)
+    from mapFolding.syntheticModules import numba_countSequential
+    algorithmSource: ModuleType = numba_countSequential
+    Z0Z_setDatatypeModuleScalar('numba')
+    Z0Z_setDecoratorCallable('jit')
+    writeJobNumba(listDimensions, 'countSequential', algorithmSource, parametersNumbaDEFAULT)
+
+if __name__ == '__main__':
+    mainBig()
+
+    mainSmall()
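A note on the AST plumbing used above: `makeAstImport` builds an `ast.ImportFrom` node and `writeJobNumba` later unparses a whole synthesized module back to source text. Here is a minimal, self-contained sketch of that round trip using only the standard library; the dotted module path and callable name are placeholders, not values taken from mapFolding.

```python
import ast

# Hypothetical inputs standing in for mapFolding's package/module/callable names.
packageDotPath = "somePackage.syntheticModules.numba_countSequential"
callableTarget = "countSequential"

# Build `from somePackage.syntheticModules.numba_countSequential import countSequential`.
nodeImport = ast.ImportFrom(
    module=packageDotPath,
    names=[ast.alias(name=callableTarget, asname=None)],
    level=0,
)

# Wrap the node in a module, fill in line/column info, and render it as source text.
astModule = ast.Module(body=[nodeImport], type_ignores=[])
ast.fix_missing_locations(astModule)
print(ast.unparse(astModule))
# from somePackage.syntheticModules.numba_countSequential import countSequential
```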
mapFolding/syntheticModules/numba_countInitialize.py
CHANGED
@@ -1,10 +1,14 @@
-from …
-import …
-from …
-import …
-from …
+from mapFolding import indexTrack
+from mapFolding import indexMy
+from numba import uint8
+from numba import jit
+from numpy import ndarray
+from numpy import dtype
+from numpy import integer
+from typing import Any
+from typing import Tuple
 
-@…
+@jit((uint8[:, :, ::1], uint8[::1], uint8[::1], uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=False, no_cpython_wrapper=False, nopython=True, parallel=False)
 def countInitialize(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]], gapsWhere: ndarray[Tuple[int], dtype[integer[Any]]], my: ndarray[Tuple[int], dtype[integer[Any]]], track: ndarray[Tuple[int, int], dtype[integer[Any]]]) -> None:
     while my[indexMy.leaf1ndex.value]:
         if my[indexMy.leaf1ndex.value] <= 1 or track[indexTrack.leafBelow.value, 0] == 1:
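The rewritten `@jit` decorator above is an example of eager compilation: because an explicit Numba signature is supplied, the function is compiled at import time for exactly those array types instead of waiting for the first call. A minimal sketch of the same pattern, assuming an illustrative function name and only a few of the keyword options shown above:

```python
import numpy as np
from numba import jit, uint8

# uint8[::1] is a C-contiguous 1-D uint8 array; passing a tuple of argument
# types makes Numba compile eagerly, at import time, for exactly that layout.
@jit((uint8[::1],), nopython=True, cache=True, fastmath=True)
def incrementEverything(values):   # illustrative name, not part of mapFolding
    for index in range(values.shape[0]):
        values[index] += 1

data = np.zeros(5, dtype=np.uint8)
incrementEverything(data)
print(data)   # [1 1 1 1 1]
```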
mapFolding/syntheticModules/numba_countParallel.py
CHANGED
@@ -1,21 +1,27 @@
-from mapFolding import …
-from …
-import …
-from …
-import …
+from mapFolding import indexTrack
+from mapFolding import indexMy
+from numba import uint8
+from numba import jit
+from numba import int64
+from numba import prange
+from numpy import ndarray
+from numpy import dtype
+from numpy import integer
+from typing import Any
+from typing import Tuple
 
-@…
+@jit((uint8[:, :, ::1], int64[::1], uint8[::1], uint8[::1], uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True, parallel=True)
 def countParallel(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]], foldGroups: ndarray[Tuple[int], dtype[integer[Any]]], gapsWhere: ndarray[Tuple[int], dtype[integer[Any]]], my: ndarray[Tuple[int], dtype[integer[Any]]], track: ndarray[Tuple[int, int], dtype[integer[Any]]]) -> None:
     gapsWherePARALLEL = gapsWhere.copy()
     myPARALLEL = my.copy()
     trackPARALLEL = track.copy()
     taskDivisionsPrange = myPARALLEL[indexMy.taskDivisions.value]
-    for indexSherpa in …
+    for indexSherpa in prange(taskDivisionsPrange):
         groupsOfFolds: int = 0
         gapsWhere = gapsWherePARALLEL.copy()
         my = myPARALLEL.copy()
-        my[indexMy.taskIndex.value] = indexSherpa
         track = trackPARALLEL.copy()
+        my[indexMy.taskIndex.value] = indexSherpa
         while my[indexMy.leaf1ndex.value]:
             if my[indexMy.leaf1ndex.value] <= 1 or track[indexTrack.leafBelow.value, 0] == 1:
                 if my[indexMy.leaf1ndex.value] > foldGroups[-1]:
mapFolding/syntheticModules/numba_countSequential.py
CHANGED
@@ -1,10 +1,15 @@
-import …
-from …
-import …
-from …
-from …
+from mapFolding import indexTrack
+from mapFolding import indexMy
+from numba import uint8
+from numba import jit
+from numba import int64
+from numpy import ndarray
+from numpy import dtype
+from numpy import integer
+from typing import Any
+from typing import Tuple
 
-@…
+@jit((uint8[:, :, ::1], int64[::1], uint8[::1], uint8[::1], uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True, parallel=False)
 def countSequential(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]], foldGroups: ndarray[Tuple[int], dtype[integer[Any]]], gapsWhere: ndarray[Tuple[int], dtype[integer[Any]]], my: ndarray[Tuple[int], dtype[integer[Any]]], track: ndarray[Tuple[int, int], dtype[integer[Any]]]) -> None:
     leafBelow = track[indexTrack.leafBelow.value]
     gapRangeStart = track[indexTrack.gapRangeStart.value]
@@ -20,33 +25,33 @@ def countSequential(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer
     gap1ndex = my[indexMy.gap1ndex.value]
     taskIndex = my[indexMy.taskIndex.value]
     groupsOfFolds: int = 0
-    doFindGaps = True
     while leaf1ndex:
-        if …
-… (old lines 26-49 are truncated in the source diff)
+        if leaf1ndex <= 1 or leafBelow[0] == 1:
+            if leaf1ndex > foldGroups[-1]:
+                groupsOfFolds += 1
+            else:
+                dimensionsUnconstrained = dimensionsTotal
+                gap1ndexCeiling = gapRangeStart[leaf1ndex - 1]
+                indexDimension = 0
+                while indexDimension < dimensionsTotal:
+                    if connectionGraph[indexDimension, leaf1ndex, leaf1ndex] == leaf1ndex:
+                        dimensionsUnconstrained -= 1
+                    else:
+                        leafConnectee = connectionGraph[indexDimension, leaf1ndex, leaf1ndex]
+                        while leafConnectee != leaf1ndex:
+                            gapsWhere[gap1ndexCeiling] = leafConnectee
+                            if countDimensionsGapped[leafConnectee] == 0:
+                                gap1ndexCeiling += 1
+                            countDimensionsGapped[leafConnectee] += 1
+                            leafConnectee = connectionGraph[indexDimension, leaf1ndex, leafBelow[leafConnectee]]
+                    indexDimension += 1
+                indexMiniGap = gap1ndex
+                while indexMiniGap < gap1ndexCeiling:
+                    gapsWhere[gap1ndex] = gapsWhere[indexMiniGap]
+                    if countDimensionsGapped[gapsWhere[indexMiniGap]] == dimensionsUnconstrained:
+                        gap1ndex += 1
+                    countDimensionsGapped[gapsWhere[indexMiniGap]] = 0
+                    indexMiniGap += 1
         while leaf1ndex and gap1ndex == gapRangeStart[leaf1ndex - 1]:
             leaf1ndex -= 1
             leafBelow[leafAbove[leaf1ndex]] = leafBelow[leaf1ndex]
mapFolding/syntheticModules/numba_doTheNeedful.py
CHANGED
@@ -1,28 +1,18 @@
-from mapFolding import indexMy
-from …
-from …
-import …
-import …
+from mapFolding import indexMy
+from numba import uint8
+from numba import jit
+from numba import int64
+from numpy import ndarray
+from numpy import dtype
+from numpy import integer
+from typing import Any
+from typing import Tuple
 from mapFolding.syntheticModules.numba_countInitialize import countInitialize
 from mapFolding.syntheticModules.numba_countParallel import countParallel
 from mapFolding.syntheticModules.numba_countSequential import countSequential
 
-@…
+@jit((uint8[:, :, ::1], int64[::1], uint8[::1], uint8[::1], uint8[::1], uint8[:, ::1]), _nrt=True, boundscheck=True, cache=True, error_model='python', fastmath=False, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=False, no_cpython_wrapper=False, nopython=True, parallel=False)
 def doTheNeedful(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]], foldGroups: ndarray[Tuple[int], dtype[integer[Any]]], gapsWhere: ndarray[Tuple[int], dtype[integer[Any]]], mapShape: ndarray[Tuple[int], dtype[integer[Any]]], my: ndarray[Tuple[int], dtype[integer[Any]]], track: ndarray[Tuple[int, int], dtype[integer[Any]]]) -> None:
-    """What in tarnation is this stupid module and function?
-
-    - This function is not in the same module as `countFolds` so that we can delay Numba just-in-time (jit) compilation of this function and the finalization of its settings until we are ready.
-    - This function is not in the same module as the next function, which does the hard work, so that we can delay `numba.jit` compilation of the next function.
-    - This function is "jitted" but the next function is super jitted, which makes it too arrogant to talk to plebian Python functions. It will, however, reluctantly talk to basic jitted functions.
-    - So this module can talk to the next function, and because this module isn't as arrogant, it will talk to the low-class `countFolds` that called this function. Well, with a few restrictions, of course:
-        - No `TypedDict`
-        - The plebs must clean up their own memory problems
-        - No oversized integers
-        - No global variables, only global constants
-        - It won't accept pleb nonlocal variables either
-        - Python "class": they are all inferior to the jit class
-        - No `**kwargs`
-        - and just a few dozen-jillion other things."""
     countInitialize(connectionGraph, gapsWhere, my, track)
     if my[indexMy.taskDivisions.value] > 0:
         countParallel(connectionGraph, foldGroups, gapsWhere, my, track)
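The docstring removed above described the layering this module relies on: a lightly jitted dispatcher that ordinary Python can call, which in turn calls the aggressively jitted counting functions. A rough, hypothetical sketch of that calling pattern (function names are illustrative, not mapFolding's):

```python
import numpy as np
from numba import jit

@jit(nopython=True, cache=True)
def worker(values):                # stands in for countSequential / countParallel
    for index in range(values.shape[0]):
        values[index] += 1

@jit(nopython=True)
def dispatcher(values):            # stands in for doTheNeedful
    worker(values)                 # one jitted function may call another directly

data = np.zeros(4, dtype=np.uint8)
dispatcher(data)                   # ordinary Python code calls the dispatcher
print(data)                        # [1 1 1 1]
```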
mapFolding/theDao.py
CHANGED
@@ -1,10 +1,10 @@
 from mapFolding import indexMy, indexTrack
+from numba import prange
 from numpy import dtype, integer, ndarray
 from typing import Any, Tuple
-import numba
-import numpy
 
 def activeGapIncrement(my: ndarray[Tuple[int], dtype[integer[Any]]]) -> None:
+    # `.value` is not necessary for this module or most modules. But, this module is transformed into Numba "jitted" functions, and Numba won't use `Enum` for an index without `.value`.
     my[indexMy.gap1ndex.value] += 1
 
 def activeLeafGreaterThan0Condition(my: ndarray[Tuple[int], dtype[integer[Any]]]) -> Any:
@@ -36,9 +36,6 @@ def countGaps(gapsWhere: ndarray[Tuple[int], dtype[integer[Any]]], my: ndarray[T
     gap1ndexCeilingIncrement(my=my)
     track[indexTrack.countDimensionsGapped.value, my[indexMy.leafConnectee.value]] += 1
 
-def dimension1ndexIncrement(my: ndarray[Tuple[int], dtype[integer[Any]]]) -> None:
-    my[indexMy.indexDimension.value] += 1
-
 def dimensionsUnconstrainedCondition(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]], my: ndarray[Tuple[int], dtype[integer[Any]]]) -> Any:
     return connectionGraph[my[indexMy.indexDimension.value], my[indexMy.leaf1ndex.value], my[indexMy.leaf1ndex.value]] == my[indexMy.leaf1ndex.value]
 
@@ -56,6 +53,9 @@ def findGapsInitializeVariables(my: ndarray[Tuple[int], dtype[integer[Any]]], tr
     my[indexMy.gap1ndexCeiling.value] = track[indexTrack.gapRangeStart.value, my[indexMy.leaf1ndex.value] - 1]
     my[indexMy.indexDimension.value] = 0
 
+def indexDimensionIncrement(my: ndarray[Tuple[int], dtype[integer[Any]]]) -> None:
+    my[indexMy.indexDimension.value] += 1
+
 def indexMiniGapIncrement(my: ndarray[Tuple[int], dtype[integer[Any]]]) -> None:
     my[indexMy.indexMiniGap.value] += 1
 
@@ -81,7 +81,7 @@ def leafConnecteeUpdate(connectionGraph: ndarray[Tuple[int, int, int], dtype[int
 def loopingLeavesConnectedToActiveLeaf(my: ndarray[Tuple[int], dtype[integer[Any]]]) -> Any:
     return my[indexMy.leafConnectee.value] != my[indexMy.leaf1ndex.value]
 
-def …
+def loopUpToDimensionsTotal(my: ndarray[Tuple[int], dtype[integer[Any]]]) -> Any:
     return my[indexMy.indexDimension.value] < my[indexMy.dimensionsTotal.value]
 
 def loopingToActiveGapCeiling(my: ndarray[Tuple[int], dtype[integer[Any]]]) -> Any:
@@ -103,13 +103,15 @@ def thereAreComputationDivisionsYouMightSkip(my: ndarray[Tuple[int], dtype[integ
     return my[indexMy.leaf1ndex.value] != my[indexMy.taskDivisions.value] or my[indexMy.leafConnectee.value] % my[indexMy.taskDivisions.value] == my[indexMy.taskIndex.value]
 
 def countInitialize(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]]
-…
-…
-…
+                    , gapsWhere: ndarray[Tuple[int] , dtype[integer[Any]]]
+                    , my: ndarray[Tuple[int] , dtype[integer[Any]]]
+                    , track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
+                    ) -> None:
+
     while activeLeafGreaterThan0Condition(my=my):
         if activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track):
             findGapsInitializeVariables(my=my, track=track)
-            while …
+            while loopUpToDimensionsTotal(my=my):
                 if dimensionsUnconstrainedCondition(connectionGraph=connectionGraph, my=my):
                     dimensionsUnconstrainedDecrement(my=my)
                 else:
@@ -117,7 +119,7 @@ def countInitialize(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer
                     while loopingLeavesConnectedToActiveLeaf(my=my):
                         countGaps(gapsWhere=gapsWhere, my=my, track=track)
                         leafConnecteeUpdate(connectionGraph=connectionGraph, my=my, track=track)
-…
+                indexDimensionIncrement(my=my)
             if allDimensionsAreUnconstrained(my=my):
                 insertUnconstrainedLeaf(gapsWhere=gapsWhere, my=my)
             indexMiniGapInitialization(my=my)
@@ -130,27 +132,34 @@ def countInitialize(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer
             return
 
 def countParallel(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]]
-, …
-, …
-, …
-, …
+                  , foldGroups: ndarray[Tuple[int] , dtype[integer[Any]]]
+                  , gapsWhere: ndarray[Tuple[int] , dtype[integer[Any]]]
+                  , my: ndarray[Tuple[int] , dtype[integer[Any]]]
+                  , track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
+                  ) -> None:
+
     gapsWherePARALLEL = gapsWhere.copy()
     myPARALLEL = my.copy()
     trackPARALLEL = track.copy()
+
     taskDivisionsPrange = myPARALLEL[indexMy.taskDivisions.value]
-…
+
+    for indexSherpa in prange(taskDivisionsPrange):
         groupsOfFolds: int = 0
+
        gapsWhere = gapsWherePARALLEL.copy()
        my = myPARALLEL.copy()
-        my[indexMy.taskIndex.value] = indexSherpa
        track = trackPARALLEL.copy()
+
+        my[indexMy.taskIndex.value] = indexSherpa
+
        while activeLeafGreaterThan0Condition(my=my):
            if activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track):
                if activeLeafGreaterThanLeavesTotalCondition(foldGroups=foldGroups, my=my):
                    groupsOfFolds += 1
                else:
                    findGapsInitializeVariables(my=my, track=track)
-                    while …
+                    while loopUpToDimensionsTotal(my=my):
                        if dimensionsUnconstrainedCondition(connectionGraph=connectionGraph, my=my):
                            dimensionsUnconstrainedDecrement(my=my)
                        else:
@@ -159,7 +168,7 @@ def countParallel(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[A
                                 if thereAreComputationDivisionsYouMightSkip(my=my):
                                     countGaps(gapsWhere=gapsWhere, my=my, track=track)
                                 leafConnecteeUpdate(connectionGraph=connectionGraph, my=my, track=track)
-…
+                        indexDimensionIncrement(my=my)
                     indexMiniGapInitialization(my=my)
                     while loopingToActiveGapCeiling(my=my):
                         filterCommonGaps(gapsWhere=gapsWhere, my=my, track=track)
@@ -170,28 +179,34 @@ def countParallel(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[A
                 placeLeaf(gapsWhere=gapsWhere, my=my, track=track)
         foldGroups[my[indexMy.taskIndex.value]] = groupsOfFolds
 
-def countSequential(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]]
+def countSequential( connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]]
+                    , foldGroups: ndarray[Tuple[int] , dtype[integer[Any]]]
+                    , gapsWhere: ndarray[Tuple[int] , dtype[integer[Any]]]
+                    , my: ndarray[Tuple[int] , dtype[integer[Any]]]
+                    , track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
+                    ) -> None:
+
     groupsOfFolds: int = 0
-…
+
     while activeLeafGreaterThan0Condition(my=my):
-        if …
-… (old lines 178-194 are truncated in the source diff)
+        if activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track):
+            if activeLeafGreaterThanLeavesTotalCondition(foldGroups=foldGroups, my=my):
+                groupsOfFolds += 1
+            else:
+                findGapsInitializeVariables(my=my, track=track)
+                while loopUpToDimensionsTotal(my=my):
+                    if dimensionsUnconstrainedCondition(connectionGraph=connectionGraph, my=my):
+                        dimensionsUnconstrainedDecrement(my=my)
+                    else:
+                        leafConnecteeInitialization(connectionGraph=connectionGraph, my=my)
+                        while loopingLeavesConnectedToActiveLeaf(my=my):
+                            countGaps(gapsWhere=gapsWhere, my=my, track=track)
+                            leafConnecteeUpdate(connectionGraph=connectionGraph, my=my, track=track)
+                    indexDimensionIncrement(my=my)
+                indexMiniGapInitialization(my=my)
+                while loopingToActiveGapCeiling(my=my):
+                    filterCommonGaps(gapsWhere=gapsWhere, my=my, track=track)
+                    indexMiniGapIncrement(my=my)
         while backtrackCondition(my=my, track=track):
             backtrack(my=my, track=track)
         if placeLeafCondition(my=my):
@@ -199,12 +214,13 @@ def countSequential(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer
     foldGroups[my[indexMy.taskIndex.value]] = groupsOfFolds
 
 def doTheNeedful(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]]
-…
-…
-…
-…
-…
-…
+                 , foldGroups: ndarray[Tuple[int] , dtype[integer[Any]]]
+                 , gapsWhere: ndarray[Tuple[int] , dtype[integer[Any]]]
+                 , mapShape: ndarray[Tuple[int] , dtype[integer[Any]]]
+                 , my: ndarray[Tuple[int] , dtype[integer[Any]]]
+                 , track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
+                 ) -> None:
+
     countInitialize(connectionGraph, gapsWhere, my, track)
 
     if my[indexMy.taskDivisions.value] > 0:
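The comment added to theDao.py above says that Numba will not accept an `Enum` member as an array index without `.value`. A tiny sketch of the difference, using a hypothetical enum rather than mapFolding's `indexMy`:

```python
import enum
import numpy as np

class indexDemo(enum.IntEnum):      # hypothetical stand-in for indexMy / indexTrack
    gap1ndex = 0
    leaf1ndex = 1

my = np.zeros(2, dtype=np.uint8)
my[indexDemo.gap1ndex.value] += 1   # works in plain Python and, per the note above, in Numba-compiled code
my[indexDemo.leaf1ndex] = 3         # fine in plain Python; the note says Numba needs the explicit .value
print(my)                           # [1 3]
```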
mapFolding/theSSOT.py
CHANGED
@@ -1,6 +1,6 @@
 from collections import defaultdict
 from mapFolding.theSSOTnumba import *
-from numpy import integer
+from numpy import dtype, integer, ndarray
 from types import ModuleType
 from typing import Any, Callable, Dict, Final, Optional, Tuple, Type, TYPE_CHECKING, cast
 import enum
@@ -73,14 +73,14 @@ def getDispatcherCallable() -> Callable[..., None]:
     from mapFolding.syntheticModules import numba_doTheNeedful
     return cast(Callable[..., None], numba_doTheNeedful.doTheNeedful)
 
-# NOTE I want this _concept_ to be well implemented and usable everywhere: Python, Numba, Jax, CUDA, idc
+# NOTE I want this _concept_, not necessarily this method, to be well implemented and usable everywhere: Python, Numba, Jax, CUDA, idc
 class computationState(TypedDict):
-    connectionGraph: …
-    foldGroups: …
-    gapsWhere: …
-    mapShape: …
-    my: …
-    track: …
+    connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]]
+    foldGroups: ndarray[Tuple[int] , dtype[integer[Any]]]
+    gapsWhere: ndarray[Tuple[int] , dtype[integer[Any]]]
+    mapShape: ndarray[Tuple[int] , dtype[integer[Any]]]
+    my: ndarray[Tuple[int] , dtype[integer[Any]]]
+    track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
 
 @enum.verify(enum.CONTINUOUS, enum.UNIQUE) if sys.version_info >= (3, 11) else lambda x: x
 class EnumIndices(enum.IntEnum):
@@ -249,3 +249,27 @@ def hackSSOTdatatype(identifier: str) -> str:
     elif RubeGoldBerg == 'datatypeLeavesTotal':
         return _get_datatype('leavesTotal')
     raise Exception("Dude, you forgot to set a value in `hackSSOTdatatype`.")
+
+_datatypeModuleScalar = 'numba'
+_decoratorCallable = 'jit'
+def Z0Z_getDatatypeModuleScalar() -> str:
+    return _datatypeModuleScalar
+
+def Z0Z_setDatatypeModuleScalar(moduleName: str) -> str:
+    global _datatypeModuleScalar
+    _datatypeModuleScalar = moduleName
+    return _datatypeModuleScalar
+
+def Z0Z_getDecoratorCallable() -> str:
+    return _decoratorCallable
+
+def Z0Z_setDecoratorCallable(decoratorName: str) -> str:
+    global _decoratorCallable
+    _decoratorCallable = decoratorName
+    return _decoratorCallable
+
+class FREAKOUT(Exception):
+    pass
+
+# This identifier is declared in theDao.py. Two sources of truth is better than 900.
+Z0Z_identifierCountFolds = 'groupsOfFolds'
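The `computationState` TypedDict now spells out the ndarray types it carries. As a rough sketch of how such a state dictionary could be populated, with purely illustrative shapes and dtypes rather than anything mapFolding actually computes:

```python
from typing import Any, Tuple, TypedDict
import numpy as np
from numpy import dtype, integer, ndarray

class computationStateDemo(TypedDict):   # mirrors the fields shown above
    connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]]
    foldGroups: ndarray[Tuple[int], dtype[integer[Any]]]
    gapsWhere: ndarray[Tuple[int], dtype[integer[Any]]]
    mapShape: ndarray[Tuple[int], dtype[integer[Any]]]
    my: ndarray[Tuple[int], dtype[integer[Any]]]
    track: ndarray[Tuple[int, int], dtype[integer[Any]]]

state: computationStateDemo = {
    'connectionGraph': np.zeros((3, 8, 8), dtype=np.uint8),   # illustrative shape
    'foldGroups': np.zeros(4, dtype=np.int64),
    'gapsWhere': np.zeros(64, dtype=np.uint8),
    'mapShape': np.array([2, 4], dtype=np.uint8),
    'my': np.zeros(16, dtype=np.uint8),
    'track': np.zeros((4, 8), dtype=np.uint8),
}
print(state['mapShape'])   # [2 4]
```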
mapFolding/theSSOTnumba.py
CHANGED
@@ -1,3 +1,4 @@
+"""I have so much truth, I need two files to contain it all!"""
 """TODO learn how to use this efficiently and effectively to solve problems, be DRY, and have SSOT."""
 from typing import Final, TYPE_CHECKING, Dict, Any, Union, Callable, Tuple, Any
 import numba
@@ -33,7 +34,7 @@ Old notes that are not entirely accurate.
 | `no_cpython_wrapper` | Disable Python C-API wrapper generation | Size | Smallest | Exclusionary |
 
 """
-
+# NOTE Deepseek removed forceinline=True, inline='always'
 # TODO try to implement all possible parameters, but use `NotRequired` for the more esoteric ones
 class ParametersNumba(TypedDict):
     _dbg_extend_lifetimes: NotRequired[bool]