mapFolding 0.8.0__py3-none-any.whl → 0.8.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/__init__.py +33 -4
- mapFolding/basecamp.py +14 -0
- mapFolding/beDRY.py +16 -1
- mapFolding/filesystem.py +124 -90
- mapFolding/noHomeYet.py +12 -0
- mapFolding/oeis.py +16 -1
- mapFolding/reference/__init__.py +0 -0
- mapFolding/reference/flattened.py +46 -45
- mapFolding/reference/hunterNumba.py +4 -4
- mapFolding/reference/irvineJavaPort.py +1 -1
- mapFolding/reference/lunnanNumpy.py +3 -4
- mapFolding/reference/lunnanWhile.py +5 -7
- mapFolding/reference/rotatedEntryPoint.py +2 -3
- mapFolding/someAssemblyRequired/__init__.py +29 -0
- mapFolding/someAssemblyRequired/getLLVMforNoReason.py +32 -14
- mapFolding/someAssemblyRequired/ingredientsNumba.py +22 -1
- mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py +196 -0
- mapFolding/someAssemblyRequired/synthesizeNumbaJobVESTIGIAL.py +3 -4
- mapFolding/someAssemblyRequired/transformDataStructures.py +162 -0
- mapFolding/someAssemblyRequired/transformationTools.py +216 -199
- mapFolding/theDao.py +19 -5
- mapFolding/theSSOT.py +19 -1
- {mapfolding-0.8.0.dist-info → mapfolding-0.8.1.dist-info}/METADATA +50 -44
- mapfolding-0.8.1.dist-info/RECORD +39 -0
- {mapfolding-0.8.0.dist-info → mapfolding-0.8.1.dist-info}/WHEEL +1 -1
- mapFolding/reference/lunnan.py +0 -153
- mapFolding/someAssemblyRequired/Z0Z_workbench.py +0 -350
- mapFolding/someAssemblyRequired/synthesizeDataConverters.py +0 -117
- mapFolding/syntheticModules/numbaCountHistoricalExample.py +0 -158
- mapFolding/syntheticModules/numba_doTheNeedfulHistoricalExample.py +0 -13
- mapfolding-0.8.0.dist-info/RECORD +0 -41
- {mapfolding-0.8.0.dist-info → mapfolding-0.8.1.dist-info}/entry_points.txt +0 -0
- {mapfolding-0.8.0.dist-info → mapfolding-0.8.1.dist-info/licenses}/LICENSE +0 -0
- {mapfolding-0.8.0.dist-info → mapfolding-0.8.1.dist-info}/top_level.txt +0 -0
|
@@ -1,350 +0,0 @@
|
|
|
1
|
-
from autoflake import fix_code as autoflake_fix_code
|
|
2
|
-
from mapFolding.filesystem import writeStringToHere
|
|
3
|
-
from mapFolding.someAssemblyRequired import (
|
|
4
|
-
ast_Identifier,
|
|
5
|
-
extractFunctionDef,
|
|
6
|
-
ifThis,
|
|
7
|
-
IngredientsFunction,
|
|
8
|
-
IngredientsModule,
|
|
9
|
-
LedgerOfImports,
|
|
10
|
-
Make,
|
|
11
|
-
makeDictionaryReplacementStatements,
|
|
12
|
-
NodeCollector,
|
|
13
|
-
NodeReplacer,
|
|
14
|
-
RecipeSynthesizeFlow,
|
|
15
|
-
strDotStrCuzPyStoopid,
|
|
16
|
-
Then,
|
|
17
|
-
)
|
|
18
|
-
from mapFolding.someAssemblyRequired.ingredientsNumba import decorateCallableWithNumba
|
|
19
|
-
from mapFolding.someAssemblyRequired.synthesizeDataConverters import shatter_dataclassesDOTdataclass
|
|
20
|
-
from mapFolding.theSSOT import raiseIfNoneGitHubIssueNumber3
|
|
21
|
-
from pathlib import Path
|
|
22
|
-
import ast
|
|
23
|
-
|
|
24
|
-
# Would `LibCST` be better than `ast` in some cases? https://github.com/hunterhogan/mapFolding/issues/7
|
|
25
|
-
|
|
26
|
-
def Z0Z_alphaTest_putModuleOnDisk(ingredients: IngredientsModule, recipeFlow: RecipeSynthesizeFlow):
|
|
27
|
-
# Physical namespace
|
|
28
|
-
filenameStem: str = recipeFlow.moduleDispatcher
|
|
29
|
-
fileExtension: str = recipeFlow.fileExtension
|
|
30
|
-
pathPackage: Path = Path(recipeFlow.pathPackage)
|
|
31
|
-
|
|
32
|
-
# Physical and logical namespace
|
|
33
|
-
packageName: ast_Identifier | None = recipeFlow.packageName # module name of the package, if any
|
|
34
|
-
logicalPathINFIX: ast_Identifier | strDotStrCuzPyStoopid | None = recipeFlow.Z0Z_flowLogicalPathRoot
|
|
35
|
-
|
|
36
|
-
def _getLogicalPathParent() -> str | None:
|
|
37
|
-
listModules: list[ast_Identifier] = []
|
|
38
|
-
if packageName:
|
|
39
|
-
listModules.append(packageName)
|
|
40
|
-
if logicalPathINFIX:
|
|
41
|
-
listModules.append(logicalPathINFIX)
|
|
42
|
-
if listModules:
|
|
43
|
-
return '.'.join(listModules)
|
|
44
|
-
return None
|
|
45
|
-
|
|
46
|
-
def _getLogicalPathAbsolute() -> str:
|
|
47
|
-
listModules: list[ast_Identifier] = []
|
|
48
|
-
logicalPathParent: str | None = _getLogicalPathParent()
|
|
49
|
-
if logicalPathParent:
|
|
50
|
-
listModules.append(logicalPathParent)
|
|
51
|
-
listModules.append(filenameStem)
|
|
52
|
-
return '.'.join(listModules)
|
|
53
|
-
|
|
54
|
-
def getPathFilename():
|
|
55
|
-
pathRoot: Path = pathPackage
|
|
56
|
-
filename: str = filenameStem + fileExtension
|
|
57
|
-
if logicalPathINFIX:
|
|
58
|
-
whyIsThisStillAThing: list[str] = logicalPathINFIX.split('.')
|
|
59
|
-
pathRoot = pathRoot.joinpath(*whyIsThisStillAThing)
|
|
60
|
-
return pathRoot.joinpath(filename)
|
|
61
|
-
|
|
62
|
-
def absoluteImport() -> ast.Import:
|
|
63
|
-
return Make.astImport(_getLogicalPathAbsolute())
|
|
64
|
-
|
|
65
|
-
def absoluteImportFrom() -> ast.ImportFrom:
|
|
66
|
-
""" `from . import theModule` """
|
|
67
|
-
logicalPathParent: str = _getLogicalPathParent() or '.'
|
|
68
|
-
return Make.astImportFrom(logicalPathParent, [Make.astAlias(filenameStem)])
|
|
69
|
-
|
|
70
|
-
def writeModule() -> None:
|
|
71
|
-
astModule = ingredients.export()
|
|
72
|
-
ast.fix_missing_locations(astModule)
|
|
73
|
-
pythonSource: str = ast.unparse(astModule)
|
|
74
|
-
if not pythonSource: raise raiseIfNoneGitHubIssueNumber3
|
|
75
|
-
autoflake_additional_imports: list[str] = ingredients.imports.exportListModuleNames()
|
|
76
|
-
if packageName:
|
|
77
|
-
autoflake_additional_imports.append(packageName)
|
|
78
|
-
pythonSource = autoflake_fix_code(pythonSource, autoflake_additional_imports, expand_star_imports=False, remove_all_unused_imports=False, remove_duplicate_keys = False, remove_unused_variables = False,)
|
|
79
|
-
pathFilename = getPathFilename()
|
|
80
|
-
writeStringToHere(pythonSource, pathFilename)
|
|
81
|
-
|
|
82
|
-
writeModule()
|
|
83
|
-
|
|
84
|
-
def inlineThisFunctionWithTheseValues(astFunctionDef: ast.FunctionDef, dictionaryReplacementStatements: dict[str, ast.stmt | list[ast.stmt]]) -> ast.FunctionDef:
|
|
85
|
-
class FunctionInliner(ast.NodeTransformer):
|
|
86
|
-
def __init__(self, dictionaryReplacementStatements: dict[str, ast.stmt | list[ast.stmt]]) -> None:
|
|
87
|
-
self.dictionaryReplacementStatements = dictionaryReplacementStatements
|
|
88
|
-
|
|
89
|
-
def generic_visit(self, node: ast.AST) -> ast.AST:
|
|
90
|
-
"""Visit all nodes and replace them if necessary."""
|
|
91
|
-
return super().generic_visit(node)
|
|
92
|
-
|
|
93
|
-
def visit_Expr(self, node: ast.Expr) -> ast.AST | list[ast.stmt]:
|
|
94
|
-
"""Visit Expr nodes and replace value if it's a function call in our dictionary."""
|
|
95
|
-
if ifThis.CallDoesNotCallItselfAndNameDOTidIsIn(self.dictionaryReplacementStatements)(node.value):
|
|
96
|
-
return self.dictionaryReplacementStatements[node.value.func.id] # type: ignore[attr-defined]
|
|
97
|
-
return node
|
|
98
|
-
|
|
99
|
-
def visit_Assign(self, node: ast.Assign) -> ast.AST | list[ast.stmt]:
|
|
100
|
-
"""Visit Assign nodes and replace value if it's a function call in our dictionary."""
|
|
101
|
-
if ifThis.CallDoesNotCallItselfAndNameDOTidIsIn(self.dictionaryReplacementStatements)(node.value):
|
|
102
|
-
return self.dictionaryReplacementStatements[node.value.func.id] # type: ignore[attr-defined]
|
|
103
|
-
return node
|
|
104
|
-
|
|
105
|
-
def visit_Call(self, node: ast.Call) -> ast.AST | list[ast.stmt]:
|
|
106
|
-
"""Replace call nodes with their replacement statements if they're in the dictionary."""
|
|
107
|
-
if ifThis.CallDoesNotCallItselfAndNameDOTidIsIn(self.dictionaryReplacementStatements)(node):
|
|
108
|
-
replacement = self.dictionaryReplacementStatements[node.func.id] # type: ignore[attr-defined]
|
|
109
|
-
if not isinstance(replacement, list):
|
|
110
|
-
return replacement
|
|
111
|
-
return node
|
|
112
|
-
|
|
113
|
-
import copy
|
|
114
|
-
keepGoing = True
|
|
115
|
-
ImaInlineFunction = copy.deepcopy(astFunctionDef)
|
|
116
|
-
while keepGoing:
|
|
117
|
-
ImaInlineFunction = copy.deepcopy(astFunctionDef)
|
|
118
|
-
FunctionInliner(copy.deepcopy(dictionaryReplacementStatements)).visit(ImaInlineFunction)
|
|
119
|
-
if ast.unparse(ImaInlineFunction) == ast.unparse(astFunctionDef):
|
|
120
|
-
keepGoing = False
|
|
121
|
-
else:
|
|
122
|
-
astFunctionDef = copy.deepcopy(ImaInlineFunction)
|
|
123
|
-
return ImaInlineFunction
|
|
124
|
-
|
|
125
|
-
def replaceMatchingASTnodes(astTree: ast.AST, replacementMap: list[tuple[ast.AST, ast.AST]]) -> ast.AST:
|
|
126
|
-
"""Replace matching AST nodes using type-specific visitors.
|
|
127
|
-
|
|
128
|
-
Parameters:
|
|
129
|
-
astTree: The AST to transform
|
|
130
|
-
replacementMap: List of (find, replace) node pairs
|
|
131
|
-
|
|
132
|
-
Returns:
|
|
133
|
-
The transformed AST
|
|
134
|
-
"""
|
|
135
|
-
class TargetedNodeReplacer(ast.NodeTransformer):
|
|
136
|
-
def __init__(self, replacementMap: list[tuple[ast.AST, ast.AST]]) -> None:
|
|
137
|
-
# Group replacements by node type for more efficient lookups
|
|
138
|
-
self.replacementByType: dict[type[ast.AST], list[tuple[ast.AST, ast.AST]]] = {}
|
|
139
|
-
for findNode, replaceNode in replacementMap:
|
|
140
|
-
nodeType = type(findNode)
|
|
141
|
-
if nodeType not in self.replacementByType:
|
|
142
|
-
self.replacementByType[nodeType] = []
|
|
143
|
-
self.replacementByType[nodeType].append((findNode, replaceNode))
|
|
144
|
-
|
|
145
|
-
def visit(self, node: ast.AST) -> ast.AST:
|
|
146
|
-
"""Check if this node should be replaced before continuing traversal."""
|
|
147
|
-
nodeType = type(node)
|
|
148
|
-
if nodeType in self.replacementByType:
|
|
149
|
-
for findNode, replaceNode in self.replacementByType[nodeType]:
|
|
150
|
-
if self.nodesMatchStructurally(node, findNode):
|
|
151
|
-
return replaceNode
|
|
152
|
-
return super().visit(node)
|
|
153
|
-
|
|
154
|
-
def nodesMatchStructurally(self, node1: ast.AST | list, node2: ast.AST | list) -> bool:
|
|
155
|
-
"""Compare two AST nodes structurally, ignoring position information."""
|
|
156
|
-
# Different types can't be equal
|
|
157
|
-
if type(node1) != type(node2):
|
|
158
|
-
return False
|
|
159
|
-
|
|
160
|
-
if isinstance(node1, ast.AST):
|
|
161
|
-
# Compare fields that matter for structural equality
|
|
162
|
-
fields = [f for f in node1._fields
|
|
163
|
-
if f not in ('lineno', 'col_offset', 'end_lineno', 'end_col_offset', 'ctx')]
|
|
164
|
-
|
|
165
|
-
for field in fields:
|
|
166
|
-
smurf1 = getattr(node1, field, None)
|
|
167
|
-
smurf2 = getattr(node2, field, None)
|
|
168
|
-
|
|
169
|
-
if isinstance(smurf1, (ast.AST, list)) and isinstance(smurf2, (ast.AST, list)):
|
|
170
|
-
if not self.nodesMatchStructurally(smurf1, smurf2):
|
|
171
|
-
return False
|
|
172
|
-
elif smurf1 != smurf2:
|
|
173
|
-
return False
|
|
174
|
-
return True
|
|
175
|
-
|
|
176
|
-
elif isinstance(node1, list) and isinstance(node2, list):
|
|
177
|
-
if len(node1) != len(node2):
|
|
178
|
-
return False
|
|
179
|
-
return all(self.nodesMatchStructurally(x, y) for x, y in zip(node1, node2))
|
|
180
|
-
|
|
181
|
-
else:
|
|
182
|
-
# Direct comparison for non-AST objects (strings, numbers, etc.)
|
|
183
|
-
return node1 == node2
|
|
184
|
-
|
|
185
|
-
import copy
|
|
186
|
-
keepGoing = True
|
|
187
|
-
astResult = copy.deepcopy(astTree)
|
|
188
|
-
|
|
189
|
-
while keepGoing:
|
|
190
|
-
astBeforeChange = copy.deepcopy(astResult)
|
|
191
|
-
TargetedNodeReplacer(copy.deepcopy(replacementMap)).visit(astResult)
|
|
192
|
-
|
|
193
|
-
# Check if we've reached a fixed point (no more changes)
|
|
194
|
-
if ast.unparse(astResult) == ast.unparse(astBeforeChange):
|
|
195
|
-
keepGoing = False
|
|
196
|
-
|
|
197
|
-
return astResult
|
|
198
|
-
|
|
199
|
-
def Z0Z_main() -> None:
|
|
200
|
-
numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()
|
|
201
|
-
dictionaryReplacementStatements = makeDictionaryReplacementStatements(numbaFlow.source_astModule)
|
|
202
|
-
# TODO remove hardcoding
|
|
203
|
-
theCountingIdentifierHARDCODED = 'groupsOfFolds'
|
|
204
|
-
theCountingIdentifier = theCountingIdentifierHARDCODED
|
|
205
|
-
|
|
206
|
-
# TODO remember that `sequentialCallable` and `sourceSequentialCallable` are two different values.
|
|
207
|
-
# Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
|
|
208
|
-
|
|
209
|
-
# ===========================================================
|
|
210
|
-
sourcePython = numbaFlow.sourceDispatcherCallable
|
|
211
|
-
astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
|
|
212
|
-
if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
|
|
213
|
-
ingredientsDispatcher = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
|
|
214
|
-
|
|
215
|
-
# sourceParallelCallable
|
|
216
|
-
(astName_dataclassesDOTdataclass, ledgerDataclassANDFragments, listAnnAssign4DataclassUnpack,
|
|
217
|
-
astTuple4AssignTargetsToFragments, listNameDataclassFragments4Parameters, list_ast_argAnnotated4ArgumentsSpecification,
|
|
218
|
-
astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns, astAssignDataclassRepack, list_keyword4DataclassInitialization) = shatter_dataclassesDOTdataclass(
|
|
219
|
-
numbaFlow.logicalPathModuleDataclass, numbaFlow.sourceDataclassIdentifier, numbaFlow.sourceDataclassInstanceTaskDistribution)
|
|
220
|
-
ingredientsDispatcher.imports.update(ledgerDataclassANDFragments)
|
|
221
|
-
|
|
222
|
-
# TODO remove hardcoding
|
|
223
|
-
namespaceHARDCODED = 'concurrencyManager'
|
|
224
|
-
identifierHARDCODED = 'submit'
|
|
225
|
-
namespace = namespaceHARDCODED
|
|
226
|
-
identifier = identifierHARDCODED
|
|
227
|
-
NodeReplacer(
|
|
228
|
-
findThis = ifThis.isAssignAndValueIsCallNamespace_Identifier(namespace, identifier)
|
|
229
|
-
, doThat = Then.insertThisAbove(listAnnAssign4DataclassUnpack)
|
|
230
|
-
).visit(ingredientsDispatcher.astFunctionDef)
|
|
231
|
-
NodeReplacer(
|
|
232
|
-
findThis = ifThis.isCallNamespace_Identifier(namespace, identifier)
|
|
233
|
-
, doThat = Then.replaceWith(Make.astCall(Make.astAttribute(Make.astName(namespace), identifier)
|
|
234
|
-
, listArguments=[Make.astName(numbaFlow.parallelCallable)] + listNameDataclassFragments4Parameters))
|
|
235
|
-
).visit(ingredientsDispatcher.astFunctionDef)
|
|
236
|
-
|
|
237
|
-
CapturedAssign: list[ast.AST] = []
|
|
238
|
-
CapturedCall: list[ast.Call] = []
|
|
239
|
-
findThis = ifThis.isCall
|
|
240
|
-
doThat = [Then.appendTo(CapturedCall)]
|
|
241
|
-
capture = NodeCollector(findThis, doThat)
|
|
242
|
-
|
|
243
|
-
NodeCollector(
|
|
244
|
-
findThis = ifThis.isAssignAndTargets0Is(ifThis.isSubscript_Identifier(numbaFlow.sourceDataclassInstance))
|
|
245
|
-
, doThat = [Then.appendTo(CapturedAssign)
|
|
246
|
-
, lambda node: capture.visit(node)]
|
|
247
|
-
).visit(ingredientsDispatcher.astFunctionDef)
|
|
248
|
-
|
|
249
|
-
newAssign = CapturedAssign[0]
|
|
250
|
-
NodeReplacer(
|
|
251
|
-
findThis = lambda node: ifThis.isSubscript(node) and ifThis.isAttribute(node.value) and ifThis.isCall(node.value.value)
|
|
252
|
-
, doThat = Then.replaceWith(CapturedCall[0])
|
|
253
|
-
).visit(newAssign)
|
|
254
|
-
|
|
255
|
-
NodeReplacer(
|
|
256
|
-
findThis = ifThis.isAssignAndTargets0Is(ifThis.isSubscript_Identifier(numbaFlow.sourceDataclassInstance))
|
|
257
|
-
, doThat = Then.replaceWith(newAssign)
|
|
258
|
-
).visit(ingredientsDispatcher.astFunctionDef)
|
|
259
|
-
|
|
260
|
-
# sourceSequentialCallable
|
|
261
|
-
(astName_dataclassesDOTdataclass, ledgerDataclassANDFragments, listAnnAssign4DataclassUnpack,
|
|
262
|
-
astTuple4AssignTargetsToFragments, listNameDataclassFragments4Parameters, list_ast_argAnnotated4ArgumentsSpecification,
|
|
263
|
-
astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns, astAssignDataclassRepack, list_keyword4DataclassInitialization) = shatter_dataclassesDOTdataclass(
|
|
264
|
-
numbaFlow.logicalPathModuleDataclass, numbaFlow.sourceDataclassIdentifier, numbaFlow.sourceDataclassInstance)
|
|
265
|
-
ingredientsDispatcher.imports.update(ledgerDataclassANDFragments)
|
|
266
|
-
|
|
267
|
-
NodeReplacer(
|
|
268
|
-
findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
|
|
269
|
-
, doThat = Then.insertThisAbove(listAnnAssign4DataclassUnpack)
|
|
270
|
-
).visit(ingredientsDispatcher.astFunctionDef)
|
|
271
|
-
NodeReplacer(
|
|
272
|
-
findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
|
|
273
|
-
# findThis = ifThis.isReturn
|
|
274
|
-
, doThat = Then.insertThisBelow([astAssignDataclassRepack])
|
|
275
|
-
).visit(ingredientsDispatcher.astFunctionDef)
|
|
276
|
-
# TODO reconsider: This calls a function, but I don't inspect the function for its parameters or return.
|
|
277
|
-
NodeReplacer(
|
|
278
|
-
findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
|
|
279
|
-
, doThat = Then.replaceWith(Make.astAssign(listTargets=[astTuple4AssignTargetsToFragments], value=Make.astCall(Make.astName(numbaFlow.sequentialCallable), listNameDataclassFragments4Parameters)))
|
|
280
|
-
).visit(ingredientsDispatcher.astFunctionDef)
|
|
281
|
-
|
|
282
|
-
# ===========================================================
|
|
283
|
-
sourcePython = numbaFlow.sourceInitializeCallable
|
|
284
|
-
astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
|
|
285
|
-
if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
|
|
286
|
-
astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
|
|
287
|
-
ingredientsInitialize = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
|
|
288
|
-
|
|
289
|
-
# ===========================================================
|
|
290
|
-
sourcePython = numbaFlow.sourceParallelCallable
|
|
291
|
-
astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
|
|
292
|
-
if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
|
|
293
|
-
astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
|
|
294
|
-
ingredientsParallel = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
|
|
295
|
-
ingredientsParallel.astFunctionDef.name = numbaFlow.parallelCallable
|
|
296
|
-
ingredientsParallel.astFunctionDef.args = Make.astArgumentsSpecification(args=list_ast_argAnnotated4ArgumentsSpecification)
|
|
297
|
-
NodeReplacer(
|
|
298
|
-
findThis = ifThis.isReturn
|
|
299
|
-
, doThat = Then.replaceWith(Make.astReturn(astTuple4AssignTargetsToFragments))
|
|
300
|
-
).visit(ingredientsParallel.astFunctionDef)
|
|
301
|
-
NodeReplacer(
|
|
302
|
-
findThis = ifThis.isReturn
|
|
303
|
-
# , doThat = Then.replaceWith(Make.astReturn(astTuple4AssignTargetsToFragments))
|
|
304
|
-
, doThat = Then.replaceWith(Make.astReturn(Make.astName(theCountingIdentifier)))
|
|
305
|
-
).visit(ingredientsParallel.astFunctionDef)
|
|
306
|
-
theCountingIdentifierAnnotation = next(
|
|
307
|
-
ast_arg.annotation for ast_arg in list_ast_argAnnotated4ArgumentsSpecification if ast_arg.arg == theCountingIdentifier)
|
|
308
|
-
ingredientsParallel.astFunctionDef.returns = theCountingIdentifierAnnotation
|
|
309
|
-
# ingredientsParallel.astFunctionDef.returns = astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns
|
|
310
|
-
replacementMap = [(statement.value, statement.target) for statement in listAnnAssign4DataclassUnpack]
|
|
311
|
-
ingredientsParallel.astFunctionDef = replaceMatchingASTnodes(
|
|
312
|
-
ingredientsParallel.astFunctionDef, replacementMap) # type: ignore
|
|
313
|
-
# TODO a tool to automatically remove unused variables from the ArgumentsSpecification (return, and returns) _might_ be nice.
|
|
314
|
-
# But, I would need to update the calling function, too.
|
|
315
|
-
ingredientsParallel = decorateCallableWithNumba(ingredientsParallel) # parametersNumbaParallelDEFAULT
|
|
316
|
-
|
|
317
|
-
# ===========================================================
|
|
318
|
-
sourcePython = numbaFlow.sourceSequentialCallable
|
|
319
|
-
astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
|
|
320
|
-
if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
|
|
321
|
-
astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
|
|
322
|
-
ingredientsSequential = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
|
|
323
|
-
ingredientsSequential.astFunctionDef.name = numbaFlow.sequentialCallable
|
|
324
|
-
ingredientsSequential.astFunctionDef.args = Make.astArgumentsSpecification(args=list_ast_argAnnotated4ArgumentsSpecification)
|
|
325
|
-
NodeReplacer(
|
|
326
|
-
findThis = ifThis.isReturn
|
|
327
|
-
, doThat = Then.replaceWith(Make.astReturn(astTuple4AssignTargetsToFragments))
|
|
328
|
-
).visit(ingredientsSequential.astFunctionDef)
|
|
329
|
-
NodeReplacer(
|
|
330
|
-
findThis = ifThis.isReturn
|
|
331
|
-
, doThat = Then.replaceWith(Make.astReturn(astTuple4AssignTargetsToFragments))
|
|
332
|
-
).visit(ingredientsSequential.astFunctionDef)
|
|
333
|
-
ingredientsSequential.astFunctionDef.returns = astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns
|
|
334
|
-
replacementMap = [(statement.value, statement.target) for statement in listAnnAssign4DataclassUnpack]
|
|
335
|
-
ingredientsSequential.astFunctionDef = replaceMatchingASTnodes(
|
|
336
|
-
ingredientsSequential.astFunctionDef, replacementMap) # type: ignore
|
|
337
|
-
# TODO a tool to automatically remove unused variables from the ArgumentsSpecification (return, and returns) _might_ be nice.
|
|
338
|
-
# But, I would need to update the calling function, too.
|
|
339
|
-
ingredientsSequential = decorateCallableWithNumba(ingredientsSequential)
|
|
340
|
-
|
|
341
|
-
ingredientsModuleNumbaUnified = IngredientsModule(
|
|
342
|
-
ingredientsFunction=[ingredientsInitialize,
|
|
343
|
-
ingredientsParallel,
|
|
344
|
-
ingredientsSequential,
|
|
345
|
-
ingredientsDispatcher], imports=LedgerOfImports(numbaFlow.source_astModule))
|
|
346
|
-
|
|
347
|
-
Z0Z_alphaTest_putModuleOnDisk(ingredientsModuleNumbaUnified, numbaFlow)
|
|
348
|
-
|
|
349
|
-
if __name__ == '__main__':
|
|
350
|
-
Z0Z_main()
|
|
@@ -1,117 +0,0 @@
|
|
|
1
|
-
from collections.abc import Sequence
|
|
2
|
-
from importlib import import_module
|
|
3
|
-
from inspect import getsource as inspect_getsource
|
|
4
|
-
from mapFolding.beDRY import outfitCountFolds, validateListDimensions
|
|
5
|
-
from mapFolding.filesystem import getPathFilenameFoldsTotal
|
|
6
|
-
from mapFolding.someAssemblyRequired import (
|
|
7
|
-
ast_Identifier,
|
|
8
|
-
extractClassDef,
|
|
9
|
-
ifThis,
|
|
10
|
-
LedgerOfImports,
|
|
11
|
-
Make,
|
|
12
|
-
NodeCollector,
|
|
13
|
-
strDotStrCuzPyStoopid,
|
|
14
|
-
Then,
|
|
15
|
-
Z0Z_executeActionUnlessDescendantMatches,
|
|
16
|
-
)
|
|
17
|
-
from mapFolding.theSSOT import ComputationState, getSourceAlgorithm
|
|
18
|
-
from pathlib import Path
|
|
19
|
-
from types import ModuleType
|
|
20
|
-
from typing import Any, Literal, overload
|
|
21
|
-
import ast
|
|
22
|
-
import pickle
|
|
23
|
-
|
|
24
|
-
# Would `LibCST` be better than `ast` in some cases? https://github.com/hunterhogan/mapFolding/issues/7
|
|
25
|
-
|
|
26
|
-
def shatter_dataclassesDOTdataclass(logicalPathModule: strDotStrCuzPyStoopid, dataclass_Identifier: ast_Identifier, instance_Identifier: ast_Identifier
|
|
27
|
-
)-> tuple[ast.Name, LedgerOfImports, list[ast.AnnAssign], ast.Tuple, list[ast.Name], list[ast.arg], ast.Subscript, ast.Assign, list[ast.keyword]]:
|
|
28
|
-
"""
|
|
29
|
-
Parameters:
|
|
30
|
-
logicalPathModule: gimme string cuz python is stoopid
|
|
31
|
-
dataclass_Identifier: The identifier of the dataclass to be dismantled.
|
|
32
|
-
instance_Identifier: In the synthesized module/function/scope, the identifier that will be used for the instance.
|
|
33
|
-
"""
|
|
34
|
-
module: ast.Module = ast.parse(inspect_getsource(import_module(logicalPathModule)))
|
|
35
|
-
|
|
36
|
-
dataclass = extractClassDef(dataclass_Identifier, module)
|
|
37
|
-
|
|
38
|
-
if not isinstance(dataclass, ast.ClassDef):
|
|
39
|
-
raise ValueError(f"I could not find {dataclass_Identifier=} in {logicalPathModule=}.")
|
|
40
|
-
|
|
41
|
-
ledgerDataclassANDFragments = LedgerOfImports()
|
|
42
|
-
list_ast_argAnnotated4ArgumentsSpecification: list[ast.arg] = []
|
|
43
|
-
list_keyword4DataclassInitialization: list[ast.keyword] = []
|
|
44
|
-
listAnnAssign4DataclassUnpack: list[ast.AnnAssign] = []
|
|
45
|
-
listAnnotations: list[ast.expr] = []
|
|
46
|
-
listNameDataclassFragments4Parameters: list[ast.Name] = []
|
|
47
|
-
|
|
48
|
-
addToLedgerPredicate = ifThis.isAnnAssignAndAnnotationIsName
|
|
49
|
-
addToLedgerAction = Then.Z0Z_ledger(logicalPathModule, ledgerDataclassANDFragments)
|
|
50
|
-
addToLedger = NodeCollector(addToLedgerPredicate, [addToLedgerAction])
|
|
51
|
-
|
|
52
|
-
exclusionPredicate = ifThis.is_keyword_IdentifierEqualsConstantValue('init', False)
|
|
53
|
-
appendKeywordAction = Then.Z0Z_appendKeywordMirroredTo(list_keyword4DataclassInitialization)
|
|
54
|
-
filteredAppendKeywordAction = Z0Z_executeActionUnlessDescendantMatches(exclusionPredicate, appendKeywordAction) # type: ignore
|
|
55
|
-
|
|
56
|
-
collector = NodeCollector(
|
|
57
|
-
ifThis.isAnnAssignAndTargetIsName,
|
|
58
|
-
[Then.Z0Z_appendAnnAssignOf_nameDOTnameTo(instance_Identifier, listAnnAssign4DataclassUnpack)
|
|
59
|
-
, Then.append_targetTo(listNameDataclassFragments4Parameters) # type: ignore
|
|
60
|
-
, lambda node: addToLedger.visit(node)
|
|
61
|
-
, filteredAppendKeywordAction
|
|
62
|
-
, lambda node: list_ast_argAnnotated4ArgumentsSpecification.append(Make.ast_arg(node.target.id, node.annotation)) # type: ignore
|
|
63
|
-
, lambda node: listAnnotations.append(node.annotation) # type: ignore
|
|
64
|
-
]
|
|
65
|
-
)
|
|
66
|
-
|
|
67
|
-
collector.visit(dataclass)
|
|
68
|
-
|
|
69
|
-
astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns = Make.astSubscript(Make.astName('tuple'), Make.astTuple(listAnnotations))
|
|
70
|
-
|
|
71
|
-
ledgerDataclassANDFragments.addImportFromStr(logicalPathModule, dataclass_Identifier)
|
|
72
|
-
|
|
73
|
-
astName_dataclassesDOTdataclass = Make.astName(dataclass_Identifier)
|
|
74
|
-
astTuple4AssignTargetsToFragments: ast.Tuple = Make.astTuple(listNameDataclassFragments4Parameters, ast.Store())
|
|
75
|
-
astAssignDataclassRepack = Make.astAssign(listTargets=[Make.astName(instance_Identifier)], value=Make.astCall(astName_dataclassesDOTdataclass, list_astKeywords=list_keyword4DataclassInitialization))
|
|
76
|
-
return (astName_dataclassesDOTdataclass, ledgerDataclassANDFragments, listAnnAssign4DataclassUnpack,
|
|
77
|
-
astTuple4AssignTargetsToFragments, listNameDataclassFragments4Parameters, list_ast_argAnnotated4ArgumentsSpecification,
|
|
78
|
-
astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns, astAssignDataclassRepack, list_keyword4DataclassInitialization)
|
|
79
|
-
|
|
80
|
-
@overload
|
|
81
|
-
def makeStateJob(listDimensions: Sequence[int], *, writeJob: Literal[True], **keywordArguments: Any) -> Path: ...
|
|
82
|
-
@overload
|
|
83
|
-
def makeStateJob(listDimensions: Sequence[int], *, writeJob: Literal[False], **keywordArguments: Any) -> ComputationState: ...
|
|
84
|
-
def makeStateJob(listDimensions: Sequence[int], *, writeJob: bool = True, **keywordArguments: Any) -> ComputationState | Path:
|
|
85
|
-
"""
|
|
86
|
-
Creates a computation state job for map folding calculations and optionally saves it to disk.
|
|
87
|
-
|
|
88
|
-
This function initializes a computation state for map folding calculations based on the given dimensions,
|
|
89
|
-
sets up the initial counting configuration, and can optionally save the state to a pickle file.
|
|
90
|
-
|
|
91
|
-
Parameters:
|
|
92
|
-
listDimensions: List of integers representing the dimensions of the map to be folded.
|
|
93
|
-
writeJob (True): Whether to save the state to disk.
|
|
94
|
-
**keywordArguments: Additional keyword arguments to pass to the computation state initialization.
|
|
95
|
-
|
|
96
|
-
Returns:
|
|
97
|
-
stateUniversal|pathFilenameJob: The computation state for the map folding calculations, or
|
|
98
|
-
the path to the saved state file if writeJob is True.
|
|
99
|
-
"""
|
|
100
|
-
mapShape = validateListDimensions(listDimensions)
|
|
101
|
-
stateUniversal: ComputationState = outfitCountFolds(mapShape, **keywordArguments)
|
|
102
|
-
|
|
103
|
-
moduleSource: ModuleType = getSourceAlgorithm()
|
|
104
|
-
# TODO `countInitialize` is hardcoded
|
|
105
|
-
stateUniversal = moduleSource.countInitialize(stateUniversal)
|
|
106
|
-
|
|
107
|
-
if not writeJob:
|
|
108
|
-
return stateUniversal
|
|
109
|
-
|
|
110
|
-
pathFilenameChopChop = getPathFilenameFoldsTotal(stateUniversal.mapShape, None)
|
|
111
|
-
suffix = pathFilenameChopChop.suffix
|
|
112
|
-
pathJob = Path(str(pathFilenameChopChop)[0:-len(suffix)])
|
|
113
|
-
pathJob.mkdir(parents=True, exist_ok=True)
|
|
114
|
-
pathFilenameJob = pathJob / 'stateJob.pkl'
|
|
115
|
-
|
|
116
|
-
pathFilenameJob.write_bytes(pickle.dumps(stateUniversal))
|
|
117
|
-
return pathFilenameJob
|
|
@@ -1,158 +0,0 @@
|
|
|
1
|
-
from mapFolding.theSSOT import indexMy, indexTrack
|
|
2
|
-
from numba import uint16, prange, int64, jit
|
|
3
|
-
from numpy import ndarray, dtype, integer
|
|
4
|
-
from typing import Any
|
|
5
|
-
|
|
6
|
-
@jit((uint16[:, :, ::1], uint16[::1], uint16[::1], uint16[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=False, no_cpython_wrapper=False, nopython=True, parallel=False)
|
|
7
|
-
def countInitialize(connectionGraph: ndarray[tuple[int, int, int], dtype[integer[Any]]], gapsWhere: ndarray[tuple[int], dtype[integer[Any]]], my: ndarray[tuple[int], dtype[integer[Any]]], track: ndarray[tuple[int, int], dtype[integer[Any]]]) -> None:
|
|
8
|
-
while my[indexMy.leaf1ndex] > 0:
|
|
9
|
-
if my[indexMy.leaf1ndex] <= 1 or track[indexTrack.leafBelow, 0] == 1:
|
|
10
|
-
my[indexMy.dimensionsUnconstrained] = my[indexMy.dimensionsTotal]
|
|
11
|
-
my[indexMy.gap1ndexCeiling] = track[indexTrack.gapRangeStart, my[indexMy.leaf1ndex] - 1]
|
|
12
|
-
my[indexMy.indexDimension] = 0
|
|
13
|
-
while my[indexMy.indexDimension] < my[indexMy.dimensionsTotal]:
|
|
14
|
-
if connectionGraph[my[indexMy.indexDimension], my[indexMy.leaf1ndex], my[indexMy.leaf1ndex]] == my[indexMy.leaf1ndex]:
|
|
15
|
-
my[indexMy.dimensionsUnconstrained] -= 1
|
|
16
|
-
else:
|
|
17
|
-
my[indexMy.leafConnectee] = connectionGraph[my[indexMy.indexDimension], my[indexMy.leaf1ndex], my[indexMy.leaf1ndex]]
|
|
18
|
-
while my[indexMy.leafConnectee] != my[indexMy.leaf1ndex]:
|
|
19
|
-
gapsWhere[my[indexMy.gap1ndexCeiling]] = my[indexMy.leafConnectee]
|
|
20
|
-
if track[indexTrack.countDimensionsGapped, my[indexMy.leafConnectee]] == 0:
|
|
21
|
-
my[indexMy.gap1ndexCeiling] += 1
|
|
22
|
-
track[indexTrack.countDimensionsGapped, my[indexMy.leafConnectee]] += 1
|
|
23
|
-
my[indexMy.leafConnectee] = connectionGraph[my[indexMy.indexDimension], my[indexMy.leaf1ndex], track[indexTrack.leafBelow, my[indexMy.leafConnectee]]]
|
|
24
|
-
my[indexMy.indexDimension] += 1
|
|
25
|
-
if not my[indexMy.dimensionsUnconstrained]:
|
|
26
|
-
my[indexMy.indexLeaf] = 0
|
|
27
|
-
while my[indexMy.indexLeaf] < my[indexMy.leaf1ndex]:
|
|
28
|
-
gapsWhere[my[indexMy.gap1ndexCeiling]] = my[indexMy.indexLeaf]
|
|
29
|
-
my[indexMy.gap1ndexCeiling] += 1
|
|
30
|
-
my[indexMy.indexLeaf] += 1
|
|
31
|
-
my[indexMy.indexMiniGap] = my[indexMy.gap1ndex]
|
|
32
|
-
while my[indexMy.indexMiniGap] < my[indexMy.gap1ndexCeiling]:
|
|
33
|
-
gapsWhere[my[indexMy.gap1ndex]] = gapsWhere[my[indexMy.indexMiniGap]]
|
|
34
|
-
if track[indexTrack.countDimensionsGapped, gapsWhere[my[indexMy.indexMiniGap]]] == my[indexMy.dimensionsUnconstrained]:
|
|
35
|
-
my[indexMy.gap1ndex] += 1
|
|
36
|
-
track[indexTrack.countDimensionsGapped, gapsWhere[my[indexMy.indexMiniGap]]] = 0
|
|
37
|
-
my[indexMy.indexMiniGap] += 1
|
|
38
|
-
if my[indexMy.leaf1ndex] > 0:
|
|
39
|
-
my[indexMy.gap1ndex] -= 1
|
|
40
|
-
track[indexTrack.leafAbove, my[indexMy.leaf1ndex]] = gapsWhere[my[indexMy.gap1ndex]]
|
|
41
|
-
track[indexTrack.leafBelow, my[indexMy.leaf1ndex]] = track[indexTrack.leafBelow, track[indexTrack.leafAbove, my[indexMy.leaf1ndex]]]
|
|
42
|
-
track[indexTrack.leafBelow, track[indexTrack.leafAbove, my[indexMy.leaf1ndex]]] = my[indexMy.leaf1ndex]
|
|
43
|
-
track[indexTrack.leafAbove, track[indexTrack.leafBelow, my[indexMy.leaf1ndex]]] = my[indexMy.leaf1ndex]
|
|
44
|
-
track[indexTrack.gapRangeStart, my[indexMy.leaf1ndex]] = my[indexMy.gap1ndex]
|
|
45
|
-
my[indexMy.leaf1ndex] += 1
|
|
46
|
-
if my[indexMy.gap1ndex] > 0:
|
|
47
|
-
return
|
|
48
|
-
|
|
49
|
-
@jit((uint16[:, :, ::1], int64[::1], uint16[::1], uint16[::1], uint16[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True, parallel=True)
def countParallel(connectionGraph: ndarray[tuple[int, int, int], dtype[integer[Any]]], foldGroups: ndarray[tuple[int], dtype[integer[Any]]], gapsWhere: ndarray[tuple[int], dtype[integer[Any]]], my: ndarray[tuple[int], dtype[integer[Any]]], track: ndarray[tuple[int, int], dtype[integer[Any]]]) -> None:
    """Count fold groups, split across ``my[indexMy.taskDivisions]`` parallel tasks.

    Every ``prange`` task works on private copies of ``gapsWhere``, ``my``, and
    ``track``, stamps its own ``taskIndex`` into its copy of ``my``, runs the
    full backtracking search, and publishes its subtotal into
    ``foldGroups[taskIndex]``.  Statement order is load-bearing throughout:
    the linked-list splices and counter updates must not be reordered.
    """
    # Pristine snapshots taken before the parallel loop: every task must start
    # from identical state, and each loop body mutates its own copies.
    gapsWherePARALLEL = gapsWhere.copy()
    myPARALLEL = my.copy()
    trackPARALLEL = track.copy()
    # Hoisted to a plain scalar so prange gets a simple trip count.
    taskDivisionsPrange = myPARALLEL[indexMy.taskDivisions]
    for indexSherpa in prange(taskDivisionsPrange):
        groupsOfFolds: int = 0
        # Per-task private working state.
        gapsWhere = gapsWherePARALLEL.copy()
        my = myPARALLEL.copy()
        track = trackPARALLEL.copy()
        my[indexMy.taskIndex] = indexSherpa
        # Backtracking search: runs until leaf1ndex unwinds to 0.
        while my[indexMy.leaf1ndex] > 0:
            if my[indexMy.leaf1ndex] <= 1 or track[indexTrack.leafBelow, 0] == 1:
                if my[indexMy.leaf1ndex] > foldGroups[-1]:
                    # Complete folding reached (presumably foldGroups[-1]
                    # holds the total leaf count — confirm against caller).
                    groupsOfFolds += 1
                else:
                    # Collect candidate gaps for placing the current leaf.
                    my[indexMy.dimensionsUnconstrained] = my[indexMy.dimensionsTotal]
                    my[indexMy.gap1ndexCeiling] = track[indexTrack.gapRangeStart, my[indexMy.leaf1ndex] - 1]
                    my[indexMy.indexDimension] = 0
                    while my[indexMy.indexDimension] < my[indexMy.dimensionsTotal]:
                        if connectionGraph[my[indexMy.indexDimension], my[indexMy.leaf1ndex], my[indexMy.leaf1ndex]] == my[indexMy.leaf1ndex]:
                            # The leaf maps to itself in this dimension: no
                            # constraint contributed, so lower the target count.
                            my[indexMy.dimensionsUnconstrained] -= 1
                        else:
                            my[indexMy.leafConnectee] = connectionGraph[my[indexMy.indexDimension], my[indexMy.leaf1ndex], my[indexMy.leaf1ndex]]
                            while my[indexMy.leafConnectee] != my[indexMy.leaf1ndex]:
                                # Task filter: only at the division-point leaf
                                # (leaf1ndex == taskDivisions) is work split,
                                # by connectee modulo taskDivisions.
                                if my[indexMy.leaf1ndex] != my[indexMy.taskDivisions] or my[indexMy.leafConnectee] % my[indexMy.taskDivisions] == my[indexMy.taskIndex]:
                                    gapsWhere[my[indexMy.gap1ndexCeiling]] = my[indexMy.leafConnectee]
                                    # Only a first sighting widens the ceiling;
                                    # repeats just bump the per-leaf counter.
                                    if track[indexTrack.countDimensionsGapped, my[indexMy.leafConnectee]] == 0:
                                        my[indexMy.gap1ndexCeiling] += 1
                                    track[indexTrack.countDimensionsGapped, my[indexMy.leafConnectee]] += 1
                                # Advance to the next connectee in this dimension.
                                my[indexMy.leafConnectee] = connectionGraph[my[indexMy.indexDimension], my[indexMy.leaf1ndex], track[indexTrack.leafBelow, my[indexMy.leafConnectee]]]
                        my[indexMy.indexDimension] += 1
                    # Compact gapsWhere: keep only entries whose counter equals
                    # dimensionsUnconstrained, zeroing every counter on the way.
                    my[indexMy.indexMiniGap] = my[indexMy.gap1ndex]
                    while my[indexMy.indexMiniGap] < my[indexMy.gap1ndexCeiling]:
                        gapsWhere[my[indexMy.gap1ndex]] = gapsWhere[my[indexMy.indexMiniGap]]
                        if track[indexTrack.countDimensionsGapped, gapsWhere[my[indexMy.indexMiniGap]]] == my[indexMy.dimensionsUnconstrained]:
                            my[indexMy.gap1ndex] += 1
                        track[indexTrack.countDimensionsGapped, gapsWhere[my[indexMy.indexMiniGap]]] = 0
                        my[indexMy.indexMiniGap] += 1
            # Backtrack while the current leaf has no remaining gaps: unlink it
            # from the above/below chain and step back one leaf.
            while my[indexMy.leaf1ndex] > 0 and my[indexMy.gap1ndex] == track[indexTrack.gapRangeStart, my[indexMy.leaf1ndex] - 1]:
                my[indexMy.leaf1ndex] -= 1
                track[indexTrack.leafBelow, track[indexTrack.leafAbove, my[indexMy.leaf1ndex]]] = track[indexTrack.leafBelow, my[indexMy.leaf1ndex]]
                track[indexTrack.leafAbove, track[indexTrack.leafBelow, my[indexMy.leaf1ndex]]] = track[indexTrack.leafAbove, my[indexMy.leaf1ndex]]
            if my[indexMy.leaf1ndex] > 0:
                # Consume the most recent gap and splice the leaf into the
                # doubly linked above/below chain at that position.
                my[indexMy.gap1ndex] -= 1
                track[indexTrack.leafAbove, my[indexMy.leaf1ndex]] = gapsWhere[my[indexMy.gap1ndex]]
                track[indexTrack.leafBelow, my[indexMy.leaf1ndex]] = track[indexTrack.leafBelow, track[indexTrack.leafAbove, my[indexMy.leaf1ndex]]]
                track[indexTrack.leafBelow, track[indexTrack.leafAbove, my[indexMy.leaf1ndex]]] = my[indexMy.leaf1ndex]
                track[indexTrack.leafAbove, track[indexTrack.leafBelow, my[indexMy.leaf1ndex]]] = my[indexMy.leaf1ndex]
                track[indexTrack.gapRangeStart, my[indexMy.leaf1ndex]] = my[indexMy.gap1ndex]
                my[indexMy.leaf1ndex] += 1
        # Publish this task's subtotal; each task owns a distinct slot.
        foldGroups[my[indexMy.taskIndex]] = groupsOfFolds
-
|
|
103
|
-
@jit((uint16[:, :, ::1], int64[::1], uint16[::1], uint16[::1], uint16[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True, parallel=False)
def countSequential(connectionGraph: ndarray[tuple[int, int, int], dtype[integer[Any]]], foldGroups: ndarray[tuple[int], dtype[integer[Any]]], gapsWhere: ndarray[tuple[int], dtype[integer[Any]]], my: ndarray[tuple[int], dtype[integer[Any]]], track: ndarray[tuple[int, int], dtype[integer[Any]]]) -> None:
    """Count fold groups in a single task (sequential twin of ``countParallel``).

    State is hoisted out of the index-coded arrays up front: rows of ``track``
    become named 1-D views (writes through them persist in ``track``), and the
    scalar counters in ``my`` become locals (integer indexing copies, so ``my``
    itself is never written back).  The only output is
    ``foldGroups[taskIndex] = groupsOfFolds`` at the end.  Statement order is
    load-bearing throughout; do not reorder.
    """
    # Named row views into `track` — these alias `track`, so in-place updates
    # below mutate the caller's array.
    leafBelow = track[indexTrack.leafBelow.value]
    gapRangeStart = track[indexTrack.gapRangeStart.value]
    countDimensionsGapped = track[indexTrack.countDimensionsGapped.value]
    leafAbove = track[indexTrack.leafAbove.value]
    # Scalar state copied out of `my`; loop progress lives in these locals.
    leaf1ndex = my[indexMy.leaf1ndex.value]
    dimensionsUnconstrained = my[indexMy.dimensionsUnconstrained.value]
    dimensionsTotal = my[indexMy.dimensionsTotal.value]
    gap1ndexCeiling = my[indexMy.gap1ndexCeiling.value]
    indexDimension = my[indexMy.indexDimension.value]
    leafConnectee = my[indexMy.leafConnectee.value]
    indexMiniGap = my[indexMy.indexMiniGap.value]
    gap1ndex = my[indexMy.gap1ndex.value]
    taskIndex = my[indexMy.taskIndex.value]
    groupsOfFolds: int = 0
    # Backtracking search: runs until leaf1ndex unwinds to 0.
    while leaf1ndex > 0:
        if leaf1ndex <= 1 or leafBelow[0] == 1:
            if leaf1ndex > foldGroups[-1]:
                # Complete folding reached (presumably foldGroups[-1] holds
                # the total leaf count — confirm against caller).
                groupsOfFolds += 1
            else:
                # Collect candidate gaps for placing the current leaf.
                dimensionsUnconstrained = dimensionsTotal
                gap1ndexCeiling = gapRangeStart[leaf1ndex - 1]
                indexDimension = 0
                while indexDimension < dimensionsTotal:
                    leafConnectee = connectionGraph[indexDimension, leaf1ndex, leaf1ndex]
                    if leafConnectee == leaf1ndex:
                        # The leaf maps to itself in this dimension: no
                        # constraint contributed, so lower the target count.
                        dimensionsUnconstrained -= 1
                    else:
                        while leafConnectee != leaf1ndex:
                            gapsWhere[gap1ndexCeiling] = leafConnectee
                            # Only a first sighting widens the ceiling;
                            # repeats just bump the per-leaf counter.
                            if countDimensionsGapped[leafConnectee] == 0:
                                gap1ndexCeiling += 1
                            countDimensionsGapped[leafConnectee] += 1
                            # Advance to the next connectee in this dimension.
                            leafConnectee = connectionGraph[indexDimension, leaf1ndex, leafBelow[leafConnectee]]
                    indexDimension += 1
                # Compact gapsWhere: keep only entries whose counter equals
                # dimensionsUnconstrained, zeroing every counter on the way.
                indexMiniGap = gap1ndex
                while indexMiniGap < gap1ndexCeiling:
                    gapsWhere[gap1ndex] = gapsWhere[indexMiniGap]
                    if countDimensionsGapped[gapsWhere[indexMiniGap]] == dimensionsUnconstrained:
                        gap1ndex += 1
                    countDimensionsGapped[gapsWhere[indexMiniGap]] = 0
                    indexMiniGap += 1
        # Backtrack while the current leaf has no remaining gaps: unlink it
        # from the above/below chain and step back one leaf.
        while leaf1ndex > 0 and gap1ndex == gapRangeStart[leaf1ndex - 1]:
            leaf1ndex -= 1
            leafBelow[leafAbove[leaf1ndex]] = leafBelow[leaf1ndex]
            leafAbove[leafBelow[leaf1ndex]] = leafAbove[leaf1ndex]
        if leaf1ndex > 0:
            # Consume the most recent gap and splice the leaf into the doubly
            # linked above/below chain at that position.
            gap1ndex -= 1
            leafAbove[leaf1ndex] = gapsWhere[gap1ndex]
            leafBelow[leaf1ndex] = leafBelow[leafAbove[leaf1ndex]]
            leafBelow[leafAbove[leaf1ndex]] = leaf1ndex
            leafAbove[leafBelow[leaf1ndex]] = leaf1ndex
            gapRangeStart[leaf1ndex] = gap1ndex
            leaf1ndex += 1
    # Publish the total; taskIndex was read from `my` at entry.
    foldGroups[taskIndex] = groupsOfFolds
@@ -1,13 +0,0 @@
|
|
|
1
|
-
from mapFolding import indexMy
|
|
2
|
-
from mapFolding.syntheticModules.numbaCount import countInitialize, countSequential, countParallel
|
|
3
|
-
from numba import uint16, int64, jit
|
|
4
|
-
from numpy import ndarray, dtype, integer
|
|
5
|
-
from typing import Any
|
|
6
|
-
|
|
7
|
-
@jit((uint16[:, :, ::1], int64[::1], uint16[::1], uint16[::1], uint16[::1], uint16[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=False, no_cpython_wrapper=False, nopython=True, parallel=False)
def doTheNeedful(connectionGraph: ndarray[tuple[int, int, int], dtype[integer[Any]]], foldGroups: ndarray[tuple[int], dtype[integer[Any]]], gapsWhere: ndarray[tuple[int], dtype[integer[Any]]], mapShape: ndarray[tuple[int], dtype[integer[Any]]], my: ndarray[tuple[int], dtype[integer[Any]]], track: ndarray[tuple[int, int], dtype[integer[Any]]]) -> None:
    """Entry point: seed the shared counting state, then run the right counter.

    ``countInitialize`` mutates ``gapsWhere``, ``my``, and ``track`` in place;
    the subsequent dispatch sends the work to ``countParallel`` when task
    divisions were requested (``my[indexMy.taskDivisions] > 0``) and to
    ``countSequential`` otherwise.  Results land in ``foldGroups``; nothing is
    returned.
    """
    countInitialize(connectionGraph, gapsWhere, my, track)
    # Dispatch on the task-division count prepared by the caller.
    taskDivisions = my[indexMy.taskDivisions]
    if taskDivisions <= 0:
        countSequential(connectionGraph, foldGroups, gapsWhere, my, track)
    else:
        countParallel(connectionGraph, foldGroups, gapsWhere, my, track)