mapFolding 0.8.3-py3-none-any.whl → 0.8.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/__init__.py +2 -2
- mapFolding/basecamp.py +11 -5
- mapFolding/filesystem.py +134 -109
- mapFolding/oeis.py +1 -1
- mapFolding/someAssemblyRequired/__init__.py +37 -18
- mapFolding/someAssemblyRequired/_theTypes.py +35 -0
- mapFolding/someAssemblyRequired/_tool_Make.py +92 -0
- mapFolding/someAssemblyRequired/_tool_Then.py +65 -0
- mapFolding/someAssemblyRequired/_toolboxAntecedents.py +326 -0
- mapFolding/someAssemblyRequired/_toolboxContainers.py +306 -0
- mapFolding/someAssemblyRequired/_toolboxPython.py +76 -0
- mapFolding/someAssemblyRequired/ingredientsNumba.py +17 -24
- mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py +114 -169
- mapFolding/someAssemblyRequired/synthesizeNumbaJob.py +247 -0
- mapFolding/someAssemblyRequired/transformDataStructures.py +167 -100
- mapFolding/someAssemblyRequired/transformationTools.py +63 -685
- mapFolding/syntheticModules/numbaCount_doTheNeedful.py +36 -33
- mapFolding/theDao.py +13 -11
- mapFolding/theSSOT.py +69 -112
- {mapfolding-0.8.3.dist-info → mapfolding-0.8.4.dist-info}/METADATA +2 -1
- mapfolding-0.8.4.dist-info/RECORD +49 -0
- {mapfolding-0.8.3.dist-info → mapfolding-0.8.4.dist-info}/WHEEL +1 -1
- tests/conftest.py +34 -29
- tests/test_computations.py +40 -31
- tests/test_filesystem.py +3 -3
- mapFolding/someAssemblyRequired/synthesizeNumbaJobVESTIGIAL.py +0 -413
- mapfolding-0.8.3.dist-info/RECORD +0 -43
- {mapfolding-0.8.3.dist-info → mapfolding-0.8.4.dist-info}/entry_points.txt +0 -0
- {mapfolding-0.8.3.dist-info → mapfolding-0.8.4.dist-info}/licenses/LICENSE +0 -0
- {mapfolding-0.8.3.dist-info → mapfolding-0.8.4.dist-info}/top_level.txt +0 -0
mapFolding/someAssemblyRequired/synthesizeNumbaJob.py
@@ -0,0 +1,247 @@
+"""Synthesize one file to compute `foldsTotal` of `mapShape`."""
+from mapFolding.someAssemblyRequired import ast_Identifier, be, ifThis, Make, NodeChanger, NodeTourist, parsePathFilename2astModule, str_nameDOTname, Then, write_astModule, 又
+from mapFolding.someAssemblyRequired.ingredientsNumba import decorateCallableWithNumba, ParametersNumba, parametersNumbaDefault
+from mapFolding.someAssemblyRequired.synthesizeNumbaFlow import theNumbaFlow
+from mapFolding.someAssemblyRequired.transformDataStructures import makeInitializedComputationState, shatter_dataclassesDOTdataclass, ShatteredDataclass
+from mapFolding.someAssemblyRequired._toolboxContainers import astModuleToIngredientsFunction, IngredientsFunction, IngredientsModule, LedgerOfImports
+from mapFolding.filesystem import getFilenameFoldsTotal, getPathFilenameFoldsTotal, getPathRootJobDEFAULT
+from mapFolding.theSSOT import ComputationState, The
+from pathlib import Path, PurePosixPath
+from typing import cast
+from Z0Z_tools import autoDecodingRLE
+import ast
+import dataclasses
+
+list_IdentifiersNotUsedAllHARDCODED = ['concurrencyLimit', 'foldsTotal', 'mapShape',]
+list_IdentifiersNotUsedParallelSequentialHARDCODED = ['indexLeaf']
+list_IdentifiersNotUsedSequentialHARDCODED = ['foldGroups', 'taskDivisions', 'taskIndex',]
+
+list_IdentifiersReplacedHARDCODED = ['groupsOfFolds',]
+
+list_IdentifiersStaticValuesHARDCODED = ['dimensionsTotal', 'leavesTotal',]
+
+list_IdentifiersNotUsedHARDCODED = list_IdentifiersStaticValuesHARDCODED + list_IdentifiersReplacedHARDCODED + list_IdentifiersNotUsedAllHARDCODED + list_IdentifiersNotUsedParallelSequentialHARDCODED + list_IdentifiersNotUsedSequentialHARDCODED
+
+@dataclasses.dataclass
+class Z0Z_RecipeJob:
+    state: ComputationState
+    # TODO create function to calculate `foldsTotalEstimated`
+    foldsTotalEstimated: int = 0
+    useNumbaProgressBar: bool = True
+    numbaProgressBarIdentifier: ast_Identifier = 'ProgressBarGroupsOfFolds'
+    shatteredDataclass: ShatteredDataclass = dataclasses.field(default=None, init=True) # type: ignore[assignment, reportAssignmentType]
+
+    # ========================================
+    # Source
+    source_astModule = parsePathFilename2astModule(theNumbaFlow.pathFilenameSequential)
+    sourceCountCallable: ast_Identifier = theNumbaFlow.callableSequential
+
+    sourceLogicalPathModuleDataclass: str_nameDOTname = theNumbaFlow.logicalPathModuleDataclass
+    sourceDataclassIdentifier: ast_Identifier = theNumbaFlow.dataclassIdentifier
+    sourceDataclassInstance: ast_Identifier = theNumbaFlow.dataclassInstance
+
+    sourcePathPackage: PurePosixPath | None = theNumbaFlow.pathPackage
+    sourcePackageIdentifier: ast_Identifier | None = theNumbaFlow.packageIdentifier
+
+    # ========================================
+    # Filesystem (names of physical objects)
+    pathPackage: PurePosixPath | None = None
+    pathModule: PurePosixPath | None = PurePosixPath(getPathRootJobDEFAULT())
+    """ `pathModule` will override `pathPackage` and `logicalPathRoot`."""
+    fileExtension: str = theNumbaFlow.fileExtension
+    pathFilenameFoldsTotal: PurePosixPath = dataclasses.field(default=None, init=True) # type: ignore[assignment, reportAssignmentType]
+
+    # ========================================
+    # Logical identifiers (as opposed to physical identifiers)
+    # ========================================
+    packageIdentifier: ast_Identifier | None = None
+    logicalPathRoot: str_nameDOTname | None = None
+    """ `logicalPathRoot` likely corresponds to a physical filesystem directory."""
+    moduleIdentifier: ast_Identifier = dataclasses.field(default=None, init=True) # type: ignore[assignment, reportAssignmentType]
+    countCallable: ast_Identifier = sourceCountCallable
+    dataclassIdentifier: ast_Identifier | None = sourceDataclassIdentifier
+    dataclassInstance: ast_Identifier | None = sourceDataclassInstance
+    logicalPathModuleDataclass: str_nameDOTname | None = sourceLogicalPathModuleDataclass
+
+    def _makePathFilename(self,
+            pathRoot: PurePosixPath | None = None,
+            logicalPathINFIX: str_nameDOTname | None = None,
+            filenameStem: str | None = None,
+            fileExtension: str | None = None,
+            ) -> PurePosixPath:
+        if pathRoot is None:
+            pathRoot = self.pathPackage or PurePosixPath(Path.cwd())
+        if logicalPathINFIX:
+            whyIsThisStillAThing: list[str] = logicalPathINFIX.split('.')
+            pathRoot = pathRoot.joinpath(*whyIsThisStillAThing)
+        if filenameStem is None:
+            filenameStem = self.moduleIdentifier
+        if fileExtension is None:
+            fileExtension = self.fileExtension
+        filename: str = filenameStem + fileExtension
+        return pathRoot.joinpath(filename)
+
+    @property
+    def pathFilenameModule(self) -> PurePosixPath:
+        if self.pathModule is None:
+            return self._makePathFilename()
+        else:
+            return self._makePathFilename(pathRoot=self.pathModule, logicalPathINFIX=None)
+
+    def __post_init__(self):
+        pathFilenameFoldsTotal = PurePosixPath(getPathFilenameFoldsTotal(self.state.mapShape))
+
+        if self.moduleIdentifier is None:
+            self.moduleIdentifier = pathFilenameFoldsTotal.stem
+
+        if self.pathFilenameFoldsTotal is None:
+            self.pathFilenameFoldsTotal = pathFilenameFoldsTotal
+
+        if self.shatteredDataclass is None and self.logicalPathModuleDataclass and self.dataclassIdentifier and self.dataclassInstance:
+            self.shatteredDataclass = shatter_dataclassesDOTdataclass(self.logicalPathModuleDataclass, self.dataclassIdentifier, self.dataclassInstance)
+
+    # ========================================
+    # Fields you probably don't need =================================
+    # Dispatcher =================================
+    sourceDispatcherCallable: ast_Identifier = theNumbaFlow.callableDispatcher
+    dispatcherCallable: ast_Identifier = sourceDispatcherCallable
+    # Parallel counting =================================
+    sourceDataclassInstanceTaskDistribution: ast_Identifier = theNumbaFlow.dataclassInstanceTaskDistribution
+    sourceConcurrencyManagerNamespace: ast_Identifier = theNumbaFlow.concurrencyManagerNamespace
+    sourceConcurrencyManagerIdentifier: ast_Identifier = theNumbaFlow.concurrencyManagerIdentifier
+    dataclassInstanceTaskDistribution: ast_Identifier = sourceDataclassInstanceTaskDistribution
+    concurrencyManagerNamespace: ast_Identifier = sourceConcurrencyManagerNamespace
+    concurrencyManagerIdentifier: ast_Identifier = sourceConcurrencyManagerIdentifier
+
+def addLauncherNumbaProgress(ingredientsModule: IngredientsModule, ingredientsFunction: IngredientsFunction, job: Z0Z_RecipeJob) -> IngredientsModule:
+
+    linesLaunch: str = f"""
+if __name__ == '__main__':
+    with ProgressBar(total={job.foldsTotalEstimated}, update_interval=2) as statusUpdate:
+        {job.countCallable}(statusUpdate)
+        foldsTotal = statusUpdate.n * {job.state.leavesTotal}
+        print('map {job.state.mapShape} =', foldsTotal)
+        writeStream = open('{job.pathFilenameFoldsTotal.as_posix()}', 'w')
+        writeStream.write(str(foldsTotal))
+        writeStream.close()
+"""
+    numba_progressPythonClass: ast_Identifier = 'ProgressBar'
+    numba_progressNumbaType: ast_Identifier = 'ProgressBarType'
+    ingredientsModule.imports.addImportFrom_asStr('numba_progress', numba_progressPythonClass)
+    ingredientsModule.imports.addImportFrom_asStr('numba_progress', numba_progressNumbaType)
+
+    ast_argNumbaProgress = ast.arg(arg=job.numbaProgressBarIdentifier, annotation=ast.Name(id=numba_progressPythonClass, ctx=ast.Load()))
+    ingredientsFunction.astFunctionDef.args.args.append(ast_argNumbaProgress)
+
+    findThis = ifThis.isAugAssign_targetIs(ifThis.isName_Identifier(job.shatteredDataclass.countingVariableName.id))
+    doThat = Then.replaceWith(Make.Expr(Make.Call(Make.Attribute(Make.Name(job.numbaProgressBarIdentifier),'update'),[Make.Constant(1)])))
+    countWithProgressBar = NodeChanger(findThis, doThat)
+    countWithProgressBar.visit(ingredientsFunction.astFunctionDef)
+
+    ingredientsModule.appendLauncher(ast.parse(linesLaunch))
+
+    return ingredientsModule
+
+def move_arg2FunctionDefDOTbodyAndAssignInitialValues(ingredientsFunction: IngredientsFunction, job: Z0Z_RecipeJob) -> IngredientsFunction:
+    ingredientsFunction.imports.update(job.shatteredDataclass.ledger)
+
+    list_IdentifiersNotUsed = list_IdentifiersNotUsedHARDCODED
+
+    list_argCauseMyBrainRefusesToDoThisTheRightWay = ingredientsFunction.astFunctionDef.args.args + ingredientsFunction.astFunctionDef.args.posonlyargs + ingredientsFunction.astFunctionDef.args.kwonlyargs
+    for ast_arg in list_argCauseMyBrainRefusesToDoThisTheRightWay:
+        if ast_arg.arg in job.shatteredDataclass.field2AnnAssign:
+            if ast_arg.arg in list_IdentifiersNotUsed:
+                pass
+            else:
+                ImaAnnAssign, elementConstructor = job.shatteredDataclass.Z0Z_field2AnnAssign[ast_arg.arg]
+                match elementConstructor:
+                    case 'scalar':
+                        ImaAnnAssign.value.args[0].value = int(job.state.__dict__[ast_arg.arg]) # type: ignore
+                    case 'array':
+                        # print(ast.dump(ImaAnnAssign))
+                        dataAsStrRLE: str = autoDecodingRLE(job.state.__dict__[ast_arg.arg], addSpaces=True)
+                        dataAs_astExpr: ast.expr = cast(ast.Expr, ast.parse(dataAsStrRLE).body[0]).value
+                        ImaAnnAssign.value.args = [dataAs_astExpr] # type: ignore
+                    case _:
+                        list_exprDOTannotation: list[ast.expr] = []
+                        list_exprDOTvalue: list[ast.expr] = []
+                        for dimension in job.state.mapShape:
+                            list_exprDOTannotation.append(Make.Name(elementConstructor))
+                            list_exprDOTvalue.append(Make.Call(Make.Name(elementConstructor), [Make.Constant(dimension)]))
+                        ImaAnnAssign.annotation.slice.elts = list_exprDOTannotation # type: ignore
+                        ImaAnnAssign.value.elts = list_exprDOTvalue # type: ignore
+
+                ingredientsFunction.astFunctionDef.body.insert(0, ImaAnnAssign)
+
+        findThis = ifThis.is_arg_Identifier(ast_arg.arg)
+        remove_arg = NodeChanger(findThis, Then.removeIt)
+        remove_arg.visit(ingredientsFunction.astFunctionDef)
+
+    ast.fix_missing_locations(ingredientsFunction.astFunctionDef)
+    return ingredientsFunction
+
+def makeJobNumba(job: Z0Z_RecipeJob, parametersNumba: ParametersNumba = parametersNumbaDefault):
+    # get the raw ingredients: data and the algorithm
+    ingredientsCount: IngredientsFunction = astModuleToIngredientsFunction(job.source_astModule, job.countCallable)
+
+    # Change the return so you can dynamically determine which variables are not used
+    removeReturnStatement = NodeChanger(be.Return, Then.removeIt)
+    removeReturnStatement.visit(ingredientsCount.astFunctionDef)
+    ingredientsCount.astFunctionDef.returns = Make.Constant(value=None)
+
+    # Remove `foldGroups` and any other unused statements, so you can dynamically determine which variables are not used
+    findThis = ifThis.isAssignAndTargets0Is(ifThis.isSubscript_Identifier('foldGroups'))
+    doThat = Then.removeIt
+    remove_foldGroups = NodeChanger(findThis, doThat)
+    remove_foldGroups.visit(ingredientsCount.astFunctionDef)
+
+    # replace identifiers with static values with their values, so you can dynamically determine which variables are not used
+    list_IdentifiersStaticValues = list_IdentifiersStaticValuesHARDCODED
+    for identifier in list_IdentifiersStaticValues:
+        findThis = ifThis.isName_Identifier(identifier)
+        doThat = Then.replaceWith(Make.Constant(int(job.state.__dict__[identifier])))
+        NodeChanger(findThis, doThat).visit(ingredientsCount.astFunctionDef)
+
+    # This launcher eliminates the use of one identifier, so run it now and you can dynamically determine which variables are not used
+    ingredientsModule = IngredientsModule()
+    ingredientsModule = addLauncherNumbaProgress(ingredientsModule, ingredientsCount, job)
+    parametersNumba['nogil'] = True
+
+    ingredientsCount = move_arg2FunctionDefDOTbodyAndAssignInitialValues(ingredientsCount, job)
+
+    ingredientsCount.astFunctionDef.decorator_list = [] # TODO low-priority, handle this more elegantly
+    # TODO when I add the function signature in numba style back to the decorator, the logic needs to handle `ProgressBarType:`
+    ingredientsCount = decorateCallableWithNumba(ingredientsCount, parametersNumba)
+
+    ingredientsModule.appendIngredientsFunction(ingredientsCount)
+
+    # add imports, make str, remove unused imports
+    # put on disk
+    write_astModule(ingredientsModule, job.pathFilenameModule, job.packageIdentifier)
+
+"""
+Overview
+- the code starts life in theDao.py, which has many optimizations;
+- `makeNumbaOptimizedFlow` increase optimization especially by using numba;
+- `makeJobNumba` increases optimization especially by limiting its capabilities to just one set of parameters
+- the synthesized module must run well as a standalone interpreted-Python script
+- the next major optimization step will (probably) be to use the module synthesized by `makeJobNumba` to compile a standalone executable
+- Nevertheless, at each major optimization step, the code is constantly being improved and optimized, so everything must be well organized (read: semantic) and able to handle a range of arbitrary upstream and not disrupt downstream transformations
+
+Necessary
+- Move the function's parameters to the function body,
+- initialize identifiers with their state types and values,
+
+Optimizations
+- replace static-valued identifiers with their values
+- narrowly focused imports
+
+Minutia
+- do not use `with` statement inside numba jitted code, except to use numba's obj mode
+"""
+
+if __name__ == '__main__':
+    mapShape = (6,6)
+    state = makeInitializedComputationState(mapShape)
+    aJob = Z0Z_RecipeJob(state)
+    makeJobNumba(aJob)
mapFolding/someAssemblyRequired/transformDataStructures.py
@@ -20,149 +20,216 @@ While developed for transforming map folding computation state objects, the util
 designed to be applicable to various data structure transformation scenarios.
 """
 
-from collections.abc import
-from
-from
-from mapFolding.beDRY import outfitCountFolds, validateListDimensions
+from collections.abc import Callable
+from copy import deepcopy
+from mapFolding.beDRY import outfitCountFolds
 from mapFolding.filesystem import getPathFilenameFoldsTotal
 from mapFolding.someAssemblyRequired import (
     ast_Identifier,
+    be,
     extractClassDef,
     ifThis,
-
+    ImaAnnotationType,
+    importLogicalPath2Callable,
     Make,
-
-
+    NodeTourist,
+    parseLogicalPath2astModule,
+    str_nameDOTname,
     Then,
-
+    又,
 )
-from mapFolding.
-from
-from
+from mapFolding.someAssemblyRequired._toolboxContainers import LedgerOfImports
+from mapFolding.theSSOT import ComputationState, raiseIfNoneGitHubIssueNumber3, The
+from os import PathLike
+from pathlib import Path, PurePath
 from typing import Any, Literal, overload
 import ast
 import dataclasses
 import pickle
 
-#
-
-
+# Create dummy AST elements for use as defaults
+dummyAssign = Make.Assign([Make.Name("dummyTarget")], Make.Constant(None))
+dummySubscript = Make.Subscript(Make.Name("dummy"), Make.Name("slice"))
+dummyTuple = Make.Tuple([Make.Name("dummyElement")])
 
 @dataclasses.dataclass
 class ShatteredDataclass:
-
-
-    astTuple4AssignTargetsToFragments: ast.Tuple
-    countingVariableAnnotation: ast.expr
+    countingVariableAnnotation: ImaAnnotationType
+    """Type annotation for the counting variable extracted from the dataclass."""
     countingVariableName: ast.Name
-
-
-
-
-
-
-
-
+    """AST name node representing the counting variable identifier."""
+    field2AnnAssign: dict[ast_Identifier, ast.AnnAssign] = dataclasses.field(default_factory=dict)
+    """Maps field names to their corresponding AST call expressions."""
+    Z0Z_field2AnnAssign: dict[ast_Identifier, tuple[ast.AnnAssign, str]] = dataclasses.field(default_factory=dict)
+    fragments4AssignmentOrParameters: ast.Tuple = dummyTuple
+    """AST tuple used as target for assignment to capture returned fragments."""
+    ledger: LedgerOfImports = dataclasses.field(default_factory=LedgerOfImports)
+    """Import records for the dataclass and its constituent parts."""
+    list_argAnnotated4ArgumentsSpecification: list[ast.arg] = dataclasses.field(default_factory=list)
+    """Function argument nodes with annotations for parameter specification."""
+    list_keyword_field__field4init: list[ast.keyword] = dataclasses.field(default_factory=list)
+    """Keyword arguments for dataclass initialization with field=field format."""
+    listAnnotations: list[ImaAnnotationType] = dataclasses.field(default_factory=list)
+    """Type annotations for each dataclass field."""
+    listName4Parameters: list[ast.Name] = dataclasses.field(default_factory=list)
+    """Name nodes for each dataclass field used as function parameters."""
+    listUnpack: list[ast.AnnAssign] = dataclasses.field(default_factory=list)
+    """Annotated assignment statements to extract fields from dataclass."""
+    map_stateDOTfield2Name: dict[ast.expr, ast.Name] = dataclasses.field(default_factory=dict)
+    """Maps AST expressions to Name nodes for find-replace operations."""
+    repack: ast.Assign = dummyAssign
+    """AST assignment statement that reconstructs the original dataclass instance."""
+    signatureReturnAnnotation: ast.Subscript = dummySubscript
+    """tuple-based return type annotation for function definitions."""
+
+@dataclasses.dataclass
+class DeReConstructField2ast:
+    dataclassesDOTdataclassLogicalPathModule: dataclasses.InitVar[str_nameDOTname]
+    dataclassClassDef: dataclasses.InitVar[ast.ClassDef]
+    dataclassesDOTdataclassInstance_Identifier: dataclasses.InitVar[ast_Identifier]
+    field: dataclasses.InitVar[dataclasses.Field[Any]]
+
+    ledger: LedgerOfImports = dataclasses.field(default_factory=LedgerOfImports)
+
+    name: ast_Identifier = dataclasses.field(init=False)
+    typeBuffalo: type[Any] | str | Any = dataclasses.field(init=False)
+    default: Any | None = dataclasses.field(init=False)
+    default_factory: Callable[..., Any] | None = dataclasses.field(init=False)
+    repr: bool = dataclasses.field(init=False)
+    hash: bool | None = dataclasses.field(init=False)
+    init: bool = dataclasses.field(init=False)
+    compare: bool = dataclasses.field(init=False)
+    metadata: dict[Any, Any] = dataclasses.field(init=False)
+    kw_only: bool = dataclasses.field(init=False)
+
+    astName: ast.Name = dataclasses.field(init=False)
+    ast_keyword_field__field: ast.keyword = dataclasses.field(init=False)
+    ast_nameDOTname: ast.Attribute = dataclasses.field(init=False)
+    astAnnotation: ImaAnnotationType = dataclasses.field(init=False)
+    ast_argAnnotated: ast.arg = dataclasses.field(init=False)
+    astAnnAssignConstructor: ast.AnnAssign = dataclasses.field(init=False)
+    Z0Z_hack: tuple[ast.AnnAssign, str] = dataclasses.field(init=False)
+
+    def __post_init__(self, dataclassesDOTdataclassLogicalPathModule: str_nameDOTname, dataclassClassDef: ast.ClassDef, dataclassesDOTdataclassInstance_Identifier: ast_Identifier, field: dataclasses.Field[Any]) -> None:
+        self.compare = field.compare
+        self.default = field.default if field.default is not dataclasses.MISSING else None
+        self.default_factory = field.default_factory if field.default_factory is not dataclasses.MISSING else None
+        self.hash = field.hash
+        self.init = field.init
+        self.kw_only = field.kw_only if field.kw_only is not dataclasses.MISSING else False
+        self.metadata = dict(field.metadata)
+        self.name = field.name
+        self.repr = field.repr
+        self.typeBuffalo = field.type
+
+        self.astName = Make.Name(self.name)
+        self.ast_keyword_field__field = Make.keyword(self.name, self.astName)
+        self.ast_nameDOTname = Make.Attribute(Make.Name(dataclassesDOTdataclassInstance_Identifier), self.name)
+
+        sherpa = NodeTourist(ifThis.isAnnAssign_targetIs(ifThis.isName_Identifier(self.name)), 又.annotation(Then.getIt)).captureLastMatch(dataclassClassDef)
+        if sherpa is None: raise raiseIfNoneGitHubIssueNumber3
+        else: self.astAnnotation = sherpa
+
+        self.ast_argAnnotated = Make.arg(self.name, self.astAnnotation)
+
+        dtype = self.metadata.get('dtype', None)
+        if dtype:
+            constructor = 'array'
+            self.astAnnAssignConstructor = Make.AnnAssign(self.astName, self.astAnnotation, Make.Call(Make.Name(constructor), list_astKeywords=[Make.keyword('dtype', Make.Name(dtype.__name__))]))
+            self.ledger.addImportFrom_asStr('numpy', constructor)
+            self.ledger.addImportFrom_asStr('numpy', dtype.__name__)
+            self.Z0Z_hack = (self.astAnnAssignConstructor, 'array')
+        elif be.Name(self.astAnnotation):
+            self.astAnnAssignConstructor = Make.AnnAssign(self.astName, self.astAnnotation, Make.Call(self.astAnnotation, [Make.Constant(-1)]))
+            self.ledger.addImportFrom_asStr(dataclassesDOTdataclassLogicalPathModule, self.astAnnotation.id)
+            self.Z0Z_hack = (self.astAnnAssignConstructor, 'scalar')
+        elif be.Subscript(self.astAnnotation):
+            elementConstructor: ast_Identifier = self.metadata['elementConstructor']
+            self.ledger.addImportFrom_asStr(dataclassesDOTdataclassLogicalPathModule, elementConstructor)
+            takeTheTuple: ast.Tuple = deepcopy(self.astAnnotation.slice)
+            self.astAnnAssignConstructor = Make.AnnAssign(self.astName, self.astAnnotation, takeTheTuple)
+            self.Z0Z_hack = (self.astAnnAssignConstructor, elementConstructor)
+        if be.Name(self.astAnnotation):
+            self.ledger.addImportFrom_asStr(dataclassesDOTdataclassLogicalPathModule, self.astAnnotation.id) # pyright: ignore [reportUnknownArgumentType, reportUnknownMemberType, reportIJustCalledATypeGuardMethod_WTF]
+
+def shatter_dataclassesDOTdataclass(logicalPathModule: str_nameDOTname, dataclass_Identifier: ast_Identifier, instance_Identifier: ast_Identifier) -> ShatteredDataclass:
     """
     Parameters:
         logicalPathModule: gimme string cuz python is stoopid
         dataclass_Identifier: The identifier of the dataclass to be dismantled.
         instance_Identifier: In the synthesized module/function/scope, the identifier that will be used for the instance.
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    # TODO get the value from `groupsOfFolds: DatatypeFoldsTotal = dataclasses.field(default=DatatypeFoldsTotal(0), metadata={'theCountingIdentifier': True})`
-    countingVariable = countingIdentifierHARDCODED
-
-    addToLedgerPredicate = ifThis.isAnnAssignAndAnnotationIsName
-    addToLedgerAction = Then.Z0Z_ledger(logicalPathModule, ledgerDataclassANDFragments)
-    addToLedger = NodeCollector(addToLedgerPredicate, [addToLedgerAction])
-
-    exclusionPredicate = ifThis.is_keyword_IdentifierEqualsConstantValue('init', False)
-    appendKeywordAction = Then.Z0Z_appendKeywordMirroredTo(list_keyword4DataclassInitialization)
-    filteredAppendKeywordAction = Z0Z_executeActionUnlessDescendantMatches(exclusionPredicate, appendKeywordAction) # type: ignore
-
-    NodeCollector(
-        ifThis.isAnnAssignAndTargetIsName,
-        [Then.Z0Z_appendAnnAssignOf_nameDOTnameTo(instance_Identifier, listAnnAssign4DataclassUnpack)
-        , Then.append_targetTo(listNameDataclassFragments4Parameters) # type: ignore
-        , lambda node: addToLedger.visit(node)
-        , filteredAppendKeywordAction
-        , lambda node: list_ast_argAnnotated4ArgumentsSpecification.append(Make.ast_arg(node.target.id, node.annotation)) # type: ignore
-        , lambda node: listAnnotations.append(node.annotation) # type: ignore
-        ]
-    ).visit(dataclass)
+    Official_fieldOrder: list[ast_Identifier] = []
+    dictionaryDeReConstruction: dict[ast_Identifier, DeReConstructField2ast] = {}
+
+    dataclassClassDef = extractClassDef(parseLogicalPath2astModule(logicalPathModule), dataclass_Identifier)
+    if not isinstance(dataclassClassDef, ast.ClassDef): raise ValueError(f"I could not find {dataclass_Identifier=} in {logicalPathModule=}.")
+
+    countingVariable = None
+    for aField in dataclasses.fields(importLogicalPath2Callable(logicalPathModule, dataclass_Identifier)): # pyright: ignore [reportArgumentType]
+        Official_fieldOrder.append(aField.name)
+        dictionaryDeReConstruction[aField.name] = DeReConstructField2ast(logicalPathModule, dataclassClassDef, instance_Identifier, aField)
+        if aField.metadata.get('theCountingIdentifier', False):
+            countingVariable = dictionaryDeReConstruction[aField.name].name
+
+    if countingVariable is None:
+        raise ValueError(f"I could not find the counting variable in {dataclass_Identifier=} in {logicalPathModule=}.")
 
     shatteredDataclass = ShatteredDataclass(
-
-
-
-
-
-
-
-
-
-
-
-        )
-
-    shatteredDataclass.
-
+        countingVariableAnnotation=dictionaryDeReConstruction[countingVariable].astAnnotation,
+        countingVariableName=dictionaryDeReConstruction[countingVariable].astName,
+        field2AnnAssign={dictionaryDeReConstruction[field].name: dictionaryDeReConstruction[field].astAnnAssignConstructor for field in Official_fieldOrder},
+        Z0Z_field2AnnAssign={dictionaryDeReConstruction[field].name: dictionaryDeReConstruction[field].Z0Z_hack for field in Official_fieldOrder},
+        list_argAnnotated4ArgumentsSpecification=[dictionaryDeReConstruction[field].ast_argAnnotated for field in Official_fieldOrder],
+        list_keyword_field__field4init=[dictionaryDeReConstruction[field].ast_keyword_field__field for field in Official_fieldOrder if dictionaryDeReConstruction[field].init],
+        listAnnotations=[dictionaryDeReConstruction[field].astAnnotation for field in Official_fieldOrder],
+        listName4Parameters=[dictionaryDeReConstruction[field].astName for field in Official_fieldOrder],
+        listUnpack=[Make.AnnAssign(dictionaryDeReConstruction[field].astName, dictionaryDeReConstruction[field].astAnnotation, dictionaryDeReConstruction[field].ast_nameDOTname) for field in Official_fieldOrder],
+        map_stateDOTfield2Name={dictionaryDeReConstruction[field].ast_nameDOTname: dictionaryDeReConstruction[field].astName for field in Official_fieldOrder},
+        )
+    shatteredDataclass.fragments4AssignmentOrParameters = Make.Tuple(shatteredDataclass.listName4Parameters, ast.Store())
+    shatteredDataclass.repack = Make.Assign(listTargets=[Make.Name(instance_Identifier)], value=Make.Call(Make.Name(dataclass_Identifier), list_astKeywords=shatteredDataclass.list_keyword_field__field4init))
+    shatteredDataclass.signatureReturnAnnotation = Make.Subscript(Make.Name('tuple'), Make.Tuple(shatteredDataclass.listAnnotations))
+
+    shatteredDataclass.ledger.update(*(dictionaryDeReConstruction[field].ledger for field in Official_fieldOrder))
+    shatteredDataclass.ledger.addImportFrom_asStr(logicalPathModule, dataclass_Identifier)
 
-
-    moduleImported: ModuleType = importlib_import_module(The.logicalPathModuleSourceAlgorithm)
-    return moduleImported
+    return shatteredDataclass
 
 @overload
-def
+def makeInitializedComputationState(mapShape: tuple[int, ...], writeJob: Literal[True], *, pathFilename: PathLike[str] | PurePath | None = None, **keywordArguments: Any) -> Path: ...
 @overload
-def
-def
+def makeInitializedComputationState(mapShape: tuple[int, ...], writeJob: Literal[False] = False, **keywordArguments: Any) -> ComputationState: ...
+def makeInitializedComputationState(mapShape: tuple[int, ...], writeJob: bool = False, *, pathFilename: PathLike[str] | PurePath | None = None, **keywordArguments: Any) -> ComputationState | Path:
     """
-
+    Initializes a computation state and optionally saves it to disk.
 
-    This function initializes a computation state
-    sets up the initial counting configuration, and can optionally save the state to a pickle file.
+    This function initializes a computation state using the source algorithm.
 
-
-        listDimensions: List of integers representing the dimensions of the map to be folded.
-        writeJob (True): Whether to save the state to disk.
-        **keywordArguments: Additional keyword arguments to pass to the computation state initialization.
+    Hint: If you want an uninitialized state, call `outfitCountFolds` directly.
 
+    Parameters:
+        mapShape: List of integers representing the dimensions of the map to be folded.
+        writeJob (False): Whether to save the state to disk.
+        pathFilename (getPathFilenameFoldsTotal.pkl): The path and filename to save the state. If None, uses a default path.
+        **keywordArguments: computationDivisions:int|str|None=None,concurrencyLimit:int=1.
     Returns:
         stateUniversal|pathFilenameJob: The computation state for the map folding calculations, or
         the path to the saved state file if writeJob is True.
     """
-    mapShape = validateListDimensions(listDimensions)
     stateUniversal: ComputationState = outfitCountFolds(mapShape, **keywordArguments)
 
-
-
-    stateUniversal = moduleSource.countInitialize(stateUniversal)
+    initializeState = importLogicalPath2Callable(The.logicalPathModuleSourceAlgorithm, The.sourceCallableInitialize)
+    stateUniversal = initializeState(stateUniversal)
 
     if not writeJob:
         return stateUniversal
 
-
-
-
-
-
+    if pathFilename:
+        pathFilenameJob = Path(pathFilename)
+        pathFilenameJob.parent.mkdir(parents=True, exist_ok=True)
+    else:
+        pathFilenameJob = getPathFilenameFoldsTotal(stateUniversal.mapShape).with_suffix('.pkl')
 
     pathFilenameJob.write_bytes(pickle.dumps(stateUniversal))
     return pathFilenameJob