mapFolding 0.11.1__py3-none-any.whl → 0.11.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/__init__.py +7 -60
- mapFolding/basecamp.py +15 -13
- mapFolding/beDRY.py +4 -36
- mapFolding/dataBaskets.py +24 -2
- mapFolding/datatypes.py +0 -3
- mapFolding/{toolboxFilesystem.py → filesystemToolkit.py} +3 -3
- mapFolding/oeis.py +3 -5
- mapFolding/someAssemblyRequired/RecipeJob.py +8 -116
- mapFolding/someAssemblyRequired/Z0Z_makeAllModules.py +492 -0
- mapFolding/someAssemblyRequired/__init__.py +5 -31
- mapFolding/someAssemblyRequired/_toolIfThis.py +5 -6
- mapFolding/someAssemblyRequired/{_toolboxContainers.py → _toolkitContainers.py} +6 -127
- mapFolding/someAssemblyRequired/infoBooth.py +70 -0
- mapFolding/someAssemblyRequired/makeJobTheorem2Numba.py +13 -12
- mapFolding/someAssemblyRequired/{toolboxNumba.py → toolkitNumba.py} +2 -44
- mapFolding/someAssemblyRequired/transformationTools.py +16 -174
- mapFolding/syntheticModules/countParallel.py +98 -0
- mapFolding/syntheticModules/dataPacking.py +1 -1
- mapFolding/theSSOT.py +12 -246
- {mapfolding-0.11.1.dist-info → mapfolding-0.11.3.dist-info}/METADATA +16 -11
- mapfolding-0.11.3.dist-info/RECORD +53 -0
- {mapfolding-0.11.1.dist-info → mapfolding-0.11.3.dist-info}/WHEEL +1 -1
- tests/conftest.py +2 -79
- tests/test_computations.py +12 -19
- tests/test_filesystem.py +1 -2
- tests/test_other.py +1 -1
- tests/test_tasks.py +3 -4
- mapFolding/someAssemblyRequired/Z0Z_makeSomeModules.py +0 -325
- mapFolding/someAssemblyRequired/synthesizeNumbaJob.py +0 -314
- mapFolding/syntheticModules/numbaCount.py +0 -201
- mapFolding/theDao.py +0 -243
- mapfolding-0.11.1.dist-info/RECORD +0 -54
- {mapfolding-0.11.1.dist-info → mapfolding-0.11.3.dist-info}/entry_points.txt +0 -0
- {mapfolding-0.11.1.dist-info → mapfolding-0.11.3.dist-info}/licenses/LICENSE +0 -0
- {mapfolding-0.11.1.dist-info → mapfolding-0.11.3.dist-info}/top_level.txt +0 -0
@@ -18,137 +18,18 @@ The containers work in conjunction with transformation tools that manipulate the
 specific optimizations and transformations.
 """

+from astToolkit import ast_Identifier, ClassIsAndAttribute, DOT, LedgerOfImports, Make, NodeTourist, str_nameDOTname, Then
 from collections.abc import Callable
 from copy import deepcopy
-from mapFolding.someAssemblyRequired import
-from mapFolding.theSSOT import raiseIfNoneGitHubIssueNumber3, The
-from pathlib import Path, PurePosixPath
+from mapFolding.someAssemblyRequired import IfThis, raiseIfNoneGitHubIssueNumber3
 from typing import Any, cast
 import ast
 import dataclasses

-# Consolidate settings classes through inheritance https://github.com/hunterhogan/mapFolding/issues/15
-@dataclasses.dataclass
-class RecipeSynthesizeFlow:
-    """
-    Configure the generation of new modules, including Numba-accelerated code modules.
-
-    RecipeSynthesizeFlow defines the complete blueprint for transforming an original Python algorithm into an optimized,
-    accelerated implementation. It specifies:
-
-    1. Source code locations and identifiers.
-    2. Target code locations and identifiers.
-    3. Naming conventions for generated modules and functions.
-    4. File system paths for output files.
-    5. Import relationships between components.
-
-    This configuration class serves as a single source of truth for the code generation process, ensuring consistency
-    across all generated artifacts while enabling customization of the transformation assembly line.
-
-    The transformation process uses this configuration to extract functions from the source module, transform them
-    according to optimization rules, and output properly structured optimized modules with all necessary imports.
-    """
-    # ========================================
-    # Source
-    source_astModule: ast.Module = parseLogicalPath2astModule(The.logicalPathModuleSourceAlgorithm)
-    """AST of the source algorithm module containing the original implementation."""
-
-    # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
-    sourceCallableDispatcher: ast_Identifier = The.sourceCallableDispatcher
-    sourceCallableInitialize: ast_Identifier = The.sourceCallableInitialize
-    sourceCallableParallel: ast_Identifier = The.sourceCallableParallel
-    sourceCallableSequential: ast_Identifier = The.sourceCallableSequential
-
-    sourceDataclassIdentifier: ast_Identifier = The.dataclassIdentifier
-    sourceDataclassInstance: ast_Identifier = The.dataclassInstance
-    sourceDataclassInstanceTaskDistribution: ast_Identifier = The.dataclassInstanceTaskDistribution
-    sourceLogicalPathModuleDataclass: str_nameDOTname = The.logicalPathModuleDataclass
-
-    sourceConcurrencyManagerNamespace = The.sourceConcurrencyManagerNamespace
-    sourceConcurrencyManagerIdentifier = The.sourceConcurrencyManagerIdentifier
-
-    # ========================================
-    # Logical identifiers (as opposed to physical identifiers)
-    # ========================================
-    # Package ================================
-    packageIdentifier: ast_Identifier | None = The.packageName
-
-    # Qualified logical path ================================
-    logicalPathModuleDataclass: str_nameDOTname = sourceLogicalPathModuleDataclass
-    logicalPathFlowRoot: ast_Identifier | None = 'syntheticModules'
-    """ `logicalPathFlowRoot` likely corresponds to a physical filesystem directory."""
-
-    # Module ================================
-    moduleDispatcher: ast_Identifier = 'numbaCount'
-    moduleInitialize: ast_Identifier = moduleDispatcher
-    moduleParallel: ast_Identifier = moduleDispatcher
-    moduleSequential: ast_Identifier = moduleDispatcher
-
-    # Function ================================
-    callableDispatcher: ast_Identifier = sourceCallableDispatcher
-    callableInitialize: ast_Identifier = sourceCallableInitialize
-    callableParallel: ast_Identifier = sourceCallableParallel
-    callableSequential: ast_Identifier = sourceCallableSequential
-    concurrencyManagerNamespace: ast_Identifier = sourceConcurrencyManagerNamespace
-    concurrencyManagerIdentifier: ast_Identifier = sourceConcurrencyManagerIdentifier
-    dataclassIdentifier: ast_Identifier = sourceDataclassIdentifier
-
-    # Variable ================================
-    dataclassInstance: ast_Identifier = sourceDataclassInstance
-    dataclassInstanceTaskDistribution: ast_Identifier = sourceDataclassInstanceTaskDistribution
-
-    removeDataclassDispatcher: bool = False
-    removeDataclassInitialize: bool = False
-    removeDataclassParallel: bool = True
-    removeDataclassSequential: bool = True
-    # ========================================
-    # Computed
-    # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
-    # theFormatStrModuleSynthetic = "{packageFlow}Count"
-    # theFormatStrModuleForCallableSynthetic = theFormatStrModuleSynthetic + "_{callableTarget}"
-    # theModuleDispatcherSynthetic: ast_Identifier = theFormatStrModuleForCallableSynthetic.format(packageFlow=packageFlowSynthetic, callableTarget=The.sourceCallableDispatcher)
-    # theLogicalPathModuleDispatcherSynthetic: str = '.'.join([The.packageName, The.moduleOfSyntheticModules, theModuleDispatcherSynthetic])
-    # logicalPathModuleDispatcher: str = '.'.join([Z0Z_flowLogicalPathRoot, moduleDispatcher])
-
-    # ========================================
-    # Filesystem (names of physical objects)
-    pathPackage: PurePosixPath | None = PurePosixPath(The.pathPackage)
-    fileExtension: str = The.fileExtension
-
-    def _makePathFilename(self, filenameStem: str,
-            pathRoot: PurePosixPath | None = None,
-            logicalPathINFIX: str_nameDOTname | None = None,
-            fileExtension: str | None = None,
-            ) -> PurePosixPath:
-        """filenameStem: (hint: the name of the logical module)"""
-        if pathRoot is None:
-            pathRoot = self.pathPackage or PurePosixPath(Path.cwd())
-        if logicalPathINFIX:
-            whyIsThisStillAThing: list[str] = logicalPathINFIX.split('.')
-            pathRoot = pathRoot.joinpath(*whyIsThisStillAThing)
-        if fileExtension is None:
-            fileExtension = self.fileExtension
-        filename: str = filenameStem + fileExtension
-        return pathRoot.joinpath(filename)
-
-    @property
-    def pathFilenameDispatcher(self) -> PurePosixPath:
-        return self._makePathFilename(filenameStem=self.moduleDispatcher, logicalPathINFIX=self.logicalPathFlowRoot)
-    @property
-    def pathFilenameInitialize(self) -> PurePosixPath:
-        return self._makePathFilename(filenameStem=self.moduleInitialize, logicalPathINFIX=self.logicalPathFlowRoot)
-    @property
-    def pathFilenameParallel(self) -> PurePosixPath:
-        return self._makePathFilename(filenameStem=self.moduleParallel, logicalPathINFIX=self.logicalPathFlowRoot)
-    @property
-    def pathFilenameSequential(self) -> PurePosixPath:
-        return self._makePathFilename(filenameStem=self.moduleSequential, logicalPathINFIX=self.logicalPathFlowRoot)
-
 dummyAssign = Make.Assign([Make.Name("dummyTarget")], Make.Constant(None))
 dummySubscript = Make.Subscript(Make.Name("dummy"), Make.Name("slice"))
 dummyTuple = Make.Tuple([Make.Name("dummyElement")])

-# Consolidate settings classes through inheritance https://github.com/hunterhogan/mapFolding/issues/15
 @dataclasses.dataclass
 class ShatteredDataclass:
     countingVariableAnnotation: ast.expr
@@ -252,14 +133,12 @@ class DeReConstructField2ast:
         self.ast_keyword_field__field = Make.keyword(self.name, self.astName)
         self.ast_nameDOTname = Make.Attribute(Make.Name(dataclassesDOTdataclassInstance_Identifier), self.name)

-
-
-
-            findThis=findThis
-            , doThat=Then.extractIt(DOT.annotation)
+        sherpa: ast.expr = NodeTourist( # type: ignore
+            findThis=ClassIsAndAttribute.targetIs(ast.AnnAssign, IfThis.isName_Identifier(self.name))
+            , doThat=Then.extractIt(DOT.annotation) # type: ignore
             ).captureLastMatch(dataclassClassDef)

-        if sherpa is None: raise raiseIfNoneGitHubIssueNumber3
+        if sherpa is None: raise raiseIfNoneGitHubIssueNumber3 # type: ignore
         else: self.astAnnotation = sherpa

         self.ast_argAnnotated = Make.arg(self.name, self.astAnnotation)
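For readers who do not know astToolkit: the rewritten lookup above finds the annotated assignment whose target names the field and extracts its annotation. A plain standard-library `ast` sketch of the same idea, using a made-up class rather than mapFolding's real dataclass:

    import ast

    sourceCode = (
        "class ComputationState:\n"
        "    groupsOfFolds: int\n"
        "    mapShape: tuple[int, ...]\n"
    )
    classDef = ast.parse(sourceCode).body[0]
    assert isinstance(classDef, ast.ClassDef)

    def annotationFor(fieldName: str) -> ast.expr | None:
        # Keep the last AnnAssign whose target is a Name matching fieldName,
        # mirroring NodeTourist(...).captureLastMatch(dataclassClassDef) above.
        captured: ast.expr | None = None
        for statement in classDef.body:
            if isinstance(statement, ast.AnnAssign) and isinstance(statement.target, ast.Name) and statement.target.id == fieldName:
                captured = statement.annotation
        return captured

    print(ast.dump(annotationFor("groupsOfFolds")))  # Name(id='int', ctx=Load())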
@@ -0,0 +1,70 @@
+from mapFolding import PackageSettings
+import dataclasses
+
+@dataclasses.dataclass
+class PackageInformation(PackageSettings):
+    callableDispatcher: str = 'doTheNeedful'
+    """Name of the function within the dispatcher module that will be called."""
+
+    # "Evaluate When Packaging" and "Evaluate When Installing"
+    # https://github.com/hunterhogan/mapFolding/issues/18
+    dataclassIdentifier: str = dataclasses.field(default='ComputationState', metadata={'evaluateWhen': 'packaging'})
+    """Name of the dataclass used to track computation state."""
+
+    dataclassInstance: str = dataclasses.field(default='state', metadata={'evaluateWhen': 'packaging'})
+    """Default variable name for instances of the computation state dataclass."""
+
+    dataclassInstanceTaskDistributionSuffix: str = dataclasses.field(default='Parallel', metadata={'evaluateWhen': 'packaging'})
+    """Suffix added to dataclassInstance for parallel task distribution."""
+
+    dataclassModule: str = dataclasses.field(default='beDRY', metadata={'evaluateWhen': 'packaging'})
+    """Module containing the computation state dataclass definition."""
+
+    datatypePackage: str = dataclasses.field(default='numpy', metadata={'evaluateWhen': 'packaging'})
+    """Package providing the numeric data types used in computation."""
+
+    sourceAlgorithm: str = dataclasses.field(default='theDao', metadata={'evaluateWhen': 'packaging'})
+    """Module containing the reference implementation of the algorithm."""
+
+    sourceCallableDispatcher: str = dataclasses.field(default='doTheNeedful', metadata={'evaluateWhen': 'packaging'})
+    """Name of the function that dispatches computation in the source algorithm."""
+
+    sourceCallableInitialize: str = dataclasses.field(default='countInitialize', metadata={'evaluateWhen': 'packaging'})
+    """Name of the function that initializes computation in the source algorithm."""
+
+    sourceCallableParallel: str = dataclasses.field(default='countParallel', metadata={'evaluateWhen': 'packaging'})
+    """Name of the function that performs parallel computation in the source algorithm."""
+
+    sourceCallableSequential: str = dataclasses.field(default='countSequential', metadata={'evaluateWhen': 'packaging'})
+    """Name of the function that performs sequential computation in the source algorithm."""
+
+    sourceConcurrencyManagerIdentifier: str = dataclasses.field(default='submit', metadata={'evaluateWhen': 'packaging'})
+    """Method name used to submit tasks to the concurrency manager."""
+
+    sourceConcurrencyManagerNamespace: str = dataclasses.field(default='concurrencyManager', metadata={'evaluateWhen': 'packaging'})
+    """Variable name used for the concurrency manager instance."""
+
+    sourceConcurrencyPackage: str = dataclasses.field(default='multiprocessing', metadata={'evaluateWhen': 'packaging'})
+    """Default package used for concurrency in the source algorithm."""
+
+    dataclassInstanceTaskDistribution: str = dataclasses.field(default=None, metadata={'evaluateWhen': 'packaging'}) # pyright: ignore[reportAssignmentType]
+    """Variable name for the parallel distribution instance of the computation state."""
+
+    logicalPathModuleDataclass: str = dataclasses.field(default=None, metadata={'evaluateWhen': 'packaging'}) # pyright: ignore[reportAssignmentType]
+    """Fully qualified import path to the module containing the computation state dataclass."""
+
+    logicalPathModuleSourceAlgorithm: str = dataclasses.field(default=None, metadata={'evaluateWhen': 'packaging'}) # pyright: ignore[reportAssignmentType]
+    """Fully qualified import path to the module containing the source algorithm."""
+
+    def __post_init__(self) -> None:
+        if self.dataclassInstanceTaskDistribution is None: # pyright: ignore[reportUnnecessaryComparison]
+            self.dataclassInstanceTaskDistribution = self.dataclassInstance + self.dataclassInstanceTaskDistributionSuffix
+
+        if self.logicalPathModuleDataclass is None: # pyright: ignore[reportUnnecessaryComparison]
+            self.logicalPathModuleDataclass = '.'.join([self.packageName, self.dataclassModule])
+        if self.logicalPathModuleSourceAlgorithm is None: # pyright: ignore[reportUnnecessaryComparison]
+            self.logicalPathModuleSourceAlgorithm = '.'.join([self.packageName, self.sourceAlgorithm])
+
+class raiseIfNoneGitHubIssueNumber3(Exception): pass
+
+packageInformation = PackageInformation()
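The `__post_init__` above fills in fields whose defaults depend on other fields, so a caller can override any one setting and the derived values follow. A self-contained sketch of the same pattern (the class and field names here are illustrative, not the package's real settings):

    import dataclasses

    @dataclasses.dataclass
    class Settings:
        packageName: str = 'mapFolding'
        dataclassModule: str = 'beDRY'
        # Declared with a placeholder default; the real value is derived after __init__ runs.
        logicalPathModuleDataclass: str = dataclasses.field(default=None)  # type: ignore[assignment]

        def __post_init__(self) -> None:
            if self.logicalPathModuleDataclass is None:
                self.logicalPathModuleDataclass = '.'.join([self.packageName, self.dataclassModule])

    print(Settings().logicalPathModuleDataclass)                               # mapFolding.beDRY
    print(Settings(dataclassModule='dataBaskets').logicalPathModuleDataclass)  # mapFolding.dataBaskets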
@@ -1,9 +1,10 @@
-from mapFolding import getPathFilenameFoldsTotal,
-from mapFolding.someAssemblyRequired import
+from mapFolding import getPathFilenameFoldsTotal, MapFoldingState
+from mapFolding.someAssemblyRequired import IfThis, packageInformation, raiseIfNoneGitHubIssueNumber3
+from astToolkit import (
     ast_Identifier,
     Be,
+    ClassIsAndAttribute,
     extractFunctionDef,
-    IfThis,
     IngredientsFunction,
     IngredientsModule,
     LedgerOfImports,
@@ -12,12 +13,11 @@ from mapFolding.someAssemblyRequired import (
     NodeTourist,
     str_nameDOTname,
     Then,
-    write_astModule,
 )
+from astToolkit.transformationTools import write_astModule
 from mapFolding.someAssemblyRequired.RecipeJob import RecipeJobTheorem2Numba
-from mapFolding.someAssemblyRequired.
+from mapFolding.someAssemblyRequired.toolkitNumba import decorateCallableWithNumba, parametersNumbaLight, SpicesJobNumba
 from mapFolding.syntheticModules.initializeCount import initializeGroupsOfFolds
-from mapFolding.dataBaskets import MapFoldingState
 from pathlib import PurePosixPath
 from typing import cast, NamedTuple
 from Z0Z_tools import autoDecodingRLE
@@ -77,7 +77,7 @@ if __name__ == '__main__':
     ast_argNumbaProgress = ast.arg(arg=spices.numbaProgressBarIdentifier, annotation=ast.Name(id=numba_progressPythonClass, ctx=ast.Load()))
     ingredientsFunction.astFunctionDef.args.args.append(ast_argNumbaProgress)

-    findThis =
+    findThis = ClassIsAndAttribute.targetIs(ast.AugAssign, IfThis.isName_Identifier(job.shatteredDataclass.countingVariableName.id))
     doThat = Then.replaceWith(Make.Expr(Make.Call(Make.Attribute(Make.Name(spices.numbaProgressBarIdentifier),'update'),[Make.Constant(1)])))
     countWithProgressBar = NodeChanger(findThis, doThat)
     countWithProgressBar.visit(ingredientsFunction.astFunctionDef)
@@ -161,10 +161,11 @@ def makeJobNumba(job: RecipeJobTheorem2Numba, spices: SpicesJobNumba) -> None:
     ingredientsCount: IngredientsFunction = IngredientsFunction(astFunctionDef, LedgerOfImports())

     # Remove `foldGroups` and any other unused statements, so you can dynamically determine which variables are not used
-    findThis =
+    findThis = ClassIsAndAttribute.targetsIs(ast.Assign, lambda list_expr: any([IfThis.isSubscript_Identifier('foldGroups')(node) for node in list_expr ]))
+    # findThis = IfThis.isAssignAndTargets0Is(IfThis.isSubscript_Identifier('foldGroups'))
     doThat = Then.removeIt
     remove_foldGroups = NodeChanger(findThis, doThat)
-    remove_foldGroups.visit(ingredientsCount.astFunctionDef)
+    # remove_foldGroups.visit(ingredientsCount.astFunctionDef)

     # replace identifiers with static values with their values, so you can dynamically determine which variables are not used
     list_IdentifiersStaticValues = list_IdentifiersStaticValuesHARDCODED
@@ -183,7 +184,7 @@ def makeJobNumba(job: RecipeJobTheorem2Numba, spices: SpicesJobNumba) -> None:
 if __name__ == '__main__':
     import time
     timeStart = time.perf_counter()
-    foldsTotal = {job.countCallable}() * {job.state.leavesTotal}
+    foldsTotal = int({job.countCallable}() * {job.state.leavesTotal})
     print(time.perf_counter() - timeStart)
     print('\\nmap {job.state.mapShape} =', foldsTotal)
     writeStream = open('{job.pathFilenameFoldsTotal.as_posix()}', 'w')
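The diff does not say why the product in the generated job module is now wrapped in `int(...)`. One plausible motivation (an assumption on my part, not stated in the package) is that a Numba-compiled counter returns a fixed-width NumPy integer, whose products can silently wrap, while Python's `int` is arbitrary precision:

    import numpy as np

    groupsOfFolds = np.int64(4_611_686_018_427_387_904)  # 2**62, near the int64 ceiling
    print(groupsOfFolds * 4)       # wraps around: the fixed-width product overflows
    print(int(groupsOfFolds) * 4)  # 18446744073709551616, exact: Python ints do not overflow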
@@ -260,13 +261,13 @@ if __name__ == '__main__':
 """

 if __name__ == '__main__':
-    mapShape = (
+    mapShape = (2,4)
     state = MapFoldingState(mapShape)
     state = initializeGroupsOfFolds(state)
     # foldsTotalEstimated = getFoldsTotalKnown(state.mapShape) // state.leavesTotal
     # foldsTotalEstimated = dictionaryEstimates[state.mapShape] // state.leavesTotal
     foldsTotalEstimated = 0
-    pathModule = PurePosixPath(
+    pathModule = PurePosixPath(packageInformation.pathPackage, 'jobs')
     pathFilenameFoldsTotal = PurePosixPath(getPathFilenameFoldsTotal(state.mapShape, pathModule))
     aJob = RecipeJobTheorem2Numba(state, foldsTotalEstimated, pathModule=pathModule, pathFilenameFoldsTotal=pathFilenameFoldsTotal)
     spices = SpicesJobNumba(useNumbaProgressBar=False, parametersNumba=parametersNumbaLight)
@@ -17,16 +17,13 @@ performance improvements while preserving code semantics and correctness.

 from collections.abc import Callable, Sequence
 from mapFolding import NotRequired, TypedDict
-from
-from
+from astToolkit import ast_Identifier, IngredientsFunction, Make, str_nameDOTname
+from astToolkit.transformationTools import write_astModule
 from numba.core.compiler import CompilerBase as numbaCompilerBase
 from typing import Any, cast, Final
 import ast
 import dataclasses

-# Consolidate settings classes through inheritance https://github.com/hunterhogan/mapFolding/issues/15
-theNumbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()
-
 class ParametersNumba(TypedDict):
     _dbg_extend_lifetimes: NotRequired[bool]
     _dbg_optnone: NotRequired[bool]
@@ -128,47 +125,8 @@ def decorateCallableWithNumba(ingredientsFunction: IngredientsFunction, paramete
     ingredientsFunction.astFunctionDef.decorator_list = [astDecorator]
     return ingredientsFunction

-# Consolidate settings classes through inheritance https://github.com/hunterhogan/mapFolding/issues/15
 @dataclasses.dataclass
 class SpicesJobNumba:
     useNumbaProgressBar: bool = True
     numbaProgressBarIdentifier: ast_Identifier = 'ProgressBarGroupsOfFolds'
     parametersNumba: ParametersNumba = dataclasses.field(default_factory=ParametersNumba) # type: ignore
-
-# Consolidate settings classes through inheritance https://github.com/hunterhogan/mapFolding/issues/15
-def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow) -> None:
-    """
-    Transform standard Python algorithm code into optimized Numba implementations.
-
-    This function implements the complete transformation assembly line that converts
-    a conventional Python implementation into a high-performance Numba-accelerated
-    version. The process includes:
-
-    1. Extracting core algorithm functions from the source module
-    2. Inlining function calls to create self-contained implementations
-    3. Transforming dataclass access patterns for Numba compatibility
-    4. Applying appropriate Numba decorators with optimization settings
-    5. Generating a unified module with sequential and parallel implementations
-    6. Writing the transformed code to the filesystem with properly managed imports
-
-    The transformation preserves the logical structure and semantics of the original
-    implementation while making it compatible with Numba's constraints and
-    optimization capabilities. This creates a bridge between the general-purpose
-    implementation and the highly-optimized version needed for production use.
-
-    Parameters:
-        numbaFlow: Configuration object that specifies all aspects of the
-            transformation process, including source and target locations,
-            function and variable names, and output paths.
-    """
-
-    ingredientsModuleNumbaUnified = makeNewFlow(numbaFlow)
-
-    # numba decorators =========================================
-    ingredientsModuleNumbaUnified.listIngredientsFunctions[1] = decorateCallableWithNumba(ingredientsModuleNumbaUnified.listIngredientsFunctions[1])
-    ingredientsModuleNumbaUnified.listIngredientsFunctions[2] = decorateCallableWithNumba(ingredientsModuleNumbaUnified.listIngredientsFunctions[2])
-
-    write_astModule(ingredientsModuleNumbaUnified, numbaFlow.pathFilenameDispatcher, numbaFlow.packageIdentifier)
-
-if __name__ == '__main__':
-    makeNumbaFlow(theNumbaFlow)
@@ -18,79 +18,27 @@ readable, maintainable implementations to highly optimized versions while preser
 logical structure and correctness.
 """

-from
-from mapFolding.beDRY import outfitCountFolds
+from astToolkit import ClassIsAndAttribute
 from mapFolding.someAssemblyRequired import (
+    DeReConstructField2ast,
+    IfThis,
+    ShatteredDataclass,
+)
+from astToolkit import(
     ast_Identifier,
-    astModuleToIngredientsFunction,
     Be,
-    DeReConstructField2ast,
     extractClassDef,
-    Grab,
-    IfThis,
-    importLogicalPath2Callable,
     IngredientsFunction,
-    IngredientsModule,
-    inlineFunctionDef,
-    LedgerOfImports,
     Make,
     NodeChanger,
-    NodeTourist,
     parseLogicalPath2astModule,
-    RecipeSynthesizeFlow,
-    removeUnusedParameters,
-    ShatteredDataclass,
     str_nameDOTname,
     Then,
-    unparseFindReplace,
 )
-from
-from
-from os import PathLike
-from pathlib import Path, PurePath
-from typing import Any, Literal, overload
+from astToolkit.transformationTools import unparseFindReplace
+from Z0Z_tools import importLogicalPath2Callable
 import ast
 import dataclasses
-import pickle
-
-@overload
-def makeInitializedComputationState(mapShape: tuple[int, ...], writeJob: Literal[True], *, pathFilename: PathLike[str] | PurePath | None = None, **keywordArguments: Any) -> Path: ...
-@overload
-def makeInitializedComputationState(mapShape: tuple[int, ...], writeJob: Literal[False] = False, **keywordArguments: Any) -> ComputationState: ...
-def makeInitializedComputationState(mapShape: tuple[int, ...], writeJob: bool = False, *, pathFilename: PathLike[str] | PurePath | None = None, **keywordArguments: Any) -> ComputationState | Path:
-    """
-    Initializes a computation state and optionally saves it to disk.
-
-    This function initializes a computation state using the source algorithm.
-
-    Hint: If you want an uninitialized state, call `outfitCountFolds` directly.
-
-    Parameters:
-        mapShape: List of integers representing the dimensions of the map to be folded.
-        writeJob (False): Whether to save the state to disk.
-        pathFilename (getPathFilenameFoldsTotal.pkl): The path and filename to save the state. If None, uses a default path.
-        **keywordArguments: computationDivisions:int|str|None=None,concurrencyLimit:int=1.
-    Returns:
-        stateUniversal|pathFilenameJob: The computation state for the map folding calculations, or
-            the path to the saved state file if writeJob is True.
-    """
-    stateUniversal: ComputationState = outfitCountFolds(mapShape, **keywordArguments)
-
-    initializeState = importLogicalPath2Callable(The.logicalPathModuleSourceAlgorithm, The.sourceCallableInitialize)
-    stateUniversal = initializeState(stateUniversal)
-
-    if not writeJob:
-        return stateUniversal
-
-    if pathFilename:
-        pathFilenameJob = Path(pathFilename)
-        pathFilenameJob.parent.mkdir(parents=True, exist_ok=True)
-    else:
-        pathFilenameJob = getPathFilenameFoldsTotal(stateUniversal.mapShape).with_suffix('.pkl')
-
-    # Fix code scanning alert - Consider possible security implications associated with pickle module. #17
-    pathFilenameJob.write_bytes(pickle.dumps(stateUniversal))
-    return pathFilenameJob

 def shatter_dataclassesDOTdataclass(logicalPathModule: str_nameDOTname, dataclass_Identifier: ast_Identifier, instance_Identifier: ast_Identifier) -> ShatteredDataclass:
     """
@@ -116,8 +64,8 @@ def shatter_dataclassesDOTdataclass(logicalPathModule: str_nameDOTname, dataclas
         instance_Identifier: The variable name to use for the dataclass instance in generated code.

     Returns:
-        A ShatteredDataclass containing AST representations of all dataclass components,
-
+        shatteredDataclass: A ShatteredDataclass containing AST representations of all dataclass components,
+            with imports, field definitions, annotations, and repackaging code.

     Raises:
         ValueError: If the dataclass cannot be found in the specified module or if no counting variable is identified in the dataclass.
@@ -136,7 +84,9 @@ def shatter_dataclassesDOTdataclass(logicalPathModule: str_nameDOTname, dataclas
             countingVariable = dictionaryDeReConstruction[aField.name].name

     if countingVariable is None:
-
+        import warnings
+        warnings.warn(message=f"I could not find the counting variable in `{dataclass_Identifier = }` in `{logicalPathModule = }`.", category=UserWarning)
+        raise Exception

     shatteredDataclass = ShatteredDataclass(
         countingVariableAnnotation=dictionaryDeReConstruction[countingVariable].astAnnotation,
@@ -160,114 +110,6 @@ def shatter_dataclassesDOTdataclass(logicalPathModule: str_nameDOTname, dataclas
     return shatteredDataclass

 # END of acceptable classes and functions ======================================================
-def makeNewFlow(recipeFlow: RecipeSynthesizeFlow) -> IngredientsModule:
-    # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
-    listAllIngredientsFunctions = [
-        (ingredientsInitialize := astModuleToIngredientsFunction(recipeFlow.source_astModule, recipeFlow.sourceCallableInitialize)),
-        (ingredientsParallel := astModuleToIngredientsFunction(recipeFlow.source_astModule, recipeFlow.sourceCallableParallel)),
-        (ingredientsSequential := astModuleToIngredientsFunction(recipeFlow.source_astModule, recipeFlow.sourceCallableSequential)),
-        (ingredientsDispatcher := astModuleToIngredientsFunction(recipeFlow.source_astModule, recipeFlow.sourceCallableDispatcher)),
-    ]
-
-    # Inline functions ========================================================
-    # NOTE Replacements statements are based on the identifiers in the _source_, so operate on the source identifiers.
-    ingredientsInitialize.astFunctionDef = inlineFunctionDef(recipeFlow.sourceCallableInitialize, recipeFlow.source_astModule)
-    ingredientsParallel.astFunctionDef = inlineFunctionDef(recipeFlow.sourceCallableParallel, recipeFlow.source_astModule)
-    ingredientsSequential.astFunctionDef = inlineFunctionDef(recipeFlow.sourceCallableSequential, recipeFlow.source_astModule)
-
-    # assignRecipeIdentifiersToCallable. =============================
-    # Consolidate settings classes through inheritance https://github.com/hunterhogan/mapFolding/issues/15
-    # How can I use dataclass settings as the SSOT for specific actions? https://github.com/hunterhogan/mapFolding/issues/16
-    # NOTE reminder: you are updating these `ast.Name` here (and not in a more general search) because this is a
-    # narrow search for `ast.Call` so you won't accidentally replace unrelated `ast.Name`.
-    listFindReplace = [(recipeFlow.sourceCallableDispatcher, recipeFlow.callableDispatcher),
-        (recipeFlow.sourceCallableInitialize, recipeFlow.callableInitialize),
-        (recipeFlow.sourceCallableParallel, recipeFlow.callableParallel),
-        (recipeFlow.sourceCallableSequential, recipeFlow.callableSequential),]
-    for ingredients in listAllIngredientsFunctions:
-        for source_Identifier, recipe_Identifier in listFindReplace:
-            updateCallName = NodeChanger(IfThis.isCall_Identifier(source_Identifier), Grab.funcAttribute(Then.replaceWith(Make.Name(recipe_Identifier))))
-            updateCallName.visit(ingredients.astFunctionDef)
-
-    ingredientsDispatcher.astFunctionDef.name = recipeFlow.callableDispatcher
-    ingredientsInitialize.astFunctionDef.name = recipeFlow.callableInitialize
-    ingredientsParallel.astFunctionDef.name = recipeFlow.callableParallel
-    ingredientsSequential.astFunctionDef.name = recipeFlow.callableSequential
-
-    # Assign identifiers per the recipe. ==============================
-    listFindReplace = [(recipeFlow.sourceDataclassInstance, recipeFlow.dataclassInstance),
-        (recipeFlow.sourceDataclassInstanceTaskDistribution, recipeFlow.dataclassInstanceTaskDistribution),
-        (recipeFlow.sourceConcurrencyManagerNamespace, recipeFlow.concurrencyManagerNamespace),]
-    for ingredients in listAllIngredientsFunctions:
-        for source_Identifier, recipe_Identifier in listFindReplace:
-            updateName = NodeChanger(IfThis.isName_Identifier(source_Identifier) , Grab.idAttribute(Then.replaceWith(recipe_Identifier)))
-            update_arg = NodeChanger(IfThis.isArgument_Identifier(source_Identifier), Grab.argAttribute(Then.replaceWith(recipe_Identifier)))
-            updateName.visit(ingredients.astFunctionDef)
-            update_arg.visit(ingredients.astFunctionDef)
-
-    updateConcurrencyManager = NodeChanger(IfThis.isCallAttributeNamespace_Identifier(recipeFlow.sourceConcurrencyManagerNamespace, recipeFlow.sourceConcurrencyManagerIdentifier)
-        , Grab.funcAttribute(Then.replaceWith(Make.Attribute(Make.Name(recipeFlow.concurrencyManagerNamespace), recipeFlow.concurrencyManagerIdentifier))))
-    updateConcurrencyManager.visit(ingredientsDispatcher.astFunctionDef)
-
-    # shatter Dataclass =======================================================
-    instance_Identifier = recipeFlow.dataclassInstance
-    getTheOtherRecord_damn = recipeFlow.dataclassInstanceTaskDistribution
-    shatteredDataclass = shatter_dataclassesDOTdataclass(recipeFlow.logicalPathModuleDataclass, recipeFlow.sourceDataclassIdentifier, instance_Identifier)
-    ingredientsDispatcher.imports.update(shatteredDataclass.imports)
-
-    # How can I use dataclass settings as the SSOT for specific actions? https://github.com/hunterhogan/mapFolding/issues/16
-    # Change callable parameters and Call to the callable at the same time ====
-    # sequentialCallable =========================================================
-    if recipeFlow.removeDataclassSequential:
-        ingredientsSequential = removeDataclassFromFunction(ingredientsSequential, shatteredDataclass)
-        ingredientsDispatcher = unpackDataclassCallFunctionRepackDataclass(ingredientsDispatcher, recipeFlow.callableSequential, shatteredDataclass)
-
-    if recipeFlow.removeDataclassInitialize:
-        ingredientsInitialize = removeDataclassFromFunction(ingredientsInitialize, shatteredDataclass)
-        ingredientsDispatcher = unpackDataclassCallFunctionRepackDataclass(ingredientsDispatcher, recipeFlow.callableInitialize, shatteredDataclass)
-
-    # parallelCallable =========================================================
-    if recipeFlow.removeDataclassParallel:
-        ingredientsParallel.astFunctionDef.args = Make.arguments(args=shatteredDataclass.list_argAnnotated4ArgumentsSpecification)
-
-        ingredientsParallel.astFunctionDef = unparseFindReplace(ingredientsParallel.astFunctionDef, shatteredDataclass.map_stateDOTfield2Name)
-
-        ingredientsParallel = removeUnusedParameters(ingredientsParallel)
-
-        list_argCuzMyBrainRefusesToThink = ingredientsParallel.astFunctionDef.args.args + ingredientsParallel.astFunctionDef.args.posonlyargs + ingredientsParallel.astFunctionDef.args.kwonlyargs
-        list_arg_arg: list[ast_Identifier] = [ast_arg.arg for ast_arg in list_argCuzMyBrainRefusesToThink]
-
-        listParameters = [parameter for parameter in shatteredDataclass.listName4Parameters if parameter.id in list_arg_arg]
-
-        replaceCall2concurrencyManager = NodeChanger(IfThis.isCallAttributeNamespace_Identifier(recipeFlow.concurrencyManagerNamespace, recipeFlow.concurrencyManagerIdentifier), Then.replaceWith(Make.Call(Make.Attribute(Make.Name(recipeFlow.concurrencyManagerNamespace), recipeFlow.concurrencyManagerIdentifier), [Make.Name(recipeFlow.callableParallel)] + listParameters)))
-
-        def getIt(astCallConcurrencyResult: list[ast.Call]) -> Callable[[ast.AST], ast.AST]:
-            # TODO I cannot remember why I made this function. It doesn't fit with how I normally do things.
-            def workhorse(node: ast.AST) -> ast.AST:
-                NodeTourist(Be.Call, Then.appendTo(astCallConcurrencyResult)).visit(node)
-                return node
-            return workhorse
-
-        # NOTE I am dissatisfied with this logic for many reasons, including that it requires separate NodeCollector and NodeReplacer instances.
-        astCallConcurrencyResult: list[ast.Call] = []
-        get_astCallConcurrencyResult = NodeTourist(IfThis.isAssignAndTargets0Is(IfThis.isSubscript_Identifier(getTheOtherRecord_damn)), getIt(astCallConcurrencyResult))
-        get_astCallConcurrencyResult.visit(ingredientsDispatcher.astFunctionDef)
-        replaceAssignParallelCallable = NodeChanger(IfThis.isAssignAndTargets0Is(IfThis.isSubscript_Identifier(getTheOtherRecord_damn)), Grab.valueAttribute(Then.replaceWith(astCallConcurrencyResult[0])))
-        replaceAssignParallelCallable.visit(ingredientsDispatcher.astFunctionDef)
-        changeReturnParallelCallable = NodeChanger(Be.Return, Then.replaceWith(Make.Return(shatteredDataclass.countingVariableName)))
-        ingredientsParallel.astFunctionDef.returns = shatteredDataclass.countingVariableAnnotation
-
-        unpack4parallelCallable = NodeChanger(IfThis.isAssignAndValueIs(IfThis.isCallAttributeNamespace_Identifier(recipeFlow.concurrencyManagerNamespace, recipeFlow.concurrencyManagerIdentifier)), Then.insertThisAbove(shatteredDataclass.listUnpack))
-
-        unpack4parallelCallable.visit(ingredientsDispatcher.astFunctionDef)
-        replaceCall2concurrencyManager.visit(ingredientsDispatcher.astFunctionDef)
-        changeReturnParallelCallable.visit(ingredientsParallel.astFunctionDef)
-
-    # Module-level transformations ===========================================================
-    ingredientsModuleNumbaUnified = IngredientsModule(ingredientsFunction=listAllIngredientsFunctions, imports=LedgerOfImports(recipeFlow.source_astModule))
-    ingredientsModuleNumbaUnified.removeImportFromModule('numpy')
-
-    return ingredientsModuleNumbaUnified

 def removeDataclassFromFunction(ingredientsTarget: IngredientsFunction, shatteredDataclass: ShatteredDataclass) -> IngredientsFunction:
     ingredientsTarget.astFunctionDef.args = Make.arguments(args=shatteredDataclass.list_argAnnotated4ArgumentsSpecification)
@@ -279,9 +121,9 @@ def removeDataclassFromFunction(ingredientsTarget: IngredientsFunction, shattere

 def unpackDataclassCallFunctionRepackDataclass(ingredientsCaller: IngredientsFunction, targetCallableIdentifier: ast_Identifier, shatteredDataclass: ShatteredDataclass) -> IngredientsFunction:
     astCallTargetCallable = Make.Call(Make.Name(targetCallableIdentifier), shatteredDataclass.listName4Parameters)
-    replaceAssignTargetCallable = NodeChanger(
-    unpack4targetCallable = NodeChanger(
-    repack4targetCallable = NodeChanger(
+    replaceAssignTargetCallable = NodeChanger(ClassIsAndAttribute.valueIs(ast.Assign, IfThis.isCall_Identifier(targetCallableIdentifier)), Then.replaceWith(Make.Assign([shatteredDataclass.fragments4AssignmentOrParameters], value=astCallTargetCallable)))
+    unpack4targetCallable = NodeChanger(ClassIsAndAttribute.valueIs(ast.Assign, IfThis.isCall_Identifier(targetCallableIdentifier)), Then.insertThisAbove(shatteredDataclass.listUnpack))
+    repack4targetCallable = NodeChanger(ClassIsAndAttribute.valueIs(ast.Assign, IfThis.isCall_Identifier(targetCallableIdentifier)), Then.insertThisBelow([shatteredDataclass.repack]))
     replaceAssignTargetCallable.visit(ingredientsCaller.astFunctionDef)
     unpack4targetCallable.visit(ingredientsCaller.astFunctionDef)
     repack4targetCallable.visit(ingredientsCaller.astFunctionDef)