mapFolding 0.8.3__py3-none-any.whl → 0.8.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. mapFolding/__init__.py +6 -3
  2. mapFolding/basecamp.py +13 -7
  3. mapFolding/beDRY.py +241 -68
  4. mapFolding/oeis.py +4 -4
  5. mapFolding/reference/hunterNumba.py +1 -1
  6. mapFolding/someAssemblyRequired/__init__.py +40 -20
  7. mapFolding/someAssemblyRequired/_theTypes.py +53 -0
  8. mapFolding/someAssemblyRequired/_tool_Make.py +99 -0
  9. mapFolding/someAssemblyRequired/_tool_Then.py +72 -0
  10. mapFolding/someAssemblyRequired/_toolboxAntecedents.py +358 -0
  11. mapFolding/someAssemblyRequired/_toolboxContainers.py +334 -0
  12. mapFolding/someAssemblyRequired/_toolboxPython.py +62 -0
  13. mapFolding/someAssemblyRequired/getLLVMforNoReason.py +2 -2
  14. mapFolding/someAssemblyRequired/newInliner.py +22 -0
  15. mapFolding/someAssemblyRequired/synthesizeNumbaJob.py +158 -0
  16. mapFolding/someAssemblyRequired/toolboxNumba.py +358 -0
  17. mapFolding/someAssemblyRequired/transformationTools.py +289 -698
  18. mapFolding/syntheticModules/numbaCount_doTheNeedful.py +36 -33
  19. mapFolding/theDao.py +13 -11
  20. mapFolding/theSSOT.py +83 -128
  21. mapFolding/toolboxFilesystem.py +219 -0
  22. {mapfolding-0.8.3.dist-info → mapfolding-0.8.5.dist-info}/METADATA +4 -2
  23. mapfolding-0.8.5.dist-info/RECORD +48 -0
  24. {mapfolding-0.8.3.dist-info → mapfolding-0.8.5.dist-info}/WHEEL +1 -1
  25. tests/conftest.py +56 -52
  26. tests/test_computations.py +42 -32
  27. tests/test_filesystem.py +4 -4
  28. tests/test_other.py +2 -2
  29. tests/test_tasks.py +2 -2
  30. mapFolding/filesystem.py +0 -129
  31. mapFolding/someAssemblyRequired/ingredientsNumba.py +0 -206
  32. mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py +0 -211
  33. mapFolding/someAssemblyRequired/synthesizeNumbaJobVESTIGIAL.py +0 -413
  34. mapFolding/someAssemblyRequired/transformDataStructures.py +0 -168
  35. mapfolding-0.8.3.dist-info/RECORD +0 -43
  36. {mapfolding-0.8.3.dist-info → mapfolding-0.8.5.dist-info}/entry_points.txt +0 -0
  37. {mapfolding-0.8.3.dist-info → mapfolding-0.8.5.dist-info}/licenses/LICENSE +0 -0
  38. {mapfolding-0.8.3.dist-info → mapfolding-0.8.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,334 @@
1
+ """
2
+ Container classes for AST transformations and code synthesis.
3
+
4
+ This module provides container classes used in the AST transformation process
5
+ and code synthesis workflows. It acts as a dependency boundary to prevent
6
+ circular imports while providing reusable data structures.
7
+ """
8
+ from collections import defaultdict
9
+ from collections.abc import Sequence
10
+ from mapFolding.someAssemblyRequired import ImaAnnotationType, ast_Identifier, be, Make, parseLogicalPath2astModule, str_nameDOTname
11
+ from mapFolding.theSSOT import callableDispatcherHARDCODED, The
12
+ from pathlib import Path, PurePosixPath
13
+ from Z0Z_tools import updateExtendPolishDictionaryLists
14
+ import ast
15
+ import dataclasses
16
+
17
class LedgerOfImports:
    """Accumulate, deduplicate, and re-emit import statements for a synthesized module.

    Plain imports are kept as dotted-name strings in `listImport`; from-imports are
    kept in `dictionaryImportFrom` as module -> list of (name, asname) tuples.
    `makeList_ast` materializes the ledger as sorted, duplicate-free AST nodes.
    """
    # TODO When resolving the ledger of imports, remove self-referential imports
    # TODO TypeIgnore :/

    def __init__(self, startWith: ast.AST | None = None) -> None:
        # `startWith` optionally seeds the ledger with every import found in an existing tree.
        self.dictionaryImportFrom: dict[str_nameDOTname, list[tuple[ast_Identifier, ast_Identifier | None]]] = defaultdict(list)
        self.listImport: list[str_nameDOTname] = []
        if startWith:
            self.walkThis(startWith)

    def addAst(self, astImport____: ast.Import | ast.ImportFrom) -> None:
        """Record one `ast.Import` or `ast.ImportFrom` node in the ledger.

        NOTE: for a relative `from . import x`, `module` is None; this method
        rewrites it to '.' IN PLACE on the caller's node before recording.
        """
        assert isinstance(astImport____, (ast.Import, ast.ImportFrom)), f"I received {type(astImport____) = }, but I can only accept {ast.Import} and {ast.ImportFrom}."
        if be.Import(astImport____):
            for alias in astImport____.names:
                self.listImport.append(alias.name)
        elif be.ImportFrom(astImport____):
            # TODO fix the mess created by `None` means '.'. I need a `str_nameDOTname` to replace '.'
            if astImport____.module is None:
                astImport____.module = '.'
            for alias in astImport____.names:
                self.dictionaryImportFrom[astImport____.module].append((alias.name, alias.asname))

    def addImport_asStr(self, moduleIdentifier: str_nameDOTname) -> None:
        """Record `import moduleIdentifier` from its dotted-name string."""
        self.listImport.append(moduleIdentifier)

    def addImportFrom_asStr(self, moduleIdentifier: ast_Identifier, name: ast_Identifier, asname: ast_Identifier | None = None) -> None:
        """Record `from moduleIdentifier import name [as asname]` from plain strings."""
        self.dictionaryImportFrom[moduleIdentifier].append((name, asname))

    def exportListModuleIdentifiers(self) -> list[ast_Identifier]:
        """Return a sorted, de-duplicated list of every module identifier in the ledger."""
        listModuleIdentifiers: list[ast_Identifier] = list(self.dictionaryImportFrom.keys())
        listModuleIdentifiers.extend(self.listImport)
        return sorted(set(listModuleIdentifiers))

    def makeList_ast(self) -> list[ast.ImportFrom | ast.Import]:
        """Materialize the ledger as AST nodes: from-imports first, then plain imports, both sorted."""
        listImportFrom: list[ast.ImportFrom] = []
        for moduleIdentifier, listOfNameTuples in sorted(self.dictionaryImportFrom.items()):
            # De-duplicate the (name, asname) pairs and order them by imported name.
            listOfNameTuples = sorted(list(set(listOfNameTuples)), key=lambda nameTuple: nameTuple[0])
            list_alias: list[ast.alias] = []
            for name, asname in listOfNameTuples:
                list_alias.append(Make.alias(name, asname))
            listImportFrom.append(Make.ImportFrom(moduleIdentifier, list_alias))
        list_astImport: list[ast.Import] = [Make.Import(moduleIdentifier) for moduleIdentifier in sorted(set(self.listImport))]
        return listImportFrom + list_astImport

    def update(self, *fromLedger: 'LedgerOfImports') -> None:
        """Update this ledger with imports from one or more other ledgers.

        Parameters:
            *fromLedger: One or more other `LedgerOfImports` objects from which to merge.
        """
        # `updateExtendPolishDictionaryLists` merges the from-import dictionaries,
        # dropping duplicates and reordering the value lists.
        self.dictionaryImportFrom = updateExtendPolishDictionaryLists(self.dictionaryImportFrom, *(ledger.dictionaryImportFrom for ledger in fromLedger), destroyDuplicates=True, reorderLists=True)
        for ledger in fromLedger:
            self.listImport.extend(ledger.listImport)

    def walkThis(self, walkThis: ast.AST) -> None:
        """Walk an AST and record every import statement found anywhere inside it."""
        for nodeBuffalo in ast.walk(walkThis):
            if isinstance(nodeBuffalo, (ast.Import, ast.ImportFrom)):
                self.addAst(nodeBuffalo)
74
+
75
@dataclasses.dataclass
class IngredientsFunction:
    """Everything necessary to integrate a function into a module should be here.

    Parameters:
        astFunctionDef: hint `Make.astFunctionDef()`
    """
    astFunctionDef: ast.FunctionDef
    # Import statements this function needs when placed into a module.
    imports: LedgerOfImports = dataclasses.field(default_factory=LedgerOfImports)
    # `ast.TypeIgnore` entries associated with this function.
    type_ignores: list[ast.TypeIgnore] = dataclasses.field(default_factory=list)
84
+
85
@dataclasses.dataclass
class IngredientsModule:
    """Everything necessary to create one _logical_ `ast.Module` should be here.
    Extrinsic qualities should _probably_ be handled externally.

    Parameters:
        ingredientsFunction (None): One or more `IngredientsFunction` that will be appended to `listIngredientsFunctions`.
    """
    ingredientsFunction: dataclasses.InitVar[Sequence[IngredientsFunction] | IngredientsFunction | None] = None

    # init var with an existing module? method to deconstruct an existing module?

    # `body` attribute of `ast.Module`
    """NOTE
    - Bare statements in `prologue` and `epilogue` are not 'protected' by `if __name__ == '__main__':` so they will be executed merely by loading the module.
    - The dataclass has methods for modifying `prologue`, `epilogue`, and `launcher`.
    - However, `prologue`, `epilogue`, and `launcher` are `ast.Module` (as opposed to `list[ast.stmt]`), so that you may use tools such as `ast.walk` and `ast.NodeVisitor` on the fields.
    """
    imports: LedgerOfImports = dataclasses.field(default_factory=LedgerOfImports)
    """Modify this field using the methods in `LedgerOfImports`."""
    # BUG FIX: these three fields previously defaulted to a single class-level
    # `Make.Module([],[])`, so every instance shared (and mutated) the SAME
    # `ast.Module` object. `default_factory` gives each instance a fresh one.
    prologue: ast.Module = dataclasses.field(default_factory=lambda: Make.Module([], []))
    """Statements after the imports and before the functions in listIngredientsFunctions."""
    listIngredientsFunctions: list[IngredientsFunction] = dataclasses.field(default_factory=list)
    epilogue: ast.Module = dataclasses.field(default_factory=lambda: Make.Module([], []))
    """Statements after the functions in listIngredientsFunctions and before `launcher`."""
    launcher: ast.Module = dataclasses.field(default_factory=lambda: Make.Module([], []))
    """`if __name__ == '__main__':`"""

    # `ast.TypeIgnore` statements to supplement those in other fields; `type_ignores` is a parameter for `ast.Module` constructor
    supplemental_type_ignores: list[ast.TypeIgnore] = dataclasses.field(default_factory=list)

    def __post_init__(self, ingredientsFunction: Sequence[IngredientsFunction] | IngredientsFunction | None = None) -> None:
        # Accept a single `IngredientsFunction` or any sequence of them.
        if ingredientsFunction is not None:
            if isinstance(ingredientsFunction, IngredientsFunction):
                self.appendIngredientsFunction(ingredientsFunction)
            else:
                self.appendIngredientsFunction(*ingredientsFunction)

    def _append_astModule(self, self_astModule: ast.Module, astModule: ast.Module | None, statement: Sequence[ast.stmt] | ast.stmt | None, type_ignores: list[ast.TypeIgnore] | None) -> None:
        """Append the body and type-ignores of `astModule`, plus any loose `statement`(s) and `type_ignores`, to `self_astModule`."""
        list_body: list[ast.stmt] = []
        listTypeIgnore: list[ast.TypeIgnore] = []
        if astModule is not None and be.Module(astModule):
            list_body.extend(astModule.body)
            listTypeIgnore.extend(astModule.type_ignores)
        if type_ignores is not None:
            listTypeIgnore.extend(type_ignores)
        if statement is not None:
            if isinstance(statement, Sequence):
                list_body.extend(statement)
            else:
                list_body.append(statement)
        self_astModule.body.extend(list_body)
        self_astModule.type_ignores.extend(listTypeIgnore)
        # New nodes may lack lineno/col_offset; normalize before the module is unparsed.
        ast.fix_missing_locations(self_astModule)

    def appendPrologue(self, astModule: ast.Module | None = None, statement: Sequence[ast.stmt] | ast.stmt | None = None, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
        """Append one or more statements to `prologue`."""
        self._append_astModule(self.prologue, astModule, statement, type_ignores)

    def appendEpilogue(self, astModule: ast.Module | None = None, statement: Sequence[ast.stmt] | ast.stmt | None = None, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
        """Append one or more statements to `epilogue`."""
        self._append_astModule(self.epilogue, astModule, statement, type_ignores)

    def appendLauncher(self, astModule: ast.Module | None = None, statement: Sequence[ast.stmt] | ast.stmt | None = None, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
        """Append one or more statements to `launcher`."""
        self._append_astModule(self.launcher, astModule, statement, type_ignores)

    def appendIngredientsFunction(self, *ingredientsFunction: IngredientsFunction) -> None:
        """Append one or more `IngredientsFunction`.

        Raises:
            ValueError: if any argument is not an `IngredientsFunction`.
        """
        for allegedIngredientsFunction in ingredientsFunction:
            if isinstance(allegedIngredientsFunction, IngredientsFunction):
                self.listIngredientsFunctions.append(allegedIngredientsFunction)
            else:
                raise ValueError(f"I received `{type(allegedIngredientsFunction) = }`, but I can only accept `{IngredientsFunction}`.")

    @property
    def list_astImportImportFrom(self) -> list[ast.Import | ast.ImportFrom]:
        """List of `ast.Import` and `ast.ImportFrom` statements.

        Merges the module-level ledger with every function's ledger through a
        throwaway `LedgerOfImports`, so no stored ledger is modified.
        """
        sherpaLedger = LedgerOfImports()
        listLedgers: list[LedgerOfImports] = [self.imports]
        for ingredientsFunction in self.listIngredientsFunctions:
            listLedgers.append(ingredientsFunction.imports)
        sherpaLedger.update(*listLedgers)
        return sherpaLedger.makeList_ast()

    @property
    def body(self) -> list[ast.stmt]:
        """Assemble the full statement list: imports, prologue, functions, epilogue, launcher."""
        list_stmt: list[ast.stmt] = []
        list_stmt.extend(self.list_astImportImportFrom)
        list_stmt.extend(self.prologue.body)
        for ingredientsFunction in self.listIngredientsFunctions:
            list_stmt.append(ingredientsFunction.astFunctionDef)
        list_stmt.extend(self.epilogue.body)
        list_stmt.extend(self.launcher.body)
        # TODO `launcher`, if it exists, must start with `if __name__ == '__main__':` and be indented
        return list_stmt

    @property
    def type_ignores(self) -> list[ast.TypeIgnore]:
        """Aggregate `ast.TypeIgnore` entries from all constituent parts.

        BUG FIX: starts from a COPY of `supplemental_type_ignores`; the previous
        version aliased the field and `.extend()`ed it, so every property access
        permanently appended duplicates to `supplemental_type_ignores`.
        """
        listTypeIgnore: list[ast.TypeIgnore] = list(self.supplemental_type_ignores)
        listTypeIgnore.extend(self.prologue.type_ignores)
        for ingredientsFunction in self.listIngredientsFunctions:
            listTypeIgnore.extend(ingredientsFunction.type_ignores)
        listTypeIgnore.extend(self.epilogue.type_ignores)
        listTypeIgnore.extend(self.launcher.type_ignores)
        return listTypeIgnore
193
+
194
@dataclasses.dataclass
class RecipeSynthesizeFlow:
    """Settings for synthesizing flow.

    Each `source*` field records an identifier from the source algorithm; the
    corresponding non-`source` field defaults to it, so overriding a field
    renames the object in the synthesized output.
    """
    # ========================================
    # Source
    # ========================================
    # No annotation, so this is a plain class attribute (NOT a dataclass field),
    # parsed once when the class is created.
    source_astModule = parseLogicalPath2astModule(The.logicalPathModuleSourceAlgorithm)

    # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
    sourceCallableDispatcher: ast_Identifier = The.sourceCallableDispatcher
    sourceCallableInitialize: ast_Identifier = The.sourceCallableInitialize
    sourceCallableParallel: ast_Identifier = The.sourceCallableParallel
    sourceCallableSequential: ast_Identifier = The.sourceCallableSequential

    sourceDataclassIdentifier: ast_Identifier = The.dataclassIdentifier
    sourceDataclassInstance: ast_Identifier = The.dataclassInstance
    sourceDataclassInstanceTaskDistribution: ast_Identifier = The.dataclassInstanceTaskDistribution
    sourceLogicalPathModuleDataclass: str_nameDOTname = The.logicalPathModuleDataclass

    sourceConcurrencyManagerNamespace = The.sourceConcurrencyManagerNamespace
    sourceConcurrencyManagerIdentifier = The.sourceConcurrencyManagerIdentifier

    # ========================================
    # Logical identifiers (as opposed to physical identifiers)
    # ========================================
    # Package ================================
    packageIdentifier: ast_Identifier | None = The.packageName

    # Qualified logical path ================================
    logicalPathModuleDataclass: str_nameDOTname = sourceLogicalPathModuleDataclass
    logicalPathFlowRoot: ast_Identifier | None = 'syntheticModules'
    """ `logicalPathFlowRoot` likely corresponds to a physical filesystem directory."""

    # Module ================================
    moduleDispatcher: ast_Identifier = 'numbaCount_doTheNeedful'
    moduleInitialize: ast_Identifier = moduleDispatcher
    moduleParallel: ast_Identifier = moduleDispatcher
    moduleSequential: ast_Identifier = moduleDispatcher

    # Function ================================
    callableDispatcher: ast_Identifier = sourceCallableDispatcher
    callableInitialize: ast_Identifier = sourceCallableInitialize
    callableParallel: ast_Identifier = sourceCallableParallel
    callableSequential: ast_Identifier = sourceCallableSequential
    concurrencyManagerNamespace: ast_Identifier = sourceConcurrencyManagerNamespace
    concurrencyManagerIdentifier: ast_Identifier = sourceConcurrencyManagerIdentifier
    dataclassIdentifier: ast_Identifier = sourceDataclassIdentifier

    # Variable ================================
    dataclassInstance: ast_Identifier = sourceDataclassInstance
    dataclassInstanceTaskDistribution: ast_Identifier = sourceDataclassInstanceTaskDistribution

    # ========================================
    # Computed
    # ========================================
    """
    theFormatStrModuleSynthetic = "{packageFlow}Count"
    theFormatStrModuleForCallableSynthetic = theFormatStrModuleSynthetic + "_{callableTarget}"
    theModuleDispatcherSynthetic: ast_Identifier = theFormatStrModuleForCallableSynthetic.format(packageFlow=packageFlowSynthetic, callableTarget=The.sourceCallableDispatcher)
    theLogicalPathModuleDispatcherSynthetic: str = '.'.join([The.packageName, The.moduleOfSyntheticModules, theModuleDispatcherSynthetic])

    """
    # logicalPathModuleDispatcher: str = '.'.join([Z0Z_flowLogicalPathRoot, moduleDispatcher])
    # ========================================
    # Filesystem (names of physical objects)
    # ========================================
    pathPackage: PurePosixPath | None = PurePosixPath(The.pathPackage)
    fileExtension: str = The.fileExtension

    def _makePathFilename(self, filenameStem: str,
            pathRoot: PurePosixPath | None = None,
            logicalPathINFIX: str_nameDOTname | None = None,
            fileExtension: str | None = None,
            ) -> PurePosixPath:
        """Build the path to a synthesized file.

        Parameters:
            filenameStem: (hint: the name of the logical module)
            pathRoot (None): defaults to `pathPackage`, falling back to the current working directory.
            logicalPathINFIX (None): dotted logical path appended to `pathRoot` as subdirectories.
            fileExtension (None): defaults to `self.fileExtension`.
        """
        if pathRoot is None:
            pathRoot = self.pathPackage or PurePosixPath(Path.cwd())
        if logicalPathINFIX:
            whyIsThisStillAThing: list[str] = logicalPathINFIX.split('.')
            pathRoot = pathRoot.joinpath(*whyIsThisStillAThing)
        if fileExtension is None:
            fileExtension = self.fileExtension
        filename: str = filenameStem + fileExtension
        return pathRoot.joinpath(filename)

    @property
    def pathFilenameDispatcher(self) -> PurePosixPath:
        return self._makePathFilename(filenameStem=self.moduleDispatcher, logicalPathINFIX=self.logicalPathFlowRoot)

    @property
    def pathFilenameInitialize(self) -> PurePosixPath:
        return self._makePathFilename(filenameStem=self.moduleInitialize, logicalPathINFIX=self.logicalPathFlowRoot)

    @property
    def pathFilenameParallel(self) -> PurePosixPath:
        return self._makePathFilename(filenameStem=self.moduleParallel, logicalPathINFIX=self.logicalPathFlowRoot)

    @property
    def pathFilenameSequential(self) -> PurePosixPath:
        return self._makePathFilename(filenameStem=self.moduleSequential, logicalPathINFIX=self.logicalPathFlowRoot)

    def __post_init__(self) -> None:
        # Warn when the concurrency-manager override looks inconsistent: a changed
        # identifier, or an identifier/namespace pair where exactly one is None.
        if ((self.concurrencyManagerIdentifier is not None and self.concurrencyManagerIdentifier != self.sourceConcurrencyManagerIdentifier) # `submit` # type: ignore
            or ((self.concurrencyManagerIdentifier is None) != (self.concurrencyManagerNamespace is None))): # type: ignore
            import warnings
            # BUG FIX: `warnings.warn` requires `category` to be a `Warning`
            # subclass; the original passed `ChildProcessError`, which made this
            # branch raise `TypeError` instead of warning. Message unchanged.
            warnings.warn(f"If your synthesized module is weird, check `{self.concurrencyManagerIdentifier=}` and `{self.concurrencyManagerNamespace=}`. (ChildProcessError? 'Yeah! Children shouldn't be processing stuff, man.')", category=UserWarning, stacklevel=2)

        # self.logicalPathModuleDispatcher!=logicalPathModuleDispatcherHARDCODED or
        if self.callableDispatcher!=callableDispatcherHARDCODED:
            print(f"fyi: `{self.callableDispatcher=}` but\n\t`{callableDispatcherHARDCODED=}`.")
301
+
302
# Placeholder AST nodes kept at module level for backward compatibility with
# existing importers; the dataclass below no longer uses them as shared defaults.
dummyAssign = Make.Assign([Make.Name("dummyTarget")], Make.Constant(None))
dummySubscript = Make.Subscript(Make.Name("dummy"), Make.Name("slice"))
dummyTuple = Make.Tuple([Make.Name("dummyElement")])

@dataclasses.dataclass
class ShatteredDataclass:
    """The pieces of a dataclass 'shattered' into standalone AST fragments for code synthesis."""
    countingVariableAnnotation: ImaAnnotationType
    """Type annotation for the counting variable extracted from the dataclass."""
    countingVariableName: ast.Name
    """AST name node representing the counting variable identifier."""
    field2AnnAssign: dict[ast_Identifier, ast.AnnAssign] = dataclasses.field(default_factory=dict)
    """Maps field names to their corresponding AST call expressions."""
    Z0Z_field2AnnAssign: dict[ast_Identifier, tuple[ast.AnnAssign, str]] = dataclasses.field(default_factory=dict)
    # BUG FIX: the AST-node placeholders below previously defaulted to the shared
    # module-level dummy objects, so every instance aliased the SAME mutable node;
    # `default_factory` now builds a fresh placeholder per instance.
    fragments4AssignmentOrParameters: ast.Tuple = dataclasses.field(default_factory=lambda: Make.Tuple([Make.Name("dummyElement")]))
    """AST tuple used as target for assignment to capture returned fragments."""
    ledger: LedgerOfImports = dataclasses.field(default_factory=LedgerOfImports)
    """Import records for the dataclass and its constituent parts."""
    list_argAnnotated4ArgumentsSpecification: list[ast.arg] = dataclasses.field(default_factory=list)
    """Function argument nodes with annotations for parameter specification."""
    list_keyword_field__field4init: list[ast.keyword] = dataclasses.field(default_factory=list)
    """Keyword arguments for dataclass initialization with field=field format."""
    listAnnotations: list[ImaAnnotationType] = dataclasses.field(default_factory=list)
    """Type annotations for each dataclass field."""
    listName4Parameters: list[ast.Name] = dataclasses.field(default_factory=list)
    """Name nodes for each dataclass field used as function parameters."""
    listUnpack: list[ast.AnnAssign] = dataclasses.field(default_factory=list)
    """Annotated assignment statements to extract fields from dataclass."""
    map_stateDOTfield2Name: dict[ast.expr, ast.Name] = dataclasses.field(default_factory=dict)
    """Maps AST expressions to Name nodes for find-replace operations."""
    repack: ast.Assign = dataclasses.field(default_factory=lambda: Make.Assign([Make.Name("dummyTarget")], Make.Constant(None)))
    """AST assignment statement that reconstructs the original dataclass instance."""
    signatureReturnAnnotation: ast.Subscript = dataclasses.field(default_factory=lambda: Make.Subscript(Make.Name("dummy"), Make.Name("slice")))
    """tuple-based return type annotation for function definitions."""
@@ -0,0 +1,62 @@
1
+ from collections.abc import Callable, Sequence
2
+ from inspect import getsource as inspect_getsource
3
+ from mapFolding.someAssemblyRequired import ast_Identifier, str_nameDOTname, 个
4
+ from os import PathLike
5
+ from pathlib import Path, PurePath
6
+ from types import ModuleType
7
+ from typing import Any, cast, Generic, TypeGuard
8
+ import ast
9
+ import importlib
10
+ import importlib.util
11
+
12
+ # TODO Identify the logic that narrows the type and can help the user during static type checking.
13
+
14
class NodeTourist(ast.NodeVisitor, Generic[个]):
    """Walk a tree, applying `doThat` to every node matching `findThis`.

    The last non-None result of `doThat` is retained in `nodeCaptured`.
    """
    def __init__(self, findThis: Callable[[个], TypeGuard[个] | bool], doThat: Callable[[个], 个 | None]) -> None:
        self.findThis = findThis
        self.doThat = doThat
        self.nodeCaptured: 个 | None = None

    def visit(self, node: 个) -> None: # pyright: ignore [reportGeneralTypeIssues]
        # On a predicate match, run the action and keep its result unless it is None;
        # traversal always continues into the children.
        if self.findThis(node):
            outcome = self.doThat(node)
            self.nodeCaptured = self.nodeCaptured if outcome is None else outcome
        self.generic_visit(cast(ast.AST, node))

    def captureLastMatch(self, node: 个) -> 个 | None: # pyright: ignore [reportGeneralTypeIssues]
        """Reset the capture, walk `node`, and return the final captured result (or None)."""
        self.nodeCaptured = None
        self.visit(node)
        return self.nodeCaptured
31
+
32
class NodeChanger(ast.NodeTransformer, Generic[个]):
    """Transformer that swaps every node matching `findThis` for the result of `doThat`.

    A match short-circuits traversal: the replacement (or None, meaning removal)
    is returned without visiting the matched node's children.
    """
    def __init__(self, findThis: Callable[[个], bool], doThat: Callable[[个], Sequence[个] | 个 | None]) -> None:
        self.findThis = findThis
        self.doThat = doThat

    def visit(self, node: 个) -> Sequence[个] | 个 | None: # pyright: ignore [reportGeneralTypeIssues]
        if not self.findThis(node):
            # No match: fall back to the standard transformer dispatch.
            return super().visit(cast(ast.AST, node))
        return self.doThat(node)
41
+
42
def importLogicalPath2Callable(logicalPathModule: str_nameDOTname, identifier: ast_Identifier, packageIdentifierIfRelative: ast_Identifier | None = None) -> Callable[..., Any]:
    """Import the module at `logicalPathModule` and return its attribute `identifier`."""
    theModule: ModuleType = importlib.import_module(logicalPathModule, packageIdentifierIfRelative)
    return getattr(theModule, identifier)
45
+
46
def importPathFilename2Callable(pathFilename: PathLike[Any] | PurePath, identifier: ast_Identifier, moduleIdentifier: ast_Identifier | None = None) -> Callable[..., Any]:
    """Load the Python file at `pathFilename` as a module and return its attribute `identifier`.

    Raises:
        ImportError: when `importlib` cannot build a loadable specification for the file.
    """
    pathFilename = Path(pathFilename)

    # The module name defaults to the filename stem unless one is supplied.
    importlibSpecification = importlib.util.spec_from_file_location(moduleIdentifier or pathFilename.stem, pathFilename)
    if importlibSpecification is None or importlibSpecification.loader is None: raise ImportError(f"I received\n\t`{pathFilename = }`,\n\t`{identifier = }`, and\n\t`{moduleIdentifier = }`.\n\tAfter loading, \n\t`importlibSpecification` {'is `None`' if importlibSpecification is None else 'has a value'} and\n\t`importlibSpecification.loader` is unknown.")

    moduleFreshlyMinted: ModuleType = importlib.util.module_from_spec(importlibSpecification)
    importlibSpecification.loader.exec_module(moduleFreshlyMinted)
    return getattr(moduleFreshlyMinted, identifier)
55
+
56
def parseLogicalPath2astModule(logicalPathModule: str_nameDOTname, packageIdentifierIfRelative: ast_Identifier|None=None, mode:str='exec') -> ast.AST:
    """Import the module at `logicalPathModule`, fetch its source text, and parse it into an AST."""
    theModule: ModuleType = importlib.import_module(logicalPathModule, packageIdentifierIfRelative)
    return ast.parse(inspect_getsource(theModule), mode=mode)
60
+
61
+ def parsePathFilename2astModule(pathFilename: PathLike[Any] | PurePath, mode:str='exec') -> ast.AST:
62
+ return ast.parse(Path(pathFilename).read_text(), mode=mode)
@@ -18,9 +18,9 @@ mapFolding/reference/jobsCompleted/[2x19]/[2x19].ll
18
18
 
19
19
  This file demonstrates the low-level optimizations that made this previously
20
20
  intractable calculation possible. The IR reveals how the abstract algorithm was
21
- transformed into efficient machine code through Numba's compilation pipeline.
21
+ transformed into efficient machine code through Numba's compilation assembly-line.
22
22
 
23
- While originally part of a tighter integration with the code generation pipeline,
23
+ While originally part of a tighter integration with the code generation assembly-line,
24
24
  this module now operates as a standalone utility that can be applied to any module
25
25
  containing Numba-compiled functions.
26
26
  """
@@ -0,0 +1,22 @@
1
+ from collections.abc import Callable
2
+ from copy import deepcopy
3
+ from mapFolding.someAssemblyRequired import ast_Identifier, RecipeSynthesizeFlow, Then, be, ifThis, DOT, 又, NodeChanger
4
+ from mapFolding.someAssemblyRequired.transformationTools import makeDictionary4InliningFunction, makeDictionaryFunctionDef
5
+ from typing import cast
6
+ import ast
7
+
8
def inlineFunctionDef(astFunctionDef: ast.FunctionDef, dictionary4Inlining: dict[ast_Identifier, ast.FunctionDef]) -> ast.FunctionDef:
    # NOTE(review): stub — currently returns `astFunctionDef` unchanged; the
    # inlining of callees from `dictionary4Inlining` is not yet implemented.

    return astFunctionDef
11
+
12
# Test code
# NOTE(review): these statements run at import time (no `if __name__ == '__main__':`
# guard), so merely importing this module executes the transformation below.
testFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()
# Collect the FunctionDefs reachable from the sequential callable that are
# candidates for inlining; the walrus keeps the full name->FunctionDef map too.
dictionary4Inlining: dict[ast_Identifier, ast.FunctionDef] = makeDictionary4InliningFunction(
    testFlow.sourceCallableSequential,
    (dictionaryFunctionDef := makeDictionaryFunctionDef(testFlow.source_astModule)))

astFunctionDef = dictionaryFunctionDef[testFlow.sourceCallableSequential]

astFunctionDefTransformed = inlineFunctionDef(
    astFunctionDef,
    dictionary4Inlining)
@@ -0,0 +1,158 @@
1
+ """Synthesize one file to compute `foldsTotal` of `mapShape`."""
2
+ from mapFolding.toolboxFilesystem import getPathFilenameFoldsTotal
3
+ from mapFolding.someAssemblyRequired import ast_Identifier, be, ifThis, Make, NodeChanger, Then, IngredientsFunction, IngredientsModule
4
+ from mapFolding.someAssemblyRequired.toolboxNumba import RecipeJob, SpicesJobNumba, decorateCallableWithNumba
5
+ from mapFolding.someAssemblyRequired.transformationTools import astModuleToIngredientsFunction, write_astModule
6
+ from mapFolding.someAssemblyRequired.transformationTools import makeInitializedComputationState
7
+ from mapFolding.theSSOT import The
8
+ from typing import cast
9
+ from Z0Z_tools import autoDecodingRLE
10
+ from pathlib import PurePosixPath
11
+ import ast
12
+
13
# Identifiers that no synthesized job uses, regardless of flow.
list_IdentifiersNotUsedAllHARDCODED = ['concurrencyLimit', 'foldsTotal', 'mapShape',]
# Identifiers unused by both the parallel and the sequential flow.
list_IdentifiersNotUsedParallelSequentialHARDCODED = ['indexLeaf']
# Identifiers unused by the sequential flow specifically.
list_IdentifiersNotUsedSequentialHARDCODED = ['foldGroups', 'taskDivisions', 'taskIndex',]

# Identifiers replaced by other constructs during synthesis
# (e.g. the counting variable when a progress bar is wired in).
list_IdentifiersReplacedHARDCODED = ['groupsOfFolds',]

# Identifiers whose values are fixed for a given map shape; they are
# substituted as literal constants in the synthesized module.
list_IdentifiersStaticValuesHARDCODED = ['dimensionsTotal', 'leavesTotal',]

# Union of all of the above: every identifier that must NOT become an
# initialized local when function parameters are moved into the body.
list_IdentifiersNotUsedHARDCODED = list_IdentifiersStaticValuesHARDCODED + list_IdentifiersReplacedHARDCODED + list_IdentifiersNotUsedAllHARDCODED + list_IdentifiersNotUsedParallelSequentialHARDCODED + list_IdentifiersNotUsedSequentialHARDCODED
22
+
23
def addLauncherNumbaProgress(ingredientsModule: IngredientsModule, ingredientsFunction: IngredientsFunction, job: RecipeJob, spices: SpicesJobNumba) -> tuple[IngredientsModule, IngredientsFunction]:
	"""Wire a `numba_progress.ProgressBar` into the synthesized counting job.

	Mutates both ingredients: the module gains the `numba_progress` imports and
	a `__main__` launcher; the function gains a ProgressBar parameter and its
	counting increment is replaced by a progress-bar update.

	Returns the (mutated) module and function so the caller can rebind them.
	"""

	# The f-string interpolates NOW, at synthesis time: the generated launcher
	# contains literal values (estimated total, callable name, leavesTotal, map
	# shape, output path) rather than references into this process.
	linesLaunch: str = f"""
if __name__ == '__main__':
	with ProgressBar(total={job.foldsTotalEstimated}, update_interval=2) as statusUpdate:
		{job.countCallable}(statusUpdate)
		foldsTotal = statusUpdate.n * {job.state.leavesTotal}
		print('map {job.state.mapShape} =', foldsTotal)
		writeStream = open('{job.pathFilenameFoldsTotal.as_posix()}', 'w')
		writeStream.write(str(foldsTotal))
		writeStream.close()
"""
	numba_progressPythonClass: ast_Identifier = 'ProgressBar'
	numba_progressNumbaType: ast_Identifier = 'ProgressBarType'
	ingredientsModule.imports.addImportFrom_asStr('numba_progress', numba_progressPythonClass)
	ingredientsModule.imports.addImportFrom_asStr('numba_progress', numba_progressNumbaType)

	# Append the ProgressBar as a new (last) parameter of the counting function.
	ast_argNumbaProgress = ast.arg(arg=spices.numbaProgressBarIdentifier, annotation=ast.Name(id=numba_progressPythonClass, ctx=ast.Load()))
	ingredientsFunction.astFunctionDef.args.args.append(ast_argNumbaProgress)

	# Replace the fold-counting augmented assignment (`<countingVariable> += ...`)
	# with `<progressBar>.update(1)`; the launcher reconstructs foldsTotal from
	# the bar's `.n` multiplied by leavesTotal.
	findThis = ifThis.isAugAssign_targetIs(ifThis.isName_Identifier(job.shatteredDataclass.countingVariableName.id))
	doThat = Then.replaceWith(Make.Expr(Make.Call(Make.Attribute(Make.Name(spices.numbaProgressBarIdentifier),'update'),[Make.Constant(1)])))
	countWithProgressBar = NodeChanger(findThis, doThat)
	countWithProgressBar.visit(ingredientsFunction.astFunctionDef)

	ingredientsModule.appendLauncher(ast.parse(linesLaunch))

	return ingredientsModule, ingredientsFunction
51
+
52
def move_arg2FunctionDefDOTbodyAndAssignInitialValues(ingredientsFunction: IngredientsFunction, job: RecipeJob) -> IngredientsFunction:
	"""Turn the counting function's parameters into initialized local variables.

	For each parameter known to the shattered dataclass: if it is used by the
	job, prepend an annotated assignment that initializes it with the concrete
	value from `job.state`; in every case, delete the parameter from the
	signature. Mutates and returns `ingredientsFunction`.
	"""
	ingredientsFunction.imports.update(job.shatteredDataclass.ledger)

	list_IdentifiersNotUsed = list_IdentifiersNotUsedHARDCODED

	list_argCauseMyBrainRefusesToDoThisTheRightWay = ingredientsFunction.astFunctionDef.args.args + ingredientsFunction.astFunctionDef.args.posonlyargs + ingredientsFunction.astFunctionDef.args.kwonlyargs
	for ast_arg in list_argCauseMyBrainRefusesToDoThisTheRightWay:
		if ast_arg.arg in job.shatteredDataclass.field2AnnAssign:
			if ast_arg.arg in list_IdentifiersNotUsed:
				pass
			else:
				# Template AnnAssign plus the constructor kind that tells us how
				# to embed the concrete value from `job.state`.
				ImaAnnAssign, elementConstructor = job.shatteredDataclass.Z0Z_field2AnnAssign[ast_arg.arg]
				match elementConstructor:
					case 'scalar':
						# e.g. `leaf1ndex: DatatypeLeavesTotal = DatatypeLeavesTotal(1)` — patch the literal argument.
						ImaAnnAssign.value.args[0].value = int(job.state.__dict__[ast_arg.arg]) # type: ignore
					case 'array':
						# Embed array contents as a run-length-encoded expression to keep the file small.
						dataAsStrRLE: str = autoDecodingRLE(job.state.__dict__[ast_arg.arg], addSpaces=True)
						dataAs_astExpr: ast.expr = cast(ast.Expr, ast.parse(dataAsStrRLE).body[0]).value
						ImaAnnAssign.value.args = [dataAs_astExpr] # type: ignore
					case _:
						# Tuple-like field: one `elementConstructor(dimension)` per map dimension.
						list_exprDOTannotation: list[ast.expr] = []
						list_exprDOTvalue: list[ast.expr] = []
						for dimension in job.state.mapShape:
							list_exprDOTannotation.append(Make.Name(elementConstructor))
							list_exprDOTvalue.append(Make.Call(Make.Name(elementConstructor), [Make.Constant(dimension)]))
						ImaAnnAssign.annotation.slice.elts = list_exprDOTannotation # type: ignore
						ImaAnnAssign.value.elts = list_exprDOTvalue # type: ignore

				# NOTE(review): the initializer is prepended only for parameters that
				# are actually used — unused ones are simply dropped from the signature.
				ingredientsFunction.astFunctionDef.body.insert(0, ImaAnnAssign)

			# Whether used or not, the identifier ceases to be a parameter.
			findThis = ifThis.is_arg_Identifier(ast_arg.arg)
			remove_arg = NodeChanger(findThis, Then.removeIt)
			remove_arg.visit(ingredientsFunction.astFunctionDef)

	ast.fix_missing_locations(ingredientsFunction.astFunctionDef)
	return ingredientsFunction
89
+
90
def makeJobNumba(job: RecipeJob, spices: SpicesJobNumba) -> None:
	"""Synthesize a standalone module that computes `foldsTotal` for one `mapShape`.

	Parameters
	----------
	job
		Recipe holding the source AST, the initialized computation state, and the
		destination paths for the synthesized module and its result file.
	spices
		Numba-specific options: jit parameters and the optional progress bar.

	Side effect: writes the synthesized module to `job.pathFilenameModule`.
	"""
	# get the raw ingredients: data and the algorithm
	ingredientsCount: IngredientsFunction = astModuleToIngredientsFunction(job.source_astModule, job.countCallable)

	# Change the return so you can dynamically determine which variables are not used
	removeReturnStatement = NodeChanger(be.Return, Then.removeIt)
	removeReturnStatement.visit(ingredientsCount.astFunctionDef)
	ingredientsCount.astFunctionDef.returns = Make.Constant(value=None)

	# Remove `foldGroups` and any other unused statements, so you can dynamically determine which variables are not used
	findThis = ifThis.isAssignAndTargets0Is(ifThis.isSubscript_Identifier('foldGroups'))
	remove_foldGroups = NodeChanger(findThis, Then.removeIt)
	remove_foldGroups.visit(ingredientsCount.astFunctionDef)

	# Replace statically-valued identifiers with literal constants, so you can
	# dynamically determine which variables are not used.
	for identifier in list_IdentifiersStaticValuesHARDCODED:
		findThis = ifThis.isName_Identifier(identifier)
		doThat = Then.replaceWith(Make.Constant(int(job.state.__dict__[identifier])))
		NodeChanger(findThis, doThat).visit(ingredientsCount.astFunctionDef)

	# Bug fix: construct the module unconditionally. Previously it was created
	# only inside the progress-bar branch, so `spices.useNumbaProgressBar == False`
	# raised NameError at `ingredientsModule.appendIngredientsFunction` below.
	ingredientsModule = IngredientsModule()

	# This launcher eliminates the use of one identifier, so run it now and you can dynamically determine which variables are not used
	if spices.useNumbaProgressBar:
		ingredientsModule, ingredientsCount = addLauncherNumbaProgress(ingredientsModule, ingredientsCount, job, spices)
		spices.parametersNumba['nogil'] = True  # presumably required so the jitted loop can update the ProgressBar — TODO confirm

	ingredientsCount = move_arg2FunctionDefDOTbodyAndAssignInitialValues(ingredientsCount, job)

	ingredientsCount.astFunctionDef.decorator_list = [] # TODO low-priority, handle this more elegantly
	# TODO when I add the function signature in numba style back to the decorator, the logic needs to handle `ProgressBarType:`
	ingredientsCount = decorateCallableWithNumba(ingredientsCount, spices.parametersNumba)

	ingredientsModule.appendIngredientsFunction(ingredientsCount)

	# add imports, make str, remove unused imports; put on disk
	write_astModule(ingredientsModule, job.pathFilenameModule, job.packageIdentifier)
129
+
130
+ """
131
+ Overview
132
+ - the code starts life in theDao.py, which has many optimizations;
133
+ - `makeNumbaOptimizedFlow` increases optimization, especially by using numba;
134
+ - `makeJobNumba` increases optimization especially by limiting its capabilities to just one set of parameters
135
+ - the synthesized module must run well as a standalone interpreted-Python script
136
+ - the next major optimization step will (probably) be to use the module synthesized by `makeJobNumba` to compile a standalone executable
137
+ - Nevertheless, at each major optimization step, the code is constantly being improved and optimized, so everything must be well organized (read: semantic) and able to handle a range of arbitrary upstream and not disrupt downstream transformations
138
+
139
+ Necessary
140
+ - Move the function's parameters to the function body,
141
+ - initialize identifiers with their state types and values,
142
+
143
+ Optimizations
144
+ - replace static-valued identifiers with their values
145
+ - narrowly focused imports
146
+
147
+ Minutia
148
+ - do not use `with` statement inside numba jitted code, except to use numba's obj mode
149
+ """
150
+
151
if __name__ == '__main__':
	# Example invocation: synthesize a standalone counting job for a 3x4 map.
	mapShape = (3,4)
	state = makeInitializedComputationState(mapShape)
	# Synthesized modules land in the package's 'jobs' directory.
	pathModule = PurePosixPath(The.pathPackage, 'jobs')
	pathFilenameFoldsTotal = PurePosixPath(getPathFilenameFoldsTotal(state.mapShape, pathModule))
	aJob = RecipeJob(state, pathModule=pathModule, pathFilenameFoldsTotal=pathFilenameFoldsTotal)
	spices = SpicesJobNumba()  # default Numba options (jit parameters, progress bar)
	makeJobNumba(aJob, spices)