mapFolding 0.8.0__py3-none-any.whl → 0.8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. mapFolding/__init__.py +33 -4
  2. mapFolding/basecamp.py +14 -0
  3. mapFolding/beDRY.py +16 -1
  4. mapFolding/filesystem.py +124 -90
  5. mapFolding/noHomeYet.py +12 -0
  6. mapFolding/oeis.py +16 -1
  7. mapFolding/reference/__init__.py +0 -0
  8. mapFolding/reference/flattened.py +46 -45
  9. mapFolding/reference/hunterNumba.py +4 -4
  10. mapFolding/reference/irvineJavaPort.py +1 -1
  11. mapFolding/reference/lunnanNumpy.py +3 -4
  12. mapFolding/reference/lunnanWhile.py +5 -7
  13. mapFolding/reference/rotatedEntryPoint.py +2 -3
  14. mapFolding/someAssemblyRequired/__init__.py +29 -0
  15. mapFolding/someAssemblyRequired/getLLVMforNoReason.py +32 -14
  16. mapFolding/someAssemblyRequired/ingredientsNumba.py +22 -1
  17. mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py +196 -0
  18. mapFolding/someAssemblyRequired/synthesizeNumbaJobVESTIGIAL.py +3 -4
  19. mapFolding/someAssemblyRequired/transformDataStructures.py +162 -0
  20. mapFolding/someAssemblyRequired/transformationTools.py +216 -199
  21. mapFolding/theDao.py +19 -5
  22. mapFolding/theSSOT.py +19 -1
  23. {mapfolding-0.8.0.dist-info → mapfolding-0.8.1.dist-info}/METADATA +50 -44
  24. mapfolding-0.8.1.dist-info/RECORD +39 -0
  25. {mapfolding-0.8.0.dist-info → mapfolding-0.8.1.dist-info}/WHEEL +1 -1
  26. mapFolding/reference/lunnan.py +0 -153
  27. mapFolding/someAssemblyRequired/Z0Z_workbench.py +0 -350
  28. mapFolding/someAssemblyRequired/synthesizeDataConverters.py +0 -117
  29. mapFolding/syntheticModules/numbaCountHistoricalExample.py +0 -158
  30. mapFolding/syntheticModules/numba_doTheNeedfulHistoricalExample.py +0 -13
  31. mapfolding-0.8.0.dist-info/RECORD +0 -41
  32. {mapfolding-0.8.0.dist-info → mapfolding-0.8.1.dist-info}/entry_points.txt +0 -0
  33. {mapfolding-0.8.0.dist-info → mapfolding-0.8.1.dist-info/licenses}/LICENSE +0 -0
  34. {mapfolding-0.8.0.dist-info → mapfolding-0.8.1.dist-info}/top_level.txt +0 -0
@@ -2,10 +2,9 @@
2
2
  A generally faithful translation of the original Atlas Autocode code by W. F. Lunnon to Python using NumPy.
3
3
  W. F. Lunnon, Multi-dimensional map-folding, The Computer Journal, Volume 14, Issue 1, 1971, Pages 75-80, https://doi.org/10.1093/comjnl/14.1.75
4
4
  """
5
- from typing import List
6
5
  import numpy
7
6
 
8
- def foldings(p: List[int]) -> int:
7
+ def foldings(p: list[int]) -> int:
9
8
  """
10
9
  Run loop with (A, B) on each folding of a p[1] x ... x p[d] map, where A and B are the above and below vectors.
11
10
 
@@ -66,7 +65,7 @@ def foldings(p: List[int]) -> int:
66
65
  # D[i][l][m] = leaf connected to m in section i when inserting l;
67
66
 
68
67
  G: int = 0
69
- l: int = 1
68
+ l = 1
70
69
 
71
70
  # kick off with null folding
72
71
  while l > 0:
@@ -86,7 +85,7 @@ def foldings(p: List[int]) -> int:
86
85
  if D[i][l][l] == l:
87
86
  dd = dd + 1
88
87
  else:
89
- m: int = D[i][l][l]
88
+ m = D[i][l][l]
90
89
  while m != l:
91
90
  gap[gg] = m
92
91
  if count[m] == 0:
@@ -2,9 +2,7 @@
2
2
  A largely faithful translation of the original Atlas Autocode code by W. F. Lunnon to Python using `while`.
3
3
  W. F. Lunnon, Multi-dimensional map-folding, The Computer Journal, Volume 14, Issue 1, 1971, Pages 75-80, https://doi.org/10.1093/comjnl/14.1.75
4
4
  """
5
- from typing import Sequence
6
-
7
- def foldings(p: Sequence[int]) -> int:
5
+ def foldings(p: list[int]) -> int:
8
6
  """
9
7
  Run loop with (A, B) on each folding of a p[1] x ... x p[d] map, where A and B are the above and below vectors.
10
8
 
@@ -38,8 +36,8 @@ def foldings(p: Sequence[int]) -> int:
38
36
  # and later gap[gapter[l]] is the gap where leaf l is currently inserted
39
37
 
40
38
  P = [1] * (d + 1)
41
- C = [[0] * (n + 1) for dimension1 in range(d + 1)]
42
- D = [[[0] * (n + 1) for dimension2 in range(n + 1)] for dimension1 in range(d + 1)]
39
+ C = [[0] * (n + 1) for _dimension1 in range(d + 1)]
40
+ D = [[[0] * (n + 1) for _dimension2 in range(n + 1)] for _dimension1 in range(d + 1)]
43
41
 
44
42
  for i in range(1, d + 1):
45
43
  P[i] = P[i - 1] * p[i - 1]
@@ -65,7 +63,7 @@ def foldings(p: Sequence[int]) -> int:
65
63
  # D[i][l][m] = leaf connected to m in section i when inserting l;
66
64
 
67
65
  G: int = 0
68
- l: int = 1
66
+ l = 1
69
67
 
70
68
  # kick off with null folding
71
69
  while l > 0:
@@ -84,7 +82,7 @@ def foldings(p: Sequence[int]) -> int:
84
82
  if D[i][l][l] == l:
85
83
  dd = dd + 1
86
84
  else:
87
- m: int = D[i][l][l]
85
+ m = D[i][l][l]
88
86
  while m != l:
89
87
  gap[gg] = m
90
88
  if count[m] == 0:
@@ -1,6 +1,5 @@
1
1
  from mapFolding import outfitFoldings
2
2
  from numba import njit
3
- from typing import List
4
3
  import numpy
5
4
  from numpy.typing import NDArray
6
5
 
@@ -42,7 +41,7 @@ tricky = [
42
41
 
43
42
  COUNTindicesStatic = len(tricky)
44
43
 
45
- def countFolds(listDimensions: List[int]):
44
+ def countFolds(listDimensions: list[int]):
46
45
  static = numpy.zeros(COUNTindicesStatic, dtype=numpy.int64)
47
46
 
48
47
  listDimensions, static[leavesTotal], D, track,gapsWhere = outfitFoldings(listDimensions)
@@ -55,7 +54,7 @@ def countFolds(listDimensions: List[int]):
55
54
  return foldingsTotal
56
55
 
57
56
  # @recordBenchmarks()
58
- def _sherpa(track: NDArray, gap: NDArray, static: NDArray, D: NDArray, p: List[int]):
57
+ def _sherpa(track: NDArray, gap: NDArray, static: NDArray, D: NDArray, p: list[int]):
59
58
  """Performance critical section that counts foldings.
60
59
 
61
60
  Parameters:
@@ -1,3 +1,29 @@
1
+ """
2
+ Code transformation framework for algorithmic optimization.
3
+
4
+ This package implements a comprehensive framework for programmatically analyzing,
5
+ transforming, and generating Python code. It enables sophisticated algorithm optimization
6
+ through abstract syntax tree (AST) manipulation, allowing algorithms to be transformed
7
+ from a readable, functional implementation into highly-optimized variants tailored for
8
+ different execution environments or specific computational tasks.
9
+
10
+ Core capabilities:
11
+ 1. AST Pattern Recognition - Precisely identify and match code patterns using composable predicates
12
+ 2. Algorithm Transformation - Convert functional state-based implementations to primitive operations
13
+ 3. Dataclass "Shattering" - Decompose complex state objects into primitive components
14
+ 4. Performance Optimization - Apply domain-specific optimizations for numerical computation
15
+ 5. Code Generation - Generate specialized implementations with appropriate imports and syntax
16
+
17
+ The transformation pipeline supports multiple optimization targets, from general-purpose
18
+ acceleration to generating highly-specialized variants optimized for specific input parameters.
19
+ This multi-level transformation approach allows for both development flexibility and
20
+ runtime performance, preserving algorithm readability in the source while enabling
21
+ maximum execution speed in production.
22
+
23
+ These tools were developed for map folding computation optimization but are designed as
24
+ general-purpose utilities applicable to a wide range of code transformation scenarios,
25
+ particularly for numerically-intensive algorithms that benefit from just-in-time compilation.
26
+ """
1
27
  from mapFolding.someAssemblyRequired.transformationTools import (
2
28
  ast_Identifier as ast_Identifier,
3
29
  extractClassDef as extractClassDef,
@@ -5,6 +31,7 @@ from mapFolding.someAssemblyRequired.transformationTools import (
5
31
  ifThis as ifThis,
6
32
  IngredientsFunction as IngredientsFunction,
7
33
  IngredientsModule as IngredientsModule,
34
+ inlineThisFunctionWithTheseValues as inlineThisFunctionWithTheseValues,
8
35
  LedgerOfImports as LedgerOfImports,
9
36
  Make as Make,
10
37
  makeDictionaryReplacementStatements as makeDictionaryReplacementStatements,
@@ -13,5 +40,7 @@ from mapFolding.someAssemblyRequired.transformationTools import (
13
40
  RecipeSynthesizeFlow as RecipeSynthesizeFlow,
14
41
  strDotStrCuzPyStoopid as strDotStrCuzPyStoopid,
15
42
  Then as Then,
43
+ write_astModule as write_astModule,
16
44
  Z0Z_executeActionUnlessDescendantMatches as Z0Z_executeActionUnlessDescendantMatches,
45
+ Z0Z_replaceMatchingASTnodes as Z0Z_replaceMatchingASTnodes,
17
46
  )
@@ -1,20 +1,38 @@
1
+ """
2
+ Utility for extracting LLVM IR from compiled Python modules.
3
+
4
+ This module provides functionality to extract and save the LLVM Intermediate Representation (IR)
5
+ generated when Numba compiles Python functions. It implements a simple interface that:
6
+
7
+ 1. Imports a specified Python module from its file path
8
+ 2. Extracts the LLVM IR from a specified function within that module
9
+ 3. Writes the IR to a file with the same base name but with the .ll extension
10
+
11
+ The extracted LLVM IR can be valuable for debugging, optimization analysis, or educational
12
+ purposes, as it provides a view into how high-level Python code is translated into
13
+ lower-level representations for machine execution.
14
+
15
+ While originally part of a tighter integration with the code generation pipeline,
16
+ this module now operates as a standalone utility that can be applied to any module
17
+ containing Numba-compiled functions.
18
+ """
1
19
  from importlib.machinery import ModuleSpec
20
+ from pathlib import Path
2
21
  from types import ModuleType
3
22
  import importlib.util
4
23
  import llvmlite.binding
5
- import pathlib
6
24
 
7
- def writeModuleLLVM(pathFilename: pathlib.Path, identifierCallable: str) -> pathlib.Path:
8
- """Import the generated module directly and get its LLVM IR."""
9
- specTarget: ModuleSpec | None = importlib.util.spec_from_file_location("generatedModule", pathFilename)
10
- if specTarget is None or specTarget.loader is None:
11
- raise ImportError(f"Could not create module spec or loader for {pathFilename}")
12
- moduleTarget: ModuleType = importlib.util.module_from_spec(specTarget)
13
- specTarget.loader.exec_module(moduleTarget)
25
+ def writeModuleLLVM(pathFilename: Path, identifierCallable: str) -> Path:
26
+ """Import the generated module directly and get its LLVM IR."""
27
+ specTarget: ModuleSpec | None = importlib.util.spec_from_file_location("generatedModule", pathFilename)
28
+ if specTarget is None or specTarget.loader is None:
29
+ raise ImportError(f"Could not create module spec or loader for {pathFilename}")
30
+ moduleTarget: ModuleType = importlib.util.module_from_spec(specTarget)
31
+ specTarget.loader.exec_module(moduleTarget)
14
32
 
15
- # Get LLVM IR and write to file
16
- linesLLVM = moduleTarget.__dict__[identifierCallable].inspect_llvm()[()]
17
- moduleLLVM: llvmlite.binding.ModuleRef = llvmlite.binding.module.parse_assembly(linesLLVM)
18
- pathFilenameLLVM: pathlib.Path = pathFilename.with_suffix(".ll")
19
- pathFilenameLLVM.write_text(str(moduleLLVM))
20
- return pathFilenameLLVM
33
+ # Get LLVM IR and write to file
34
+ linesLLVM = moduleTarget.__dict__[identifierCallable].inspect_llvm()[()]
35
+ moduleLLVM: llvmlite.binding.ModuleRef = llvmlite.binding.module.parse_assembly(linesLLVM)
36
+ pathFilenameLLVM: Path = pathFilename.with_suffix(".ll")
37
+ pathFilenameLLVM.write_text(str(moduleLLVM))
38
+ return pathFilenameLLVM
@@ -1,7 +1,28 @@
1
+ """
2
+ Numba-specific ingredients for optimized code generation.
3
+
4
+ This module provides specialized tools, constants, and types specifically designed
5
+ for transforming Python code into Numba-accelerated implementations. It implements:
6
+
7
+ 1. A range of Numba jit decorator configurations for different optimization scenarios
8
+ 2. Functions to identify and manipulate Numba decorators in abstract syntax trees
9
+ 3. Utilities for applying appropriate Numba typing to transformed code
10
+ 4. Parameter management for Numba compilation options
11
+
12
+ The configurations range from conservative options that prioritize compatibility and
13
+ error detection to aggressive optimizations that maximize performance at the cost of
14
+ flexibility. While this module specifically targets Numba, its design follows the pattern
15
+ of generic code transformation tools in the package, allowing similar approaches to be
16
+ applied to other acceleration technologies.
17
+
18
+ This module works in conjunction with transformation tools to convert the general-purpose
19
+ algorithm implementation into a highly-optimized Numba version.
20
+ """
21
+
1
22
  from collections.abc import Callable, Sequence
2
23
  from mapFolding.someAssemblyRequired import ifThis, IngredientsFunction, Make
3
24
  from numba.core.compiler import CompilerBase as numbaCompilerBase
4
- from typing import Any, TYPE_CHECKING, Final, cast
25
+ from typing import Any, cast, Final, TYPE_CHECKING
5
26
  import ast
6
27
 
7
28
  try:
@@ -0,0 +1,196 @@
1
+ """
2
+ Orchestrator for generating Numba-optimized versions of the map folding algorithm.
3
+
4
+ This module transforms the pure Python implementation of the map folding algorithm
5
+ into a highly-optimized Numba implementation. It serves as the high-level coordinator
6
+ for the code transformation process, orchestrating the following steps:
7
+
8
+ 1. Extracting the core algorithm functions from the source implementation
9
+ 2. Transforming function signatures and state handling for Numba compatibility
10
+ 3. Converting state-based operations to direct primitive operations
11
+ 4. Applying Numba decorators with appropriate optimization parameters
12
+ 5. Managing imports and dependencies for the generated code
13
+ 6. Assembling and writing the transformed implementation
14
+
15
+ The transformation process preserves the algorithm's logic while dramatically improving
16
+ performance by leveraging Numba's just-in-time compilation capabilities. This module
17
+ depends on the abstract transformation tools, dataclass handling utilities, and
18
+ Numba-specific optimization configurations from other modules in the package.
19
+
20
+ The primary entry point is the makeNumbaFlow function, which can be executed directly
21
+ to generate a fresh optimized implementation.
22
+ """
23
+
24
+ from mapFolding.someAssemblyRequired import (
25
+ extractFunctionDef,
26
+ ifThis,
27
+ IngredientsFunction,
28
+ IngredientsModule,
29
+ LedgerOfImports,
30
+ Make,
31
+ makeDictionaryReplacementStatements,
32
+ NodeCollector,
33
+ NodeReplacer,
34
+ RecipeSynthesizeFlow,
35
+ Then,
36
+ write_astModule,
37
+ Z0Z_replaceMatchingASTnodes,
38
+ inlineThisFunctionWithTheseValues,
39
+ )
40
+ from mapFolding.someAssemblyRequired.ingredientsNumba import decorateCallableWithNumba
41
+ from mapFolding.someAssemblyRequired.transformDataStructures import shatter_dataclassesDOTdataclass
42
+ from mapFolding.theSSOT import raiseIfNoneGitHubIssueNumber3
43
+ import ast
44
+
45
+ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> None:
46
+ """
47
+ Think about a better organization of this function.
48
+
49
+ Currently, transform `Callable` in order:
50
+ sourceDispatcherCallable
51
+ sourceInitializeCallable
52
+ sourceParallelCallable
53
+ sourceSequentialCallable
54
+
55
+ But, it should be organized around each transformation. So, when the parameters of `sourceSequentialCallable`
56
+ are transformed, for example, the statement in `sourceDispatcherCallable` that calls `sourceSequentialCallable` should be
57
+ transformed at the same time: literally in the same function-or-NodeReplacer-or-subroutine. That would help
58
+ avoid bugs.
59
+
60
+ Furthermore, if the above example transformation requires unpacking the dataclass, for example, then the unpacking
61
+ would be automatically triggered. I have no idea how that would happen, but the transformations are highly predictable,
62
+ so using a programming language to construct if-this-then-that cascades shouldn't be a problem, you know?
63
+
64
+ # TODO a tool to automatically remove unused variables from the ArgumentsSpecification (return, and returns) _might_ be nice.
65
+ """
66
+ dictionaryReplacementStatements = makeDictionaryReplacementStatements(numbaFlow.source_astModule)
67
+ # TODO remember that `sequentialCallable` and `sourceSequentialCallable` are two different values.
68
+ # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
69
+
70
+ # ===========================================================
71
+ sourcePython = numbaFlow.sourceDispatcherCallable
72
+ astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
73
+ if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
74
+ ingredientsDispatcher = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
75
+
76
+ # sourceParallelCallable
77
+ shatteredDataclass = shatter_dataclassesDOTdataclass(numbaFlow.logicalPathModuleDataclass, numbaFlow.sourceDataclassIdentifier, numbaFlow.sourceDataclassInstanceTaskDistribution)
78
+ ingredientsDispatcher.imports.update(shatteredDataclass.ledgerDataclassANDFragments)
79
+
80
+ # TODO remove hardcoding
81
+ namespaceHARDCODED = 'concurrencyManager'
82
+ identifierHARDCODED = 'submit'
83
+ sourceNamespace = namespaceHARDCODED
84
+ sourceIdentifier = identifierHARDCODED
85
+ NodeReplacer(
86
+ findThis = ifThis.isAssignAndValueIsCallNamespace_Identifier(sourceNamespace, sourceIdentifier)
87
+ , doThat = Then.insertThisAbove(shatteredDataclass.listAnnAssign4DataclassUnpack)
88
+ ).visit(ingredientsDispatcher.astFunctionDef)
89
+ NodeReplacer(
90
+ findThis = ifThis.isCallNamespace_Identifier(sourceNamespace, sourceIdentifier)
91
+ , doThat = Then.replaceWith(Make.astCall(Make.astAttribute(Make.astName(sourceNamespace), sourceIdentifier)
92
+ , listArguments=[Make.astName(numbaFlow.parallelCallable)] + shatteredDataclass.listNameDataclassFragments4Parameters))
93
+ ).visit(ingredientsDispatcher.astFunctionDef)
94
+
95
+ CapturedAssign: list[ast.AST] = []
96
+ CapturedCall: list[ast.Call] = []
97
+ findThis = ifThis.isCall
98
+ doThat = [Then.appendTo(CapturedCall)]
99
+ capture = NodeCollector(findThis, doThat)
100
+
101
+ NodeCollector(
102
+ findThis = ifThis.isAssignAndTargets0Is(ifThis.isSubscript_Identifier(numbaFlow.sourceDataclassInstance))
103
+ , doThat = [Then.appendTo(CapturedAssign)
104
+ , lambda node: capture.visit(node)]
105
+ ).visit(ingredientsDispatcher.astFunctionDef)
106
+
107
+ newAssign = CapturedAssign[0]
108
+ NodeReplacer(
109
+ findThis = lambda node: ifThis.isSubscript(node) and ifThis.isAttribute(node.value) and ifThis.isCall(node.value.value)
110
+ , doThat = Then.replaceWith(CapturedCall[0])
111
+ ).visit(newAssign)
112
+
113
+ NodeReplacer(
114
+ findThis = ifThis.isAssignAndTargets0Is(ifThis.isSubscript_Identifier(numbaFlow.sourceDataclassInstance))
115
+ , doThat = Then.replaceWith(newAssign)
116
+ ).visit(ingredientsDispatcher.astFunctionDef)
117
+
118
+ # sourceSequentialCallable
119
+ shatteredDataclass = shatter_dataclassesDOTdataclass(numbaFlow.logicalPathModuleDataclass, numbaFlow.sourceDataclassIdentifier, numbaFlow.sourceDataclassInstance)
120
+
121
+ ingredientsDispatcher.imports.update(shatteredDataclass.ledgerDataclassANDFragments)
122
+
123
+ NodeReplacer(
124
+ findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
125
+ , doThat = Then.insertThisAbove(shatteredDataclass.listAnnAssign4DataclassUnpack)
126
+ ).visit(ingredientsDispatcher.astFunctionDef)
127
+ NodeReplacer(
128
+ findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
129
+ , doThat = Then.insertThisBelow([shatteredDataclass.astAssignDataclassRepack])
130
+ ).visit(ingredientsDispatcher.astFunctionDef)
131
+ NodeReplacer(
132
+ findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
133
+ , doThat = Then.replaceWith(Make.astAssign(listTargets=[shatteredDataclass.astTuple4AssignTargetsToFragments], value=Make.astCall(Make.astName(numbaFlow.sequentialCallable), shatteredDataclass.listNameDataclassFragments4Parameters)))
134
+ ).visit(ingredientsDispatcher.astFunctionDef)
135
+
136
+ # ===========================================================
137
+ sourcePython = numbaFlow.sourceInitializeCallable
138
+ astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
139
+ if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
140
+ astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
141
+ ingredientsInitialize = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
142
+
143
+ # ===========================================================
144
+ sourcePython = numbaFlow.sourceParallelCallable
145
+ astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
146
+ if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
147
+ astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
148
+ ingredientsParallel = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
149
+ ingredientsParallel.astFunctionDef.name = numbaFlow.parallelCallable
150
+ ingredientsParallel.astFunctionDef.args = Make.astArgumentsSpecification(args=shatteredDataclass.list_ast_argAnnotated4ArgumentsSpecification)
151
+ NodeReplacer(
152
+ findThis = ifThis.isReturn
153
+ , doThat = Then.replaceWith(Make.astReturn(shatteredDataclass.astTuple4AssignTargetsToFragments))
154
+ ).visit(ingredientsParallel.astFunctionDef)
155
+
156
+ NodeReplacer(
157
+ findThis = ifThis.isReturn
158
+ , doThat = Then.replaceWith(Make.astReturn(shatteredDataclass.countingVariableName))
159
+ ).visit(ingredientsParallel.astFunctionDef)
160
+ ingredientsParallel.astFunctionDef.returns = shatteredDataclass.countingVariableAnnotation
161
+ replacementMap = {statement.value: statement.target for statement in shatteredDataclass.listAnnAssign4DataclassUnpack}
162
+ ingredientsParallel.astFunctionDef = Z0Z_replaceMatchingASTnodes(ingredientsParallel.astFunctionDef, replacementMap) # type: ignore
163
+ ingredientsParallel = decorateCallableWithNumba(ingredientsParallel)
164
+
165
+ # ===========================================================
166
+ sourcePython = numbaFlow.sourceSequentialCallable
167
+ astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
168
+ if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
169
+ astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
170
+ ingredientsSequential = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
171
+ ingredientsSequential.astFunctionDef.name = numbaFlow.sequentialCallable
172
+ ingredientsSequential.astFunctionDef.args = Make.astArgumentsSpecification(args=shatteredDataclass.list_ast_argAnnotated4ArgumentsSpecification)
173
+ NodeReplacer(
174
+ findThis = ifThis.isReturn
175
+ , doThat = Then.replaceWith(Make.astReturn(shatteredDataclass.astTuple4AssignTargetsToFragments))
176
+ ).visit(ingredientsSequential.astFunctionDef)
177
+ NodeReplacer(
178
+ findThis = ifThis.isReturn
179
+ , doThat = Then.replaceWith(Make.astReturn(shatteredDataclass.astTuple4AssignTargetsToFragments))
180
+ ).visit(ingredientsSequential.astFunctionDef)
181
+ ingredientsSequential.astFunctionDef.returns = shatteredDataclass.astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns
182
+ replacementMap = {statement.value: statement.target for statement in shatteredDataclass.listAnnAssign4DataclassUnpack}
183
+ ingredientsSequential.astFunctionDef = Z0Z_replaceMatchingASTnodes(ingredientsSequential.astFunctionDef, replacementMap) # type: ignore
184
+ ingredientsSequential = decorateCallableWithNumba(ingredientsSequential)
185
+
186
+ # ===========================================================
187
+ ingredientsModuleNumbaUnified = IngredientsModule(
188
+ ingredientsFunction=[ingredientsInitialize,
189
+ ingredientsParallel,
190
+ ingredientsSequential,
191
+ ingredientsDispatcher], imports=LedgerOfImports(numbaFlow.source_astModule))
192
+
193
+ write_astModule(ingredientsModuleNumbaUnified, numbaFlow.pathFilenameDispatcher, numbaFlow.packageName)
194
+
195
+ if __name__ == '__main__':
196
+ makeNumbaFlow()
@@ -2,8 +2,7 @@
2
2
  from collections.abc import Sequence
3
3
  from typing import Any, cast, TYPE_CHECKING
4
4
  from mapFolding.filesystem import getFilenameFoldsTotal, getPathFilenameFoldsTotal
5
- from mapFolding.someAssemblyRequired import ( ifThis, Make, NodeReplacer, Then, )
6
- from mapFolding.someAssemblyRequired.transformationTools import LedgerOfImports
5
+ from mapFolding.someAssemblyRequired import ( ifThis, LedgerOfImports, Make, NodeReplacer, Then, )
7
6
  from mapFolding.theSSOT import ( ComputationState, raiseIfNoneGitHubIssueNumber3, getPathJobRootDEFAULT, )
8
7
  from os import PathLike
9
8
  from pathlib import Path
@@ -16,7 +15,7 @@ import copy
16
15
  import inspect
17
16
  import numpy
18
17
  if TYPE_CHECKING:
19
- from mapFolding.someAssemblyRequired.synthesizeDataConverters import makeStateJob
18
+ from mapFolding.someAssemblyRequired.transformDataStructures import makeStateJobOUTDATED
20
19
  from mapFolding.someAssemblyRequired.ingredientsNumba import thisIsNumbaDotJit, decorateCallableWithNumba
21
20
  from mapFolding.someAssemblyRequired.ingredientsNumba import ParametersNumba, parametersNumbaDEFAULT
22
21
 
@@ -291,7 +290,7 @@ def writeJobNumba(mapShape: Sequence[int], algorithmSource: ModuleType, callable
291
290
  """
292
291
 
293
292
  # NOTE get the raw ingredients: data and the algorithm
294
- stateJob = makeStateJob(mapShape, writeJob=False, **keywordArguments)
293
+ stateJob = makeStateJobOUTDATED(mapShape, writeJob=False, **keywordArguments)
295
294
  pythonSource: str = inspect.getsource(algorithmSource)
296
295
  astModule: ast.Module = ast.parse(pythonSource)
297
296
  setFunctionDef: set[ast.FunctionDef] = {statement for statement in astModule.body if isinstance(statement, ast.FunctionDef)}
@@ -0,0 +1,162 @@
1
+ """
2
+ Utilities for transforming complex data structures in Python code generation.
3
+
4
+ This module provides specialized tools for working with structured data types during
5
+ the code transformation process, with a particular focus on handling dataclasses. It
6
+ implements functionality that enables:
7
+
8
+ 1. Decomposing dataclasses into individual fields for efficient processing
9
+ 2. Creating optimized parameter passing for transformed functions
10
+ 3. Converting between different representations of data structures
11
+ 4. Serializing and deserializing computation state objects
12
+
13
+ The core functionality revolves around the "shattering" process that breaks down
14
+ a dataclass into its constituent components, making each field individually accessible
15
+ for code generation and optimization purposes. This dataclass handling is critical for
16
+ transforming algorithms that operate on unified state objects into optimized implementations
17
+ that work with primitive types directly.
18
+
19
+ While developed for transforming map folding computation state objects, the utilities are
20
+ designed to be applicable to various data structure transformation scenarios.
21
+ """
22
+
23
+ from collections.abc import Sequence
24
+ from importlib import import_module
25
+ from inspect import getsource as inspect_getsource
26
+ from mapFolding.beDRY import outfitCountFolds, validateListDimensions
27
+ from mapFolding.filesystem import getPathFilenameFoldsTotal
28
+ from mapFolding.someAssemblyRequired import (
29
+ ast_Identifier,
30
+ extractClassDef,
31
+ ifThis,
32
+ LedgerOfImports,
33
+ Make,
34
+ NodeCollector,
35
+ strDotStrCuzPyStoopid,
36
+ Then,
37
+ Z0Z_executeActionUnlessDescendantMatches,
38
+ )
39
+ from mapFolding.theSSOT import ComputationState, getSourceAlgorithm
40
+ from pathlib import Path
41
+ from types import ModuleType
42
+ from typing import Any, Literal, overload
43
+ import ast
44
+ import dataclasses
45
+ import pickle
46
+
47
+ # Would `LibCST` be better than `ast` in some cases? https://github.com/hunterhogan/mapFolding/issues/7
48
+
49
+ countingIdentifierHARDCODED = 'groupsOfFolds'
50
+
51
+ @dataclasses.dataclass
52
+ class ShatteredDataclass:
53
+ astAssignDataclassRepack: ast.Assign
54
+ astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns: ast.Subscript
55
+ astTuple4AssignTargetsToFragments: ast.Tuple
56
+ countingVariableAnnotation: ast.expr
57
+ countingVariableName: ast.Name
58
+ ledgerDataclassANDFragments: LedgerOfImports = dataclasses.field(default_factory=LedgerOfImports)
59
+ list_ast_argAnnotated4ArgumentsSpecification: list[ast.arg] = dataclasses.field(default_factory=list)
60
+ list_keyword4DataclassInitialization: list[ast.keyword] = dataclasses.field(default_factory=list)
61
+ listAnnAssign4DataclassUnpack: list[ast.AnnAssign] = dataclasses.field(default_factory=list)
62
+ listAnnotations: list[ast.expr] = dataclasses.field(default_factory=list)
63
+ listNameDataclassFragments4Parameters: list[ast.Name] = dataclasses.field(default_factory=list)
64
+
65
+ def shatter_dataclassesDOTdataclass(logicalPathModule: strDotStrCuzPyStoopid, dataclass_Identifier: ast_Identifier, instance_Identifier: ast_Identifier) -> ShatteredDataclass:
66
+ """
67
+ Parameters:
68
+ logicalPathModule: gimme string cuz python is stoopid
69
+ dataclass_Identifier: The identifier of the dataclass to be dismantled.
70
+ instance_Identifier: In the synthesized module/function/scope, the identifier that will be used for the instance.
71
+ """
72
+ module: ast.Module = ast.parse(inspect_getsource(import_module(logicalPathModule)))
73
+ astName_dataclassesDOTdataclass = Make.astName(dataclass_Identifier)
74
+
75
+ dataclass = extractClassDef(dataclass_Identifier, module)
76
+ if not isinstance(dataclass, ast.ClassDef):
77
+ raise ValueError(f"I could not find {dataclass_Identifier=} in {logicalPathModule=}.")
78
+
79
+ ledgerDataclassANDFragments = LedgerOfImports()
80
+ list_ast_argAnnotated4ArgumentsSpecification: list[ast.arg] = []
81
+ list_keyword4DataclassInitialization: list[ast.keyword] = []
82
+ listAnnAssign4DataclassUnpack: list[ast.AnnAssign] = []
83
+ listAnnotations: list[ast.expr] = []
84
+ listNameDataclassFragments4Parameters: list[ast.Name] = []
85
+
86
+ # TODO get the value from `groupsOfFolds: DatatypeFoldsTotal = dataclasses.field(default=DatatypeFoldsTotal(0), metadata={'theCountingIdentifier': True})`
87
+ countingVariable = countingIdentifierHARDCODED
88
+
89
+ addToLedgerPredicate = ifThis.isAnnAssignAndAnnotationIsName
90
+ addToLedgerAction = Then.Z0Z_ledger(logicalPathModule, ledgerDataclassANDFragments)
91
+ addToLedger = NodeCollector(addToLedgerPredicate, [addToLedgerAction])
92
+
93
+ exclusionPredicate = ifThis.is_keyword_IdentifierEqualsConstantValue('init', False)
94
+ appendKeywordAction = Then.Z0Z_appendKeywordMirroredTo(list_keyword4DataclassInitialization)
95
+ filteredAppendKeywordAction = Z0Z_executeActionUnlessDescendantMatches(exclusionPredicate, appendKeywordAction) # type: ignore
96
+
97
+ NodeCollector(
98
+ ifThis.isAnnAssignAndTargetIsName,
99
+ [Then.Z0Z_appendAnnAssignOf_nameDOTnameTo(instance_Identifier, listAnnAssign4DataclassUnpack)
100
+ , Then.append_targetTo(listNameDataclassFragments4Parameters) # type: ignore
101
+ , lambda node: addToLedger.visit(node)
102
+ , filteredAppendKeywordAction
103
+ , lambda node: list_ast_argAnnotated4ArgumentsSpecification.append(Make.ast_arg(node.target.id, node.annotation)) # type: ignore
104
+ , lambda node: listAnnotations.append(node.annotation) # type: ignore
105
+ ]
106
+ ).visit(dataclass)
107
+
108
+ shatteredDataclass = ShatteredDataclass(
109
+ astAssignDataclassRepack = Make.astAssign(listTargets=[Make.astName(instance_Identifier)], value=Make.astCall(astName_dataclassesDOTdataclass, list_astKeywords=list_keyword4DataclassInitialization))
110
+ , astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns = Make.astSubscript(Make.astName('tuple'), Make.astTuple(listAnnotations))
111
+ , astTuple4AssignTargetsToFragments = Make.astTuple(listNameDataclassFragments4Parameters, ast.Store())
112
+ , countingVariableAnnotation = next(ast_arg.annotation for ast_arg in list_ast_argAnnotated4ArgumentsSpecification if ast_arg.arg == countingVariable) or Make.astName('Any')
113
+ , countingVariableName = Make.astName(countingVariable)
114
+ , ledgerDataclassANDFragments = ledgerDataclassANDFragments
115
+ , list_ast_argAnnotated4ArgumentsSpecification = list_ast_argAnnotated4ArgumentsSpecification
116
+ , list_keyword4DataclassInitialization = list_keyword4DataclassInitialization
117
+ , listAnnAssign4DataclassUnpack = listAnnAssign4DataclassUnpack
118
+ , listAnnotations = listAnnotations
119
+ , listNameDataclassFragments4Parameters = listNameDataclassFragments4Parameters
120
+ )
121
+
122
+ shatteredDataclass.ledgerDataclassANDFragments.addImportFromStr(logicalPathModule, dataclass_Identifier)
123
+ return shatteredDataclass
124
+
125
+ @overload
126
+ def makeStateJobOUTDATED(listDimensions: Sequence[int], *, writeJob: Literal[True], **keywordArguments: Any) -> Path: ...
127
+ @overload
128
+ def makeStateJobOUTDATED(listDimensions: Sequence[int], *, writeJob: Literal[False], **keywordArguments: Any) -> ComputationState: ...
129
+ def makeStateJobOUTDATED(listDimensions: Sequence[int], *, writeJob: bool = True, **keywordArguments: Any) -> ComputationState | Path:
130
+ """
131
+ Creates a computation state job for map folding calculations and optionally saves it to disk.
132
+
133
+ This function initializes a computation state for map folding calculations based on the given dimensions,
134
+ sets up the initial counting configuration, and can optionally save the state to a pickle file.
135
+
136
+ Parameters:
137
+ listDimensions: List of integers representing the dimensions of the map to be folded.
138
+ writeJob (True): Whether to save the state to disk.
139
+ **keywordArguments: Additional keyword arguments to pass to the computation state initialization.
140
+
141
+ Returns:
142
+ stateUniversal|pathFilenameJob: The computation state for the map folding calculations, or
143
+ the path to the saved state file if writeJob is True.
144
+ """
145
+ mapShape = validateListDimensions(listDimensions)
146
+ stateUniversal: ComputationState = outfitCountFolds(mapShape, **keywordArguments)
147
+
148
+ moduleSource: ModuleType = getSourceAlgorithm()
149
+ # TODO `countInitialize` is hardcoded
150
+ stateUniversal = moduleSource.countInitialize(stateUniversal)
151
+
152
+ if not writeJob:
153
+ return stateUniversal
154
+
155
+ pathFilenameChopChop = getPathFilenameFoldsTotal(stateUniversal.mapShape, None)
156
+ suffix = pathFilenameChopChop.suffix
157
+ pathJob = Path(str(pathFilenameChopChop)[0:-len(suffix)])
158
+ pathJob.mkdir(parents=True, exist_ok=True)
159
+ pathFilenameJob = pathJob / 'stateJob.pkl'
160
+
161
+ pathFilenameJob.write_bytes(pickle.dumps(stateUniversal))
162
+ return pathFilenameJob