mapFolding 0.8.1__py3-none-any.whl → 0.8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32) hide show
  1. mapFolding/__init__.py +5 -1
  2. mapFolding/basecamp.py +2 -2
  3. mapFolding/beDRY.py +24 -31
  4. mapFolding/oeis.py +2 -2
  5. mapFolding/reference/__init__.py +45 -0
  6. mapFolding/reference/flattened.py +20 -2
  7. mapFolding/reference/hunterNumba.py +24 -0
  8. mapFolding/reference/irvineJavaPort.py +12 -0
  9. mapFolding/reference/{jax.py → jaxCount.py} +46 -27
  10. mapFolding/reference/jobsCompleted/[2x19]/p2x19.py +197 -0
  11. mapFolding/reference/jobsCompleted/__init__.py +50 -0
  12. mapFolding/reference/jobsCompleted/p2x19/p2x19.py +29 -0
  13. mapFolding/reference/lunnanNumpy.py +16 -1
  14. mapFolding/reference/lunnanWhile.py +15 -1
  15. mapFolding/reference/rotatedEntryPoint.py +18 -0
  16. mapFolding/reference/total_countPlus1vsPlusN.py +226 -203
  17. mapFolding/someAssemblyRequired/getLLVMforNoReason.py +20 -1
  18. mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py +52 -37
  19. mapFolding/someAssemblyRequired/transformDataStructures.py +11 -5
  20. mapFolding/someAssemblyRequired/transformationTools.py +40 -42
  21. mapFolding/syntheticModules/__init__.py +1 -0
  22. mapFolding/theSSOT.py +69 -127
  23. {mapfolding-0.8.1.dist-info → mapfolding-0.8.3.dist-info}/METADATA +56 -31
  24. mapfolding-0.8.3.dist-info/RECORD +43 -0
  25. {mapfolding-0.8.1.dist-info → mapfolding-0.8.3.dist-info}/WHEEL +1 -1
  26. tests/conftest.py +43 -33
  27. tests/test_computations.py +7 -7
  28. tests/test_other.py +5 -4
  29. mapfolding-0.8.1.dist-info/RECORD +0 -39
  30. {mapfolding-0.8.1.dist-info → mapfolding-0.8.3.dist-info}/entry_points.txt +0 -0
  31. {mapfolding-0.8.1.dist-info → mapfolding-0.8.3.dist-info}/licenses/LICENSE +0 -0
  32. {mapfolding-0.8.1.dist-info → mapfolding-0.8.3.dist-info}/top_level.txt +0 -0
@@ -1,211 +1,234 @@
1
+ """
2
+ Comparison of two nearly identical counting implementations with vastly different performance.
3
+
4
+ This file provides a direct comparison between two variants of the map folding algorithm
5
+ that differ only in their approach to incrementing the folding counter. Despite their apparent
6
+ similarity, one implementation demonstrates orders of magnitude better performance than the other.
7
+
8
+ Key characteristics:
9
+ - Both implementations use Numba for performance optimization
10
+ - Both use identical data structures and array initializations
11
+ - `foldings_plus_1`: Increments the counter by 1 for each valid folding
12
+ - `foldings`: Increments the counter by n (total leaves) when certain conditions are met
13
+
14
+ The performance difference illustrates how subtle algorithmic changes can dramatically
15
+ impact computational efficiency, even when the overall algorithm structure remains unchanged.
16
+ This example serves as a compelling demonstration of the importance of algorithm analysis
17
+ and optimization for combinatorial problems.
18
+
19
+ Note: These functions are isolated for educational purposes to highlight the specific
20
+ optimization technique. The main package uses more comprehensive optimizations derived
21
+ from this and other lessons.
22
+ """
23
+
1
24
  from numba import njit
2
25
  import numpy
3
26
 
4
27
  @njit(cache=True)
5
28
  def foldings_plus_1(p: list[int], computationDivisions: int = 0, computationIndex: int = 0) -> int:
6
- n: int = 1 # Total number of leaves
7
- for dimension in p:
8
- n *= dimension
9
-
10
- d = len(p) # Number of dimensions
11
- # Compute arrays P, C, D as per the algorithm
12
- P = numpy.ones(d + 1, dtype=numpy.int64)
13
- for i in range(1, d + 1):
14
- P[i] = P[i - 1] * p[i - 1]
15
-
16
- # C[i][m] holds the i-th coordinate of leaf m
17
- C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
18
- for i in range(1, d + 1):
19
- for m in range(1, n + 1):
20
- C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
21
-
22
- # D[i][l][m] computes the leaf connected to m in section i when inserting l
23
- D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
24
- for i in range(1, d + 1):
25
- for l in range(1, n + 1):
26
- for m in range(1, l + 1):
27
- delta = C[i][l] - C[i][m]
28
- if delta % 2 == 0:
29
- # If delta is even
30
- if C[i][m] == 1:
31
- D[i][l][m] = m
32
- else:
33
- D[i][l][m] = m - P[i - 1]
34
- else:
35
- # If delta is odd
36
- if C[i][m] == p[i - 1] or m + P[i - 1] > l:
37
- D[i][l][m] = m
38
- else:
39
- D[i][l][m] = m + P[i - 1]
40
- # Initialize arrays/lists
41
- A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
42
- B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
43
- count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
44
- gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
45
- gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
46
-
47
-
48
- # Initialize variables for backtracking
49
- total_count = 0 # Total number of foldings
50
- g = 0 # Gap index
51
- l = 1 # Current leaf
52
-
53
- # Start backtracking loop
54
- while l > 0:
55
- # If we have processed all leaves, increment total count
56
- if l > n:
57
- total_count += 1
58
- else:
59
- dd = 0 # Number of sections where leaf l is unconstrained
60
- gg = g # Temporary gap index
61
- g = gapter[l - 1] # Reset gap index for current leaf
62
-
63
- # Count possible gaps for leaf l in each section
64
- for i in range(1, d + 1):
65
- if D[i][l][l] == l:
66
- dd += 1
67
- else:
68
- m = D[i][l][l]
69
- while m != l:
70
- if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
71
- gap[gg] = m
72
- if count[m] == 0:
73
- gg += 1
74
- count[m] += 1
75
- m = D[i][l][B[m]]
76
-
77
- # If leaf l is unconstrained in all sections, it can be inserted anywhere
78
- if dd == d:
79
- for m in range(l):
80
- gap[gg] = m
81
- gg += 1
82
-
83
- # Filter gaps that are common to all sections
84
- for j in range(g, gg):
85
- gap[g] = gap[j]
86
- if count[gap[j]] == d - dd:
87
- g += 1
88
- count[gap[j]] = 0 # Reset count for next iteration
89
-
90
- # Recursive backtracking steps
91
- while l > 0 and g == gapter[l - 1]:
92
- l -= 1
93
- B[A[l]] = B[l]
94
- A[B[l]] = A[l]
95
-
96
- if l > 0:
97
- g -= 1
98
- A[l] = gap[g]
99
- B[l] = B[A[l]]
100
- B[A[l]] = l
101
- A[B[l]] = l
102
- gapter[l] = g # Save current gap index
103
- l += 1 # Move to next leaf
104
-
105
- return total_count
29
+ n: int = 1 # Total number of leaves
30
+ for dimension in p:
31
+ n *= dimension
32
+
33
+ d = len(p) # Number of dimensions
34
+ # Compute arrays P, C, D as per the algorithm
35
+ P = numpy.ones(d + 1, dtype=numpy.int64)
36
+ for i in range(1, d + 1):
37
+ P[i] = P[i - 1] * p[i - 1]
38
+
39
+ # C[i][m] holds the i-th coordinate of leaf m
40
+ C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
41
+ for i in range(1, d + 1):
42
+ for m in range(1, n + 1):
43
+ C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
44
+
45
+ # D[i][l][m] computes the leaf connected to m in section i when inserting l
46
+ D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
47
+ for i in range(1, d + 1):
48
+ for l in range(1, n + 1):
49
+ for m in range(1, l + 1):
50
+ delta = C[i][l] - C[i][m]
51
+ if delta % 2 == 0:
52
+ # If delta is even
53
+ if C[i][m] == 1:
54
+ D[i][l][m] = m
55
+ else:
56
+ D[i][l][m] = m - P[i - 1]
57
+ else:
58
+ # If delta is odd
59
+ if C[i][m] == p[i - 1] or m + P[i - 1] > l:
60
+ D[i][l][m] = m
61
+ else:
62
+ D[i][l][m] = m + P[i - 1]
63
+ # Initialize arrays/lists
64
+ A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
65
+ B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
66
+ count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
67
+ gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
68
+ gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
69
+
70
+
71
+ # Initialize variables for backtracking
72
+ total_count = 0 # Total number of foldings
73
+ g = 0 # Gap index
74
+ l = 1 # Current leaf
75
+
76
+ # Start backtracking loop
77
+ while l > 0:
78
+ # If we have processed all leaves, increment total count
79
+ if l > n:
80
+ total_count += 1
81
+ else:
82
+ dd = 0 # Number of sections where leaf l is unconstrained
83
+ gg = g # Temporary gap index
84
+ g = gapter[l - 1] # Reset gap index for current leaf
85
+
86
+ # Count possible gaps for leaf l in each section
87
+ for i in range(1, d + 1):
88
+ if D[i][l][l] == l:
89
+ dd += 1
90
+ else:
91
+ m = D[i][l][l]
92
+ while m != l:
93
+ if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
94
+ gap[gg] = m
95
+ if count[m] == 0:
96
+ gg += 1
97
+ count[m] += 1
98
+ m = D[i][l][B[m]]
99
+
100
+ # If leaf l is unconstrained in all sections, it can be inserted anywhere
101
+ if dd == d:
102
+ for m in range(l):
103
+ gap[gg] = m
104
+ gg += 1
105
+
106
+ # Filter gaps that are common to all sections
107
+ for j in range(g, gg):
108
+ gap[g] = gap[j]
109
+ if count[gap[j]] == d - dd:
110
+ g += 1
111
+ count[gap[j]] = 0 # Reset count for next iteration
112
+
113
+ # Recursive backtracking steps
114
+ while l > 0 and g == gapter[l - 1]:
115
+ l -= 1
116
+ B[A[l]] = B[l]
117
+ A[B[l]] = A[l]
118
+
119
+ if l > 0:
120
+ g -= 1
121
+ A[l] = gap[g]
122
+ B[l] = B[A[l]]
123
+ B[A[l]] = l
124
+ A[B[l]] = l
125
+ gapter[l] = g # Save current gap index
126
+ l += 1 # Move to next leaf
127
+
128
+ return total_count
106
129
 
107
130
  @njit(cache=True)
108
131
  def foldings(p: list[int], computationDivisions: int = 0, computationIndex: int = 0) -> int:
109
- n: int = 1 # Total number of leaves
110
- for dimension in p:
111
- n *= dimension
112
-
113
- d = len(p) # Number of dimensions
114
- # Compute arrays P, C, D as per the algorithm
115
- P = numpy.ones(d + 1, dtype=numpy.int64)
116
- for i in range(1, d + 1):
117
- P[i] = P[i - 1] * p[i - 1]
118
-
119
- # C[i][m] holds the i-th coordinate of leaf m
120
- C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
121
- for i in range(1, d + 1):
122
- for m in range(1, n + 1):
123
- C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
124
- # C[i][m] = ((m - 1) // P[i - 1]) % p[i - 1] + 1 # NOTE different, but either one works
125
-
126
- # D[i][l][m] computes the leaf connected to m in section i when inserting l
127
- D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
128
- for i in range(1, d + 1):
129
- for l in range(1, n + 1):
130
- for m in range(1, l + 1):
131
- delta = C[i][l] - C[i][m]
132
- if delta % 2 == 0:
133
- # If delta is even
134
- if C[i][m] == 1:
135
- D[i][l][m] = m
136
- else:
137
- D[i][l][m] = m - P[i - 1]
138
- else:
139
- # If delta is odd
140
- if C[i][m] == p[i - 1] or m + P[i - 1] > l:
141
- D[i][l][m] = m
142
- else:
143
- D[i][l][m] = m + P[i - 1]
144
- # Initialize arrays/lists
145
- A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
146
- B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
147
- count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
148
- gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
149
- gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
150
-
151
-
152
- # Initialize variables for backtracking
153
- total_count = 0 # Total number of foldings
154
- g = 0 # Gap index
155
- l = 1 # Current leaf
156
-
157
- # Start backtracking loop
158
- while l > 0:
159
- if l <= 1 or B[0] == 1: # NOTE different
160
- # NOTE the above `if` statement encloses the if/else block below
161
- # NOTE these changes increase the throughput by more than an order of magnitude
162
- if l > n:
163
- total_count += n
164
- else:
165
- dd = 0 # Number of sections where leaf l is unconstrained
166
- gg = gapter[l - 1] # Track possible gaps # NOTE different, but not important
167
- g = gg # NOTE different, but not important
168
-
169
- # Count possible gaps for leaf l in each section
170
- for i in range(1, d + 1):
171
- if D[i][l][l] == l:
172
- dd += 1
173
- else:
174
- m = D[i][l][l]
175
- while m != l:
176
- if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
177
- gap[gg] = m
178
- if count[m] == 0:
179
- gg += 1
180
- count[m] += 1
181
- m = D[i][l][B[m]]
182
-
183
- # If leaf l is unconstrained in all sections, it can be inserted anywhere
184
- if dd == d:
185
- for m in range(l):
186
- gap[gg] = m
187
- gg += 1
188
-
189
- # Filter gaps that are common to all sections
190
- for j in range(g, gg):
191
- gap[g] = gap[j]
192
- if count[gap[j]] == d - dd:
193
- g += 1
194
- count[gap[j]] = 0 # Reset count for next iteration
195
-
196
- # Recursive backtracking steps
197
- while l > 0 and g == gapter[l - 1]:
198
- l -= 1
199
- B[A[l]] = B[l]
200
- A[B[l]] = A[l]
201
-
202
- if l > 0:
203
- g -= 1
204
- A[l] = gap[g]
205
- B[l] = B[A[l]]
206
- B[A[l]] = l
207
- A[B[l]] = l
208
- gapter[l] = g # Save current gap index
209
- l += 1 # Move to next leaf
210
-
211
- return total_count
132
+ n: int = 1 # Total number of leaves
133
+ for dimension in p:
134
+ n *= dimension
135
+
136
+ d = len(p) # Number of dimensions
137
+ # Compute arrays P, C, D as per the algorithm
138
+ P = numpy.ones(d + 1, dtype=numpy.int64)
139
+ for i in range(1, d + 1):
140
+ P[i] = P[i - 1] * p[i - 1]
141
+
142
+ # C[i][m] holds the i-th coordinate of leaf m
143
+ C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
144
+ for i in range(1, d + 1):
145
+ for m in range(1, n + 1):
146
+ C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
147
+ # C[i][m] = ((m - 1) // P[i - 1]) % p[i - 1] + 1 # NOTE different, but either one works
148
+
149
+ # D[i][l][m] computes the leaf connected to m in section i when inserting l
150
+ D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
151
+ for i in range(1, d + 1):
152
+ for l in range(1, n + 1):
153
+ for m in range(1, l + 1):
154
+ delta = C[i][l] - C[i][m]
155
+ if delta % 2 == 0:
156
+ # If delta is even
157
+ if C[i][m] == 1:
158
+ D[i][l][m] = m
159
+ else:
160
+ D[i][l][m] = m - P[i - 1]
161
+ else:
162
+ # If delta is odd
163
+ if C[i][m] == p[i - 1] or m + P[i - 1] > l:
164
+ D[i][l][m] = m
165
+ else:
166
+ D[i][l][m] = m + P[i - 1]
167
+ # Initialize arrays/lists
168
+ A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
169
+ B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
170
+ count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
171
+ gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
172
+ gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
173
+
174
+
175
+ # Initialize variables for backtracking
176
+ total_count = 0 # Total number of foldings
177
+ g = 0 # Gap index
178
+ l = 1 # Current leaf
179
+
180
+ # Start backtracking loop
181
+ while l > 0:
182
+ if l <= 1 or B[0] == 1: # NOTE different
183
+ # NOTE the above `if` statement encloses the if/else block below
184
+ # NOTE these changes increase the throughput by more than an order of magnitude
185
+ if l > n:
186
+ total_count += n
187
+ else:
188
+ dd = 0 # Number of sections where leaf l is unconstrained
189
+ gg = gapter[l - 1] # Track possible gaps # NOTE different, but not important
190
+ g = gg # NOTE different, but not important
191
+
192
+ # Count possible gaps for leaf l in each section
193
+ for i in range(1, d + 1):
194
+ if D[i][l][l] == l:
195
+ dd += 1
196
+ else:
197
+ m = D[i][l][l]
198
+ while m != l:
199
+ if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
200
+ gap[gg] = m
201
+ if count[m] == 0:
202
+ gg += 1
203
+ count[m] += 1
204
+ m = D[i][l][B[m]]
205
+
206
+ # If leaf l is unconstrained in all sections, it can be inserted anywhere
207
+ if dd == d:
208
+ for m in range(l):
209
+ gap[gg] = m
210
+ gg += 1
211
+
212
+ # Filter gaps that are common to all sections
213
+ for j in range(g, gg):
214
+ gap[g] = gap[j]
215
+ if count[gap[j]] == d - dd:
216
+ g += 1
217
+ count[gap[j]] = 0 # Reset count for next iteration
218
+
219
+ # Recursive backtracking steps
220
+ while l > 0 and g == gapter[l - 1]:
221
+ l -= 1
222
+ B[A[l]] = B[l]
223
+ A[B[l]] = A[l]
224
+
225
+ if l > 0:
226
+ g -= 1
227
+ A[l] = gap[g]
228
+ B[l] = B[A[l]]
229
+ B[A[l]] = l
230
+ A[B[l]] = l
231
+ gapter[l] = g # Save current gap index
232
+ l += 1 # Move to next leaf
233
+
234
+ return total_count
@@ -12,6 +12,14 @@ The extracted LLVM IR can be valuable for debugging, optimization analysis, or e
12
12
  purposes, as it provides a view into how high-level Python code is translated into
13
13
  lower-level representations for machine execution.
14
14
 
15
+ Example of successful use:
16
+ The LLVM IR for the groundbreaking 2x19 map calculation can be found at:
17
+ mapFolding/reference/jobsCompleted/[2x19]/[2x19].ll
18
+
19
+ This file demonstrates the low-level optimizations that made this previously
20
+ intractable calculation possible. The IR reveals how the abstract algorithm was
21
+ transformed into efficient machine code through Numba's compilation pipeline.
22
+
15
23
  While originally part of a tighter integration with the code generation pipeline,
16
24
  this module now operates as a standalone utility that can be applied to any module
17
25
  containing Numba-compiled functions.
@@ -23,7 +31,18 @@ import importlib.util
23
31
  import llvmlite.binding
24
32
 
25
33
  def writeModuleLLVM(pathFilename: Path, identifierCallable: str) -> Path:
26
- """Import the generated module directly and get its LLVM IR."""
34
+ """Import the generated module directly and get its LLVM IR.
35
+
36
+ Parameters
37
+ pathFilename: Path to the Python module file containing the Numba-compiled function
38
+ identifierCallable: Name of the function within the module to extract LLVM IR from
39
+
40
+ Returns
41
+ Path to the generated .ll file containing the extracted LLVM IR
42
+
43
+ For an example of the output, see reference/jobsCompleted/[2x19]/[2x19].ll,
44
+ which contains the IR for the historically significant 2x19 map calculation.
45
+ """
27
46
  specTarget: ModuleSpec | None = importlib.util.spec_from_file_location("generatedModule", pathFilename)
28
47
  if specTarget is None or specTarget.loader is None:
29
48
  raise ImportError(f"Could not create module spec or loader for {pathFilename}")
@@ -22,6 +22,7 @@ to generate a fresh optimized implementation.
22
22
  """
23
23
 
24
24
  from mapFolding.someAssemblyRequired import (
25
+ ast_Identifier,
25
26
  extractFunctionDef,
26
27
  ifThis,
27
28
  IngredientsFunction,
@@ -42,7 +43,17 @@ from mapFolding.someAssemblyRequired.transformDataStructures import shatter_data
42
43
  from mapFolding.theSSOT import raiseIfNoneGitHubIssueNumber3
43
44
  import ast
44
45
 
46
+ def astModuleToIngredientsFunction(astModule: ast.Module, identifierFunctionDef: ast_Identifier) -> IngredientsFunction:
47
+ astFunctionDef = extractFunctionDef(astModule, identifierFunctionDef)
48
+ if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
49
+ return IngredientsFunction(astFunctionDef, LedgerOfImports(astModule))
50
+
51
+
45
52
  def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> None:
53
+ # TODO a tool to automatically remove unused variables from the ArgumentsSpecification (return, and returns) _might_ be nice.
54
+ # TODO remember that `sequentialCallable` and `sourceSequentialCallable` are two different values.
55
+ # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
56
+ # ===========================================================
46
57
  """
47
58
  Think about a better organization of this function.
48
59
 
@@ -61,34 +72,51 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> N
61
72
  would be automatically triggered. I have no idea how that would happen, but the transformations are highly predictable,
62
73
  so using a programming language to construct if-this-then-that cascades shouldn't be a problem, you know?
63
74
 
64
- # TODO a tool to automatically remove unused variables from the ArgumentsSpecification (return, and returns) _might_ be nice.
65
75
  """
66
- dictionaryReplacementStatements = makeDictionaryReplacementStatements(numbaFlow.source_astModule)
67
- # TODO remember that `sequentialCallable` and `sourceSequentialCallable` are two different values.
68
- # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
76
+ ingredientsDispatcher: IngredientsFunction = astModuleToIngredientsFunction(numbaFlow.source_astModule, numbaFlow.sourceDispatcherCallable)
77
+ ingredientsInitialize: IngredientsFunction = astModuleToIngredientsFunction(numbaFlow.source_astModule, numbaFlow.sourceInitializeCallable)
78
+ ingredientsParallel: IngredientsFunction = astModuleToIngredientsFunction(numbaFlow.source_astModule, numbaFlow.sourceParallelCallable)
79
+ ingredientsSequential: IngredientsFunction = astModuleToIngredientsFunction(numbaFlow.source_astModule, numbaFlow.sourceSequentialCallable)
69
80
 
81
+ # Inline functions
82
+ # NOTE Replacements statements are based on the identifiers in the _source_
83
+ dictionaryReplacementStatements = makeDictionaryReplacementStatements(numbaFlow.source_astModule)
84
+ ingredientsInitialize.astFunctionDef = inlineThisFunctionWithTheseValues(ingredientsInitialize.astFunctionDef, dictionaryReplacementStatements)
85
+ ingredientsParallel.astFunctionDef = inlineThisFunctionWithTheseValues(ingredientsParallel.astFunctionDef, dictionaryReplacementStatements)
86
+ ingredientsSequential.astFunctionDef = inlineThisFunctionWithTheseValues(ingredientsSequential.astFunctionDef, dictionaryReplacementStatements)
87
+
88
+ # Assign CALLABLE identifiers per the recipe.
89
+ # TODO Assign the other identifiers.
90
+ listIngredientsFunctions = [ingredientsDispatcher, ingredientsInitialize, ingredientsParallel, ingredientsSequential]
91
+ listFindReplace = [(numbaFlow.sourceDispatcherCallable, numbaFlow.dispatcherCallable),
92
+ (numbaFlow.sourceInitializeCallable, numbaFlow.initializeCallable),
93
+ (numbaFlow.sourceParallelCallable, numbaFlow.parallelCallable),
94
+ (numbaFlow.sourceSequentialCallable, numbaFlow.sequentialCallable)]
95
+ for ingredients in listIngredientsFunctions:
96
+ ImaNode = ingredients.astFunctionDef
97
+ for source_Identifier, Z0Z_Identifier in listFindReplace:
98
+ findThis = ifThis.isCall_Identifier(source_Identifier)
99
+ doThis = Then.replaceDOTfuncWith(Make.astName(Z0Z_Identifier))
100
+ NodeReplacer(findThis, doThis).visit(ImaNode)
101
+
102
+ ingredientsDispatcher.astFunctionDef.name = numbaFlow.dispatcherCallable
103
+ ingredientsInitialize.astFunctionDef.name = numbaFlow.initializeCallable
104
+ ingredientsParallel.astFunctionDef.name = numbaFlow.parallelCallable
105
+ ingredientsSequential.astFunctionDef.name = numbaFlow.sequentialCallable
70
106
  # ===========================================================
71
- sourcePython = numbaFlow.sourceDispatcherCallable
72
- astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
73
- if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
74
- ingredientsDispatcher = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
107
+ # Old organization
75
108
 
76
109
  # sourceParallelCallable
77
110
  shatteredDataclass = shatter_dataclassesDOTdataclass(numbaFlow.logicalPathModuleDataclass, numbaFlow.sourceDataclassIdentifier, numbaFlow.sourceDataclassInstanceTaskDistribution)
78
111
  ingredientsDispatcher.imports.update(shatteredDataclass.ledgerDataclassANDFragments)
79
112
 
80
- # TODO remove hardcoding
81
- namespaceHARDCODED = 'concurrencyManager'
82
- identifierHARDCODED = 'submit'
83
- sourceNamespace = namespaceHARDCODED
84
- sourceIdentifier = identifierHARDCODED
85
113
  NodeReplacer(
86
- findThis = ifThis.isAssignAndValueIsCallNamespace_Identifier(sourceNamespace, sourceIdentifier)
114
+ findThis = ifThis.isAssignAndValueIsCallNamespace_Identifier(numbaFlow.sourceConcurrencyManagerNamespace, numbaFlow.sourceConcurrencyManagerIdentifier)
87
115
  , doThat = Then.insertThisAbove(shatteredDataclass.listAnnAssign4DataclassUnpack)
88
116
  ).visit(ingredientsDispatcher.astFunctionDef)
89
117
  NodeReplacer(
90
- findThis = ifThis.isCallNamespace_Identifier(sourceNamespace, sourceIdentifier)
91
- , doThat = Then.replaceWith(Make.astCall(Make.astAttribute(Make.astName(sourceNamespace), sourceIdentifier)
118
+ findThis = ifThis.isCallNamespace_Identifier(numbaFlow.sourceConcurrencyManagerNamespace, numbaFlow.sourceConcurrencyManagerIdentifier)
119
+ , doThat = Then.replaceWith(Make.astCall(Make.astAttribute(Make.astName(numbaFlow.sourceConcurrencyManagerNamespace), numbaFlow.sourceConcurrencyManagerIdentifier)
92
120
  , listArguments=[Make.astName(numbaFlow.parallelCallable)] + shatteredDataclass.listNameDataclassFragments4Parameters))
93
121
  ).visit(ingredientsDispatcher.astFunctionDef)
94
122
 
@@ -121,32 +149,20 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> N
121
149
  ingredientsDispatcher.imports.update(shatteredDataclass.ledgerDataclassANDFragments)
122
150
 
123
151
  NodeReplacer(
124
- findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
152
+ findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable) # NOTE source
125
153
  , doThat = Then.insertThisAbove(shatteredDataclass.listAnnAssign4DataclassUnpack)
126
154
  ).visit(ingredientsDispatcher.astFunctionDef)
127
155
  NodeReplacer(
128
- findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
156
+ findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable) # NOTE source
129
157
  , doThat = Then.insertThisBelow([shatteredDataclass.astAssignDataclassRepack])
130
158
  ).visit(ingredientsDispatcher.astFunctionDef)
131
159
  NodeReplacer(
132
- findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
160
+ findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable) # NOTE source
133
161
  , doThat = Then.replaceWith(Make.astAssign(listTargets=[shatteredDataclass.astTuple4AssignTargetsToFragments], value=Make.astCall(Make.astName(numbaFlow.sequentialCallable), shatteredDataclass.listNameDataclassFragments4Parameters)))
134
162
  ).visit(ingredientsDispatcher.astFunctionDef)
135
163
 
136
- # ===========================================================
137
- sourcePython = numbaFlow.sourceInitializeCallable
138
- astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
139
- if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
140
- astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
141
- ingredientsInitialize = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
142
164
 
143
165
  # ===========================================================
144
- sourcePython = numbaFlow.sourceParallelCallable
145
- astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
146
- if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
147
- astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
148
- ingredientsParallel = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
149
- ingredientsParallel.astFunctionDef.name = numbaFlow.parallelCallable
150
166
  ingredientsParallel.astFunctionDef.args = Make.astArgumentsSpecification(args=shatteredDataclass.list_ast_argAnnotated4ArgumentsSpecification)
151
167
  NodeReplacer(
152
168
  findThis = ifThis.isReturn
@@ -163,12 +179,6 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> N
163
179
  ingredientsParallel = decorateCallableWithNumba(ingredientsParallel)
164
180
 
165
181
  # ===========================================================
166
- sourcePython = numbaFlow.sourceSequentialCallable
167
- astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
168
- if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
169
- astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
170
- ingredientsSequential = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
171
- ingredientsSequential.astFunctionDef.name = numbaFlow.sequentialCallable
172
182
  ingredientsSequential.astFunctionDef.args = Make.astArgumentsSpecification(args=shatteredDataclass.list_ast_argAnnotated4ArgumentsSpecification)
173
183
  NodeReplacer(
174
184
  findThis = ifThis.isReturn
@@ -182,8 +192,13 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> N
182
192
  replacementMap = {statement.value: statement.target for statement in shatteredDataclass.listAnnAssign4DataclassUnpack}
183
193
  ingredientsSequential.astFunctionDef = Z0Z_replaceMatchingASTnodes(ingredientsSequential.astFunctionDef, replacementMap) # type: ignore
184
194
  ingredientsSequential = decorateCallableWithNumba(ingredientsSequential)
195
+ # End old organization
196
+ # ===========================================================
185
197
 
186
198
  # ===========================================================
199
+ # End function-level transformations
200
+ # ===========================================================
201
+ # Module-level transformations
187
202
  ingredientsModuleNumbaUnified = IngredientsModule(
188
203
  ingredientsFunction=[ingredientsInitialize,
189
204
  ingredientsParallel,