mapFolding 0.8.1__py3-none-any.whl → 0.8.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/__init__.py +5 -1
- mapFolding/basecamp.py +2 -2
- mapFolding/beDRY.py +24 -31
- mapFolding/oeis.py +2 -2
- mapFolding/reference/__init__.py +45 -0
- mapFolding/reference/flattened.py +20 -2
- mapFolding/reference/hunterNumba.py +24 -0
- mapFolding/reference/irvineJavaPort.py +12 -0
- mapFolding/reference/{jax.py → jaxCount.py} +46 -27
- mapFolding/reference/jobsCompleted/[2x19]/p2x19.py +197 -0
- mapFolding/reference/jobsCompleted/__init__.py +50 -0
- mapFolding/reference/jobsCompleted/p2x19/p2x19.py +29 -0
- mapFolding/reference/lunnanNumpy.py +16 -1
- mapFolding/reference/lunnanWhile.py +15 -1
- mapFolding/reference/rotatedEntryPoint.py +18 -0
- mapFolding/reference/total_countPlus1vsPlusN.py +226 -203
- mapFolding/someAssemblyRequired/getLLVMforNoReason.py +20 -1
- mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py +52 -37
- mapFolding/someAssemblyRequired/transformDataStructures.py +11 -5
- mapFolding/someAssemblyRequired/transformationTools.py +40 -42
- mapFolding/syntheticModules/__init__.py +1 -0
- mapFolding/theSSOT.py +69 -127
- {mapfolding-0.8.1.dist-info → mapfolding-0.8.3.dist-info}/METADATA +56 -31
- mapfolding-0.8.3.dist-info/RECORD +43 -0
- {mapfolding-0.8.1.dist-info → mapfolding-0.8.3.dist-info}/WHEEL +1 -1
- tests/conftest.py +43 -33
- tests/test_computations.py +7 -7
- tests/test_other.py +5 -4
- mapfolding-0.8.1.dist-info/RECORD +0 -39
- {mapfolding-0.8.1.dist-info → mapfolding-0.8.3.dist-info}/entry_points.txt +0 -0
- {mapfolding-0.8.1.dist-info → mapfolding-0.8.3.dist-info}/licenses/LICENSE +0 -0
- {mapfolding-0.8.1.dist-info → mapfolding-0.8.3.dist-info}/top_level.txt +0 -0
mapFolding/reference/total_countPlus1vsPlusN.py:

```diff
@@ -1,211 +1,234 @@
+"""
+Comparison of two nearly identical counting implementations with vastly different performance.
+
+This file provides a direct comparison between two variants of the map folding algorithm
+that differ only in their approach to incrementing the folding counter. Despite their apparent
+similarity, one implementation demonstrates orders of magnitude better performance than the other.
+
+Key characteristics:
+- Both implementations use Numba for performance optimization
+- Both use identical data structures and array initializations
+- `foldings_plus_1`: Increments the counter by 1 for each valid folding
+- `foldings`: Increments the counter by n (total leaves) when certain conditions are met
+
+The performance difference illustrates how subtle algorithmic changes can dramatically
+impact computational efficiency, even when the overall algorithm structure remains unchanged.
+This example serves as a compelling demonstration of the importance of algorithm analysis
+and optimization for combinatorial problems.
+
+Note: These functions are isolated for educational purposes to highlight the specific
+optimization technique. The main package uses more comprehensive optimizations derived
+from this and other lessons.
+"""
+
 from numba import njit
 import numpy
 
 @njit(cache=True)
 def foldings_plus_1(p: list[int], computationDivisions: int = 0, computationIndex: int = 0) -> int:
- [100 lines removed; the previous foldings_plus_1 body is not shown in this view]
+    n: int = 1 # Total number of leaves
+    for dimension in p:
+        n *= dimension
+
+    d = len(p) # Number of dimensions
+    # Compute arrays P, C, D as per the algorithm
+    P = numpy.ones(d + 1, dtype=numpy.int64)
+    for i in range(1, d + 1):
+        P[i] = P[i - 1] * p[i - 1]
+
+    # C[i][m] holds the i-th coordinate of leaf m
+    C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
+    for i in range(1, d + 1):
+        for m in range(1, n + 1):
+            C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
+
+    # D[i][l][m] computes the leaf connected to m in section i when inserting l
+    D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
+    for i in range(1, d + 1):
+        for l in range(1, n + 1):
+            for m in range(1, l + 1):
+                delta = C[i][l] - C[i][m]
+                if delta % 2 == 0:
+                    # If delta is even
+                    if C[i][m] == 1:
+                        D[i][l][m] = m
+                    else:
+                        D[i][l][m] = m - P[i - 1]
+                else:
+                    # If delta is odd
+                    if C[i][m] == p[i - 1] or m + P[i - 1] > l:
+                        D[i][l][m] = m
+                    else:
+                        D[i][l][m] = m + P[i - 1]
+    # Initialize arrays/lists
+    A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
+    B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
+    count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
+    gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
+    gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
+
+
+    # Initialize variables for backtracking
+    total_count = 0 # Total number of foldings
+    g = 0 # Gap index
+    l = 1 # Current leaf
+
+    # Start backtracking loop
+    while l > 0:
+        # If we have processed all leaves, increment total count
+        if l > n:
+            total_count += 1
+        else:
+            dd = 0 # Number of sections where leaf l is unconstrained
+            gg = g # Temporary gap index
+            g = gapter[l - 1] # Reset gap index for current leaf
+
+            # Count possible gaps for leaf l in each section
+            for i in range(1, d + 1):
+                if D[i][l][l] == l:
+                    dd += 1
+                else:
+                    m = D[i][l][l]
+                    while m != l:
+                        if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
+                            gap[gg] = m
+                            if count[m] == 0:
+                                gg += 1
+                            count[m] += 1
+                        m = D[i][l][B[m]]
+
+            # If leaf l is unconstrained in all sections, it can be inserted anywhere
+            if dd == d:
+                for m in range(l):
+                    gap[gg] = m
+                    gg += 1
+
+            # Filter gaps that are common to all sections
+            for j in range(g, gg):
+                gap[g] = gap[j]
+                if count[gap[j]] == d - dd:
+                    g += 1
+                count[gap[j]] = 0 # Reset count for next iteration
+
+        # Recursive backtracking steps
+        while l > 0 and g == gapter[l - 1]:
+            l -= 1
+            B[A[l]] = B[l]
+            A[B[l]] = A[l]
+
+        if l > 0:
+            g -= 1
+            A[l] = gap[g]
+            B[l] = B[A[l]]
+            B[A[l]] = l
+            A[B[l]] = l
+            gapter[l] = g # Save current gap index
+            l += 1 # Move to next leaf
+
+    return total_count
 
 @njit(cache=True)
 def foldings(p: list[int], computationDivisions: int = 0, computationIndex: int = 0) -> int:
- [103 lines removed; the previous foldings body is not shown in this view]
+    n: int = 1 # Total number of leaves
+    for dimension in p:
+        n *= dimension
+
+    d = len(p) # Number of dimensions
+    # Compute arrays P, C, D as per the algorithm
+    P = numpy.ones(d + 1, dtype=numpy.int64)
+    for i in range(1, d + 1):
+        P[i] = P[i - 1] * p[i - 1]
+
+    # C[i][m] holds the i-th coordinate of leaf m
+    C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
+    for i in range(1, d + 1):
+        for m in range(1, n + 1):
+            C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
+            # C[i][m] = ((m - 1) // P[i - 1]) % p[i - 1] + 1 # NOTE different, but either one works
+
+    # D[i][l][m] computes the leaf connected to m in section i when inserting l
+    D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
+    for i in range(1, d + 1):
+        for l in range(1, n + 1):
+            for m in range(1, l + 1):
+                delta = C[i][l] - C[i][m]
+                if delta % 2 == 0:
+                    # If delta is even
+                    if C[i][m] == 1:
+                        D[i][l][m] = m
+                    else:
+                        D[i][l][m] = m - P[i - 1]
+                else:
+                    # If delta is odd
+                    if C[i][m] == p[i - 1] or m + P[i - 1] > l:
+                        D[i][l][m] = m
+                    else:
+                        D[i][l][m] = m + P[i - 1]
+    # Initialize arrays/lists
+    A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
+    B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
+    count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
+    gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
+    gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
+
+
+    # Initialize variables for backtracking
+    total_count = 0 # Total number of foldings
+    g = 0 # Gap index
+    l = 1 # Current leaf
+
+    # Start backtracking loop
+    while l > 0:
+        if l <= 1 or B[0] == 1: # NOTE different
+            # NOTE the above `if` statement encloses the the if/else block below
+            # NOTE these changes increase the throughput by more than an order of magnitude
+            if l > n:
+                total_count += n
+            else:
+                dd = 0 # Number of sections where leaf l is unconstrained
+                gg = gapter[l - 1] # Track possible gaps # NOTE different, but not important
+                g = gg # NOTE different, but not important
+
+                # Count possible gaps for leaf l in each section
+                for i in range(1, d + 1):
+                    if D[i][l][l] == l:
+                        dd += 1
+                    else:
+                        m = D[i][l][l]
+                        while m != l:
+                            if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
+                                gap[gg] = m
+                                if count[m] == 0:
+                                    gg += 1
+                                count[m] += 1
+                            m = D[i][l][B[m]]
+
+                # If leaf l is unconstrained in all sections, it can be inserted anywhere
+                if dd == d:
+                    for m in range(l):
+                        gap[gg] = m
+                        gg += 1
+
+                # Filter gaps that are common to all sections
+                for j in range(g, gg):
+                    gap[g] = gap[j]
+                    if count[gap[j]] == d - dd:
+                        g += 1
+                    count[gap[j]] = 0 # Reset count for next iteration
+
+        # Recursive backtracking steps
+        while l > 0 and g == gapter[l - 1]:
+            l -= 1
+            B[A[l]] = B[l]
+            A[B[l]] = A[l]
+
+        if l > 0:
+            g -= 1
+            A[l] = gap[g]
+            B[l] = B[A[l]]
+            B[A[l]] = l
+            A[B[l]] = l
+            gapter[l] = g # Save current gap index
+            l += 1 # Move to next leaf
+
+    return total_count
```
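For a quick sanity check of the comparison above, here is a minimal sketch (assuming the module is importable from the installed wheel and that Numba accepts the plain-list argument as written): both variants should report the same total for a small map, and timing them on larger shapes is where the throughput gap described in the docstring shows up.

```python
from mapFolding.reference.total_countPlus1vsPlusN import foldings, foldings_plus_1

mapShape = [2, 3]  # small enough to finish quickly

resultPlus1 = foldings_plus_1(mapShape)  # counts each folding individually
resultPlusN = foldings(mapShape)         # counts n at a time under the guard condition

# The two variants compute the same total; only the throughput differs.
assert resultPlus1 == resultPlusN
print(resultPlus1)
```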
mapFolding/someAssemblyRequired/getLLVMforNoReason.py:

```diff
@@ -12,6 +12,14 @@ The extracted LLVM IR can be valuable for debugging, optimization analysis, or e
 purposes, as it provides a view into how high-level Python code is translated into
 lower-level representations for machine execution.
 
+Example of successful use:
+The LLVM IR for the groundbreaking 2x19 map calculation can be found at:
+mapFolding/reference/jobsCompleted/[2x19]/[2x19].ll
+
+This file demonstrates the low-level optimizations that made this previously
+intractable calculation possible. The IR reveals how the abstract algorithm was
+transformed into efficient machine code through Numba's compilation pipeline.
+
 While originally part of a tighter integration with the code generation pipeline,
 this module now operates as a standalone utility that can be applied to any module
 containing Numba-compiled functions.
```
```diff
@@ -23,7 +31,18 @@ import importlib.util
 import llvmlite.binding
 
 def writeModuleLLVM(pathFilename: Path, identifierCallable: str) -> Path:
-    """Import the generated module directly and get its LLVM IR.
+    """Import the generated module directly and get its LLVM IR.
+
+    Parameters
+        pathFilename: Path to the Python module file containing the Numba-compiled function
+        identifierCallable: Name of the function within the module to extract LLVM IR from
+
+    Returns
+        Path to the generated .ll file containing the extracted LLVM IR
+
+    For an example of the output, see reference/jobsCompleted/[2x19]/[2x19].ll,
+    which contains the IR for the historically significant 2x19 map calculation.
+    """
     specTarget: ModuleSpec | None = importlib.util.spec_from_file_location("generatedModule", pathFilename)
     if specTarget is None or specTarget.loader is None:
         raise ImportError(f"Could not create module spec or loader for {pathFilename}")
```
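A minimal usage sketch for `writeModuleLLVM` as documented above; the module path and callable name below are placeholders for illustration, not identifiers shipped in the package.

```python
from pathlib import Path

from mapFolding.someAssemblyRequired.getLLVMforNoReason import writeModuleLLVM

# Placeholder values: point these at any module containing an @njit-compiled callable.
pathModule = Path("generatedCountingModule.py")
callableName = "countFoldsCompiled"

pathLL = writeModuleLLVM(pathModule, callableName)
print(f"LLVM IR written to {pathLL}")
```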
mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py:

```diff
@@ -22,6 +22,7 @@ to generate a fresh optimized implementation.
 """
 
 from mapFolding.someAssemblyRequired import (
+    ast_Identifier,
     extractFunctionDef,
     ifThis,
     IngredientsFunction,
```
```diff
@@ -42,7 +43,17 @@ from mapFolding.someAssemblyRequired.transformDataStructures import shatter_data
 from mapFolding.theSSOT import raiseIfNoneGitHubIssueNumber3
 import ast
 
+def astModuleToIngredientsFunction(astModule: ast.Module, identifierFunctionDef: ast_Identifier) -> IngredientsFunction:
+    astFunctionDef = extractFunctionDef(astModule, identifierFunctionDef)
+    if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
+    return IngredientsFunction(astFunctionDef, LedgerOfImports(astModule))
+
+
 def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> None:
+    # TODO a tool to automatically remove unused variables from the ArgumentsSpecification (return, and returns) _might_ be nice.
+    # TODO remember that `sequentialCallable` and `sourceSequentialCallable` are two different values.
+    # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
+    # ===========================================================
     """
     Think about a better organization of this function.
 
```
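For readers unfamiliar with the package helpers, here is a minimal standard-library sketch of the extraction step that `astModuleToIngredientsFunction` wraps; `extractFunctionDef`, `IngredientsFunction`, and `LedgerOfImports` are not re-implemented, and the helper below is illustrative only.

```python
import ast

def findFunctionDef(astModule: ast.Module, identifier: str) -> ast.FunctionDef | None:
    # Walk the parsed module and return the first FunctionDef whose name matches.
    for node in ast.walk(astModule):
        if isinstance(node, ast.FunctionDef) and node.name == identifier:
            return node
    return None

astModule = ast.parse("def exampleCallable():\n    return 0\n")
print(findFunctionDef(astModule, "exampleCallable"))
```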
```diff
@@ -61,34 +72,51 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> N
     would be automatically triggered. I have no idea how that would happen, but the transformations are highly predictable,
     so using a programming language to construct if-this-then-that cascades shouldn't be a problem, you know?
 
-    # TODO a tool to automatically remove unused variables from the ArgumentsSpecification (return, and returns) _might_ be nice.
     """
-
-
-
+    ingredientsDispatcher: IngredientsFunction = astModuleToIngredientsFunction(numbaFlow.source_astModule, numbaFlow.sourceDispatcherCallable)
+    ingredientsInitialize: IngredientsFunction = astModuleToIngredientsFunction(numbaFlow.source_astModule, numbaFlow.sourceInitializeCallable)
+    ingredientsParallel: IngredientsFunction = astModuleToIngredientsFunction(numbaFlow.source_astModule, numbaFlow.sourceParallelCallable)
+    ingredientsSequential: IngredientsFunction = astModuleToIngredientsFunction(numbaFlow.source_astModule, numbaFlow.sourceSequentialCallable)
 
+    # Inline functions
+    # NOTE Replacements statements are based on the identifiers in the _source_
+    dictionaryReplacementStatements = makeDictionaryReplacementStatements(numbaFlow.source_astModule)
+    ingredientsInitialize.astFunctionDef = inlineThisFunctionWithTheseValues(ingredientsInitialize.astFunctionDef, dictionaryReplacementStatements)
+    ingredientsParallel.astFunctionDef = inlineThisFunctionWithTheseValues(ingredientsParallel.astFunctionDef, dictionaryReplacementStatements)
+    ingredientsSequential.astFunctionDef = inlineThisFunctionWithTheseValues(ingredientsSequential.astFunctionDef, dictionaryReplacementStatements)
+
+    # Assign CALLABLE identifiers per the recipe.
+    # TODO Assign the other identifiers.
+    listIngredientsFunctions = [ingredientsDispatcher, ingredientsInitialize, ingredientsParallel, ingredientsSequential]
+    listFindReplace = [(numbaFlow.sourceDispatcherCallable, numbaFlow.dispatcherCallable),
+        (numbaFlow.sourceInitializeCallable, numbaFlow.initializeCallable),
+        (numbaFlow.sourceParallelCallable, numbaFlow.parallelCallable),
+        (numbaFlow.sourceSequentialCallable, numbaFlow.sequentialCallable)]
+    for ingredients in listIngredientsFunctions:
+        ImaNode = ingredients.astFunctionDef
+        for source_Identifier, Z0Z_Identifier in listFindReplace:
+            findThis = ifThis.isCall_Identifier(source_Identifier)
+            doThis = Then.replaceDOTfuncWith(Make.astName(Z0Z_Identifier))
+            NodeReplacer(findThis, doThis).visit(ImaNode)
+
+    ingredientsDispatcher.astFunctionDef.name = numbaFlow.dispatcherCallable
+    ingredientsInitialize.astFunctionDef.name = numbaFlow.initializeCallable
+    ingredientsParallel.astFunctionDef.name = numbaFlow.parallelCallable
+    ingredientsSequential.astFunctionDef.name = numbaFlow.sequentialCallable
     # ===========================================================
-
-    astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
-    if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
-    ingredientsDispatcher = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
+    # Old organization
 
     # sourceParallelCallable
     shatteredDataclass = shatter_dataclassesDOTdataclass(numbaFlow.logicalPathModuleDataclass, numbaFlow.sourceDataclassIdentifier, numbaFlow.sourceDataclassInstanceTaskDistribution)
     ingredientsDispatcher.imports.update(shatteredDataclass.ledgerDataclassANDFragments)
 
-    # TODO remove hardcoding
-    namespaceHARDCODED = 'concurrencyManager'
-    identifierHARDCODED = 'submit'
-    sourceNamespace = namespaceHARDCODED
-    sourceIdentifier = identifierHARDCODED
     NodeReplacer(
-        findThis = ifThis.isAssignAndValueIsCallNamespace_Identifier(
+        findThis = ifThis.isAssignAndValueIsCallNamespace_Identifier(numbaFlow.sourceConcurrencyManagerNamespace, numbaFlow.sourceConcurrencyManagerIdentifier)
         , doThat = Then.insertThisAbove(shatteredDataclass.listAnnAssign4DataclassUnpack)
     ).visit(ingredientsDispatcher.astFunctionDef)
     NodeReplacer(
-        findThis = ifThis.isCallNamespace_Identifier(
-        , doThat = Then.replaceWith(Make.astCall(Make.astAttribute(Make.astName(
+        findThis = ifThis.isCallNamespace_Identifier(numbaFlow.sourceConcurrencyManagerNamespace, numbaFlow.sourceConcurrencyManagerIdentifier)
+        , doThat = Then.replaceWith(Make.astCall(Make.astAttribute(Make.astName(numbaFlow.sourceConcurrencyManagerNamespace), numbaFlow.sourceConcurrencyManagerIdentifier)
         , listArguments=[Make.astName(numbaFlow.parallelCallable)] + shatteredDataclass.listNameDataclassFragments4Parameters))
     ).visit(ingredientsDispatcher.astFunctionDef)
 
```
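The identifier-reassignment loop in this hunk uses the package's `NodeReplacer` with `ifThis`/`Then` predicates. As a rough standard-library analogue (the names below are illustrative, not from the package), the same call rename can be expressed with `ast.NodeTransformer`:

```python
import ast

class RenameCalls(ast.NodeTransformer):
    """Rewrite calls to oldName(...) as newName(...)."""

    def __init__(self, oldName: str, newName: str) -> None:
        self.oldName = oldName
        self.newName = newName

    def visit_Call(self, node: ast.Call) -> ast.AST:
        self.generic_visit(node)
        if isinstance(node.func, ast.Name) and node.func.id == self.oldName:
            node.func = ast.Name(id=self.newName, ctx=ast.Load())
        return node

tree = ast.parse("groupsOfFolds = countSequential(state)")
tree = ast.fix_missing_locations(RenameCalls("countSequential", "sequential").visit(tree))
print(ast.unparse(tree))  # groupsOfFolds = sequential(state)
```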
```diff
@@ -121,32 +149,20 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> N
     ingredientsDispatcher.imports.update(shatteredDataclass.ledgerDataclassANDFragments)
 
     NodeReplacer(
-        findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
+        findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable) # NOTE source
         , doThat = Then.insertThisAbove(shatteredDataclass.listAnnAssign4DataclassUnpack)
     ).visit(ingredientsDispatcher.astFunctionDef)
     NodeReplacer(
-        findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
+        findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable) # NOTE source
         , doThat = Then.insertThisBelow([shatteredDataclass.astAssignDataclassRepack])
     ).visit(ingredientsDispatcher.astFunctionDef)
     NodeReplacer(
-        findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
+        findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable) # NOTE source
         , doThat = Then.replaceWith(Make.astAssign(listTargets=[shatteredDataclass.astTuple4AssignTargetsToFragments], value=Make.astCall(Make.astName(numbaFlow.sequentialCallable), shatteredDataclass.listNameDataclassFragments4Parameters)))
     ).visit(ingredientsDispatcher.astFunctionDef)
 
-    # ===========================================================
-    sourcePython = numbaFlow.sourceInitializeCallable
-    astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
-    if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
-    astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
-    ingredientsInitialize = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
 
     # ===========================================================
-    sourcePython = numbaFlow.sourceParallelCallable
-    astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
-    if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
-    astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
-    ingredientsParallel = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
-    ingredientsParallel.astFunctionDef.name = numbaFlow.parallelCallable
     ingredientsParallel.astFunctionDef.args = Make.astArgumentsSpecification(args=shatteredDataclass.list_ast_argAnnotated4ArgumentsSpecification)
     NodeReplacer(
         findThis = ifThis.isReturn
```
```diff
@@ -163,12 +179,6 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> N
     ingredientsParallel = decorateCallableWithNumba(ingredientsParallel)
 
     # ===========================================================
-    sourcePython = numbaFlow.sourceSequentialCallable
-    astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
-    if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
-    astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
-    ingredientsSequential = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
-    ingredientsSequential.astFunctionDef.name = numbaFlow.sequentialCallable
     ingredientsSequential.astFunctionDef.args = Make.astArgumentsSpecification(args=shatteredDataclass.list_ast_argAnnotated4ArgumentsSpecification)
     NodeReplacer(
         findThis = ifThis.isReturn
```
```diff
@@ -182,8 +192,13 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> N
     replacementMap = {statement.value: statement.target for statement in shatteredDataclass.listAnnAssign4DataclassUnpack}
     ingredientsSequential.astFunctionDef = Z0Z_replaceMatchingASTnodes(ingredientsSequential.astFunctionDef, replacementMap) # type: ignore
     ingredientsSequential = decorateCallableWithNumba(ingredientsSequential)
+    # End old organization
+    # ===========================================================
 
     # ===========================================================
+    # End function-level transformations
+    # ===========================================================
+    # Module-level transformations
     ingredientsModuleNumbaUnified = IngredientsModule(
         ingredientsFunction=[ingredientsInitialize,
             ingredientsParallel,
```