mapFolding 0.8.1__py3-none-any.whl → 0.8.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/basecamp.py +2 -2
- mapFolding/beDRY.py +24 -31
- mapFolding/oeis.py +2 -2
- mapFolding/reference/__init__.py +38 -0
- mapFolding/reference/flattened.py +20 -2
- mapFolding/reference/hunterNumba.py +24 -0
- mapFolding/reference/irvineJavaPort.py +12 -0
- mapFolding/reference/{jax.py → jaxCount.py} +46 -27
- mapFolding/reference/lunnanNumpy.py +16 -1
- mapFolding/reference/lunnanWhile.py +15 -1
- mapFolding/reference/rotatedEntryPoint.py +18 -0
- mapFolding/reference/total_countPlus1vsPlusN.py +226 -203
- mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py +5 -8
- mapFolding/someAssemblyRequired/transformDataStructures.py +10 -4
- mapFolding/someAssemblyRequired/transformationTools.py +19 -28
- mapFolding/theSSOT.py +70 -121
- {mapfolding-0.8.1.dist-info → mapfolding-0.8.2.dist-info}/METADATA +54 -30
- mapfolding-0.8.2.dist-info/RECORD +39 -0
- {mapfolding-0.8.1.dist-info → mapfolding-0.8.2.dist-info}/WHEEL +1 -1
- tests/conftest.py +43 -33
- tests/test_computations.py +7 -7
- tests/test_other.py +2 -2
- mapfolding-0.8.1.dist-info/RECORD +0 -39
- {mapfolding-0.8.1.dist-info → mapfolding-0.8.2.dist-info}/entry_points.txt +0 -0
- {mapfolding-0.8.1.dist-info → mapfolding-0.8.2.dist-info}/licenses/LICENSE +0 -0
- {mapfolding-0.8.1.dist-info → mapfolding-0.8.2.dist-info}/top_level.txt +0 -0
mapFolding/reference/total_countPlus1vsPlusN.py
@@ -1,211 +1,234 @@
+"""
+Comparison of two nearly identical counting implementations with vastly different performance.
+
+This file provides a direct comparison between two variants of the map folding algorithm
+that differ only in their approach to incrementing the folding counter. Despite their apparent
+similarity, one implementation demonstrates orders of magnitude better performance than the other.
+
+Key characteristics:
+- Both implementations use Numba for performance optimization
+- Both use identical data structures and array initializations
+- `foldings_plus_1`: Increments the counter by 1 for each valid folding
+- `foldings`: Increments the counter by n (total leaves) when certain conditions are met
+
+The performance difference illustrates how subtle algorithmic changes can dramatically
+impact computational efficiency, even when the overall algorithm structure remains unchanged.
+This example serves as a compelling demonstration of the importance of algorithm analysis
+and optimization for combinatorial problems.
+
+Note: These functions are isolated for educational purposes to highlight the specific
+optimization technique. The main package uses more comprehensive optimizations derived
+from this and other lessons.
+"""
+
 from numba import njit
 import numpy
 
 @njit(cache=True)
 def foldings_plus_1(p: list[int], computationDivisions: int = 0, computationIndex: int = 0) -> int:
-[old lines 6-105 removed; their content is not shown in this extract]
+    n: int = 1 # Total number of leaves
+    for dimension in p:
+        n *= dimension
+
+    d = len(p) # Number of dimensions
+    # Compute arrays P, C, D as per the algorithm
+    P = numpy.ones(d + 1, dtype=numpy.int64)
+    for i in range(1, d + 1):
+        P[i] = P[i - 1] * p[i - 1]
+
+    # C[i][m] holds the i-th coordinate of leaf m
+    C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
+    for i in range(1, d + 1):
+        for m in range(1, n + 1):
+            C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
+
+    # D[i][l][m] computes the leaf connected to m in section i when inserting l
+    D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
+    for i in range(1, d + 1):
+        for l in range(1, n + 1):
+            for m in range(1, l + 1):
+                delta = C[i][l] - C[i][m]
+                if delta % 2 == 0:
+                    # If delta is even
+                    if C[i][m] == 1:
+                        D[i][l][m] = m
+                    else:
+                        D[i][l][m] = m - P[i - 1]
+                else:
+                    # If delta is odd
+                    if C[i][m] == p[i - 1] or m + P[i - 1] > l:
+                        D[i][l][m] = m
+                    else:
+                        D[i][l][m] = m + P[i - 1]
+    # Initialize arrays/lists
+    A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
+    B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
+    count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
+    gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
+    gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
+
+
+    # Initialize variables for backtracking
+    total_count = 0 # Total number of foldings
+    g = 0 # Gap index
+    l = 1 # Current leaf
+
+    # Start backtracking loop
+    while l > 0:
+        # If we have processed all leaves, increment total count
+        if l > n:
+            total_count += 1
+        else:
+            dd = 0 # Number of sections where leaf l is unconstrained
+            gg = g # Temporary gap index
+            g = gapter[l - 1] # Reset gap index for current leaf
+
+            # Count possible gaps for leaf l in each section
+            for i in range(1, d + 1):
+                if D[i][l][l] == l:
+                    dd += 1
+                else:
+                    m = D[i][l][l]
+                    while m != l:
+                        if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
+                            gap[gg] = m
+                            if count[m] == 0:
+                                gg += 1
+                            count[m] += 1
+                        m = D[i][l][B[m]]
+
+            # If leaf l is unconstrained in all sections, it can be inserted anywhere
+            if dd == d:
+                for m in range(l):
+                    gap[gg] = m
+                    gg += 1
+
+            # Filter gaps that are common to all sections
+            for j in range(g, gg):
+                gap[g] = gap[j]
+                if count[gap[j]] == d - dd:
+                    g += 1
+                count[gap[j]] = 0 # Reset count for next iteration
+
+        # Recursive backtracking steps
+        while l > 0 and g == gapter[l - 1]:
+            l -= 1
+            B[A[l]] = B[l]
+            A[B[l]] = A[l]
+
+        if l > 0:
+            g -= 1
+            A[l] = gap[g]
+            B[l] = B[A[l]]
+            B[A[l]] = l
+            A[B[l]] = l
+            gapter[l] = g # Save current gap index
+            l += 1 # Move to next leaf
+
+    return total_count
 
 @njit(cache=True)
 def foldings(p: list[int], computationDivisions: int = 0, computationIndex: int = 0) -> int:
-[old lines 109-211 removed; their content is not shown in this extract]
+    n: int = 1 # Total number of leaves
+    for dimension in p:
+        n *= dimension
+
+    d = len(p) # Number of dimensions
+    # Compute arrays P, C, D as per the algorithm
+    P = numpy.ones(d + 1, dtype=numpy.int64)
+    for i in range(1, d + 1):
+        P[i] = P[i - 1] * p[i - 1]
+
+    # C[i][m] holds the i-th coordinate of leaf m
+    C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
+    for i in range(1, d + 1):
+        for m in range(1, n + 1):
+            C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
+            # C[i][m] = ((m - 1) // P[i - 1]) % p[i - 1] + 1 # NOTE different, but either one works
+
+    # D[i][l][m] computes the leaf connected to m in section i when inserting l
+    D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
+    for i in range(1, d + 1):
+        for l in range(1, n + 1):
+            for m in range(1, l + 1):
+                delta = C[i][l] - C[i][m]
+                if delta % 2 == 0:
+                    # If delta is even
+                    if C[i][m] == 1:
+                        D[i][l][m] = m
+                    else:
+                        D[i][l][m] = m - P[i - 1]
+                else:
+                    # If delta is odd
+                    if C[i][m] == p[i - 1] or m + P[i - 1] > l:
+                        D[i][l][m] = m
+                    else:
+                        D[i][l][m] = m + P[i - 1]
+    # Initialize arrays/lists
+    A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
+    B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
+    count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
+    gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
+    gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
+
+
+    # Initialize variables for backtracking
+    total_count = 0 # Total number of foldings
+    g = 0 # Gap index
+    l = 1 # Current leaf
+
+    # Start backtracking loop
+    while l > 0:
+        if l <= 1 or B[0] == 1: # NOTE different
+            # NOTE the above `if` statement encloses the if/else block below
+            # NOTE these changes increase the throughput by more than an order of magnitude
+            if l > n:
+                total_count += n
+            else:
+                dd = 0 # Number of sections where leaf l is unconstrained
+                gg = gapter[l - 1] # Track possible gaps # NOTE different, but not important
+                g = gg # NOTE different, but not important
+
+                # Count possible gaps for leaf l in each section
+                for i in range(1, d + 1):
+                    if D[i][l][l] == l:
+                        dd += 1
+                    else:
+                        m = D[i][l][l]
+                        while m != l:
+                            if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
+                                gap[gg] = m
+                                if count[m] == 0:
+                                    gg += 1
+                                count[m] += 1
+                            m = D[i][l][B[m]]
+
+                # If leaf l is unconstrained in all sections, it can be inserted anywhere
+                if dd == d:
+                    for m in range(l):
+                        gap[gg] = m
+                        gg += 1
+
+                # Filter gaps that are common to all sections
+                for j in range(g, gg):
+                    gap[g] = gap[j]
+                    if count[gap[j]] == d - dd:
+                        g += 1
+                    count[gap[j]] = 0 # Reset count for next iteration
+
+        # Recursive backtracking steps
+        while l > 0 and g == gapter[l - 1]:
+            l -= 1
+            B[A[l]] = B[l]
+            A[B[l]] = A[l]
+
+        if l > 0:
+            g -= 1
+            A[l] = gap[g]
+            B[l] = B[A[l]]
+            B[A[l]] = l
+            A[B[l]] = l
+            gapter[l] = g # Save current gap index
+            l += 1 # Move to next leaf
+
+    return total_count
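To see the comparison described in the new docstring, a minimal timing sketch along these lines could be used. It is illustrative only and not part of the diff: it assumes mapFolding 0.8.2 is installed, that the module is importable as mapFolding.reference.total_countPlus1vsPlusN, and that Numba accepts the plain Python list these signatures declare (recent Numba versions may emit a reflected-list deprecation warning).

# Illustrative sketch, not part of the released package.
import time
from mapFolding.reference.total_countPlus1vsPlusN import foldings, foldings_plus_1

mapShape = [2, 4]  # a small 2x4 map keeps the slower variant tolerable

for label, counter in (("foldings_plus_1", foldings_plus_1), ("foldings", foldings)):
    counter(mapShape)                      # warm-up call so Numba compilation is not timed
    started = time.perf_counter()
    total = counter(mapShape)
    print(label, total, f"{time.perf_counter() - started:.4f} seconds")

Both variants should report the same total; the point of the reference file is that the variant crediting n foldings at a time reaches it far sooner.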
mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py
@@ -77,18 +77,13 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> N
     shatteredDataclass = shatter_dataclassesDOTdataclass(numbaFlow.logicalPathModuleDataclass, numbaFlow.sourceDataclassIdentifier, numbaFlow.sourceDataclassInstanceTaskDistribution)
     ingredientsDispatcher.imports.update(shatteredDataclass.ledgerDataclassANDFragments)
 
-    # TODO remove hardcoding
-    namespaceHARDCODED = 'concurrencyManager'
-    identifierHARDCODED = 'submit'
-    sourceNamespace = namespaceHARDCODED
-    sourceIdentifier = identifierHARDCODED
     NodeReplacer(
-        findThis = ifThis.isAssignAndValueIsCallNamespace_Identifier(
+        findThis = ifThis.isAssignAndValueIsCallNamespace_Identifier(numbaFlow.sourceConcurrencyManagerNamespace, numbaFlow.sourceConcurrencyManagerIdentifier)
         , doThat = Then.insertThisAbove(shatteredDataclass.listAnnAssign4DataclassUnpack)
     ).visit(ingredientsDispatcher.astFunctionDef)
     NodeReplacer(
-        findThis = ifThis.isCallNamespace_Identifier(
-        , doThat = Then.replaceWith(Make.astCall(Make.astAttribute(Make.astName(
+        findThis = ifThis.isCallNamespace_Identifier(numbaFlow.sourceConcurrencyManagerNamespace, numbaFlow.sourceConcurrencyManagerIdentifier)
+        , doThat = Then.replaceWith(Make.astCall(Make.astAttribute(Make.astName(numbaFlow.sourceConcurrencyManagerNamespace), numbaFlow.sourceConcurrencyManagerIdentifier)
         , listArguments=[Make.astName(numbaFlow.parallelCallable)] + shatteredDataclass.listNameDataclassFragments4Parameters))
     ).visit(ingredientsDispatcher.astFunctionDef)
 

@@ -133,6 +128,8 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> N
         , doThat = Then.replaceWith(Make.astAssign(listTargets=[shatteredDataclass.astTuple4AssignTargetsToFragments], value=Make.astCall(Make.astName(numbaFlow.sequentialCallable), shatteredDataclass.listNameDataclassFragments4Parameters)))
     ).visit(ingredientsDispatcher.astFunctionDef)
 
+    ingredientsDispatcher.astFunctionDef.name = numbaFlow.dispatcherCallable
+
     # ===========================================================
     sourcePython = numbaFlow.sourceInitializeCallable
     astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
mapFolding/someAssemblyRequired/transformDataStructures.py
@@ -21,7 +21,7 @@ designed to be applicable to various data structure transformation scenarios.
 """
 
 from collections.abc import Sequence
-from importlib import import_module
+from importlib import import_module as importlib_import_module
 from inspect import getsource as inspect_getsource
 from mapFolding.beDRY import outfitCountFolds, validateListDimensions
 from mapFolding.filesystem import getPathFilenameFoldsTotal

@@ -36,7 +36,7 @@ from mapFolding.someAssemblyRequired import (
     Then,
     Z0Z_executeActionUnlessDescendantMatches,
 )
-from mapFolding.theSSOT import ComputationState,
+from mapFolding.theSSOT import ComputationState, The
 from pathlib import Path
 from types import ModuleType
 from typing import Any, Literal, overload

@@ -69,7 +69,9 @@ def shatter_dataclassesDOTdataclass(logicalPathModule: strDotStrCuzPyStoopid, da
         dataclass_Identifier: The identifier of the dataclass to be dismantled.
         instance_Identifier: In the synthesized module/function/scope, the identifier that will be used for the instance.
     """
-    [one line removed; its content is not shown in this extract]
+    # TODO learn whether dataclasses.make_dataclass would be useful to transform the target dataclass into the `ShatteredDataclass`
+
+    module: ast.Module = ast.parse(inspect_getsource(importlib_import_module(logicalPathModule)))
     astName_dataclassesDOTdataclass = Make.astName(dataclass_Identifier)
 
     dataclass = extractClassDef(dataclass_Identifier, module)

@@ -122,6 +124,10 @@ def shatter_dataclassesDOTdataclass(logicalPathModule: strDotStrCuzPyStoopid, da
     shatteredDataclass.ledgerDataclassANDFragments.addImportFromStr(logicalPathModule, dataclass_Identifier)
     return shatteredDataclass
 
+def getSourceAlgorithmVESTIGIAL() -> ModuleType:
+    moduleImported: ModuleType = importlib_import_module(The.logicalPathModuleSourceAlgorithm)
+    return moduleImported
+
 @overload
 def makeStateJobOUTDATED(listDimensions: Sequence[int], *, writeJob: Literal[True], **keywordArguments: Any) -> Path: ...
 @overload

@@ -145,7 +151,7 @@ def makeStateJobOUTDATED(listDimensions: Sequence[int], *, writeJob: bool = True
     mapShape = validateListDimensions(listDimensions)
     stateUniversal: ComputationState = outfitCountFolds(mapShape, **keywordArguments)
 
-    moduleSource: ModuleType =
+    moduleSource: ModuleType = getSourceAlgorithmVESTIGIAL()
     # TODO `countInitialize` is hardcoded
     stateUniversal = moduleSource.countInitialize(stateUniversal)
 
mapFolding/someAssemblyRequired/transformationTools.py
@@ -26,27 +26,16 @@ from autoflake import fix_code as autoflake_fix_code
 from collections import defaultdict
 from collections.abc import Callable, Container, Sequence
 from copy import deepcopy
+from importlib import import_module as importlib_import_module
 from inspect import getsource as inspect_getsource
 from mapFolding.filesystem import writeStringToHere
 from mapFolding.theSSOT import (
-    getSourceAlgorithm,
     raiseIfNoneGitHubIssueNumber3,
-    [one line removed; its content is not shown in this extract]
-    theDataclassInstance,
-    theDataclassInstanceTaskDistribution,
-    theDispatcherCallable,
-    theFileExtension,
+    The,
     theFormatStrModuleForCallableSynthetic,
     theFormatStrModuleSynthetic,
-    theLogicalPathModuleDataclass,
     theLogicalPathModuleDispatcherSynthetic,
     theModuleDispatcherSynthetic,
-    theModuleOfSyntheticModules,
-    thePackageName,
-    thePathPackage,
-    theSourceInitializeCallable,
-    theSourceParallelCallable,
-    theSourceSequentialCallable,
 )
 from os import PathLike
 from pathlib import Path, PurePath, PurePosixPath

@@ -563,25 +552,27 @@ class RecipeSynthesizeFlow:
     """Settings for synthesizing flow."""
     # ========================================
     # Source
-    sourceAlgorithm: ModuleType =
+    sourceAlgorithm: ModuleType = importlib_import_module(The.logicalPathModuleSourceAlgorithm)
     sourcePython: str = inspect_getsource(sourceAlgorithm)
     source_astModule: ast.Module = ast.parse(sourcePython)
 
     # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
-    sourceDispatcherCallable: str =
-    sourceInitializeCallable: str =
-    sourceParallelCallable: str =
-    sourceSequentialCallable: str =
-
-    sourceDataclassIdentifier: str =
-    sourceDataclassInstance: str =
-    sourceDataclassInstanceTaskDistribution: str =
-    sourcePathModuleDataclass: str =
-
+    sourceDispatcherCallable: str = The.dispatcherCallable
+    sourceInitializeCallable: str = The.sourceInitializeCallable
+    sourceParallelCallable: str = The.sourceParallelCallable
+    sourceSequentialCallable: str = The.sourceSequentialCallable
+
+    sourceDataclassIdentifier: str = The.dataclassIdentifier
+    sourceDataclassInstance: str = The.dataclassInstance
+    sourceDataclassInstanceTaskDistribution: str = The.dataclassInstanceTaskDistribution
+    sourcePathModuleDataclass: str = The.logicalPathModuleDataclass
+
+    sourceConcurrencyManagerNamespace = The.sourceConcurrencyManagerNamespace
+    sourceConcurrencyManagerIdentifier = The.sourceConcurrencyManagerIdentifier
     # ========================================
     # Filesystem
-    pathPackage: PurePosixPath | None = PurePosixPath(
-    fileExtension: str =
+    pathPackage: PurePosixPath | None = PurePosixPath(The.pathPackage)
+    fileExtension: str = The.fileExtension
 
     # ========================================
     # Logical identifiers

@@ -590,11 +581,11 @@ class RecipeSynthesizeFlow:
     formatStrModuleForCallableSynthetic: str = theFormatStrModuleForCallableSynthetic
 
     # Package
-    packageName: ast_Identifier | None =
+    packageName: ast_Identifier | None = The.packageName
 
     # Module
     # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
-    Z0Z_flowLogicalPathRoot: str =
+    Z0Z_flowLogicalPathRoot: str | None = The.moduleOfSyntheticModules
     moduleDispatcher: str = theModuleDispatcherSynthetic
     logicalPathModuleDataclass: str = sourcePathModuleDataclass
     # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4