mapFolding 0.8.0__py3-none-any.whl → 0.8.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. mapFolding/__init__.py +33 -4
  2. mapFolding/basecamp.py +16 -2
  3. mapFolding/beDRY.py +40 -32
  4. mapFolding/filesystem.py +124 -90
  5. mapFolding/noHomeYet.py +12 -0
  6. mapFolding/oeis.py +18 -3
  7. mapFolding/reference/__init__.py +38 -0
  8. mapFolding/reference/flattened.py +66 -47
  9. mapFolding/reference/hunterNumba.py +28 -4
  10. mapFolding/reference/irvineJavaPort.py +13 -1
  11. mapFolding/reference/{jax.py → jaxCount.py} +46 -27
  12. mapFolding/reference/lunnanNumpy.py +19 -5
  13. mapFolding/reference/lunnanWhile.py +19 -7
  14. mapFolding/reference/rotatedEntryPoint.py +20 -3
  15. mapFolding/reference/total_countPlus1vsPlusN.py +226 -203
  16. mapFolding/someAssemblyRequired/__init__.py +29 -0
  17. mapFolding/someAssemblyRequired/getLLVMforNoReason.py +32 -14
  18. mapFolding/someAssemblyRequired/ingredientsNumba.py +22 -1
  19. mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py +193 -0
  20. mapFolding/someAssemblyRequired/synthesizeNumbaJobVESTIGIAL.py +3 -4
  21. mapFolding/someAssemblyRequired/transformDataStructures.py +168 -0
  22. mapFolding/someAssemblyRequired/transformationTools.py +233 -225
  23. mapFolding/theDao.py +19 -5
  24. mapFolding/theSSOT.py +89 -122
  25. mapfolding-0.8.2.dist-info/METADATA +187 -0
  26. mapfolding-0.8.2.dist-info/RECORD +39 -0
  27. {mapfolding-0.8.0.dist-info → mapfolding-0.8.2.dist-info}/WHEEL +1 -1
  28. tests/conftest.py +43 -33
  29. tests/test_computations.py +7 -7
  30. tests/test_other.py +2 -2
  31. mapFolding/reference/lunnan.py +0 -153
  32. mapFolding/someAssemblyRequired/Z0Z_workbench.py +0 -350
  33. mapFolding/someAssemblyRequired/synthesizeDataConverters.py +0 -117
  34. mapFolding/syntheticModules/numbaCountHistoricalExample.py +0 -158
  35. mapFolding/syntheticModules/numba_doTheNeedfulHistoricalExample.py +0 -13
  36. mapfolding-0.8.0.dist-info/METADATA +0 -157
  37. mapfolding-0.8.0.dist-info/RECORD +0 -41
  38. {mapfolding-0.8.0.dist-info → mapfolding-0.8.2.dist-info}/entry_points.txt +0 -0
  39. {mapfolding-0.8.0.dist-info → mapfolding-0.8.2.dist-info/licenses}/LICENSE +0 -0
  40. {mapfolding-0.8.0.dist-info → mapfolding-0.8.2.dist-info}/top_level.txt +0 -0
@@ -1,211 +1,234 @@
1
+ """
2
+ Comparison of two nearly identical counting implementations with vastly different performance.
3
+
4
+ This file provides a direct comparison between two variants of the map folding algorithm
5
+ that differ only in their approach to incrementing the folding counter. Despite their apparent
6
+ similarity, one implementation demonstrates orders of magnitude better performance than the other.
7
+
8
+ Key characteristics:
9
+ - Both implementations use Numba for performance optimization
10
+ - Both use identical data structures and array initializations
11
+ - `foldings_plus_1`: Increments the counter by 1 for each valid folding
12
+ - `foldings`: Increments the counter by n (total leaves) when certain conditions are met
13
+
14
+ The performance difference illustrates how subtle algorithmic changes can dramatically
15
+ impact computational efficiency, even when the overall algorithm structure remains unchanged.
16
+ This example serves as a compelling demonstration of the importance of algorithm analysis
17
+ and optimization for combinatorial problems.
18
+
19
+ Note: These functions are isolated for educational purposes to highlight the specific
20
+ optimization technique. The main package uses more comprehensive optimizations derived
21
+ from this and other lessons.
22
+ """
23
+
1
24
  from numba import njit
2
25
  import numpy
3
26
 
4
27
  @njit(cache=True)
5
28
  def foldings_plus_1(p: list[int], computationDivisions: int = 0, computationIndex: int = 0) -> int:
6
- n: int = 1 # Total number of leaves
7
- for dimension in p:
8
- n *= dimension
9
-
10
- d = len(p) # Number of dimensions
11
- # Compute arrays P, C, D as per the algorithm
12
- P = numpy.ones(d + 1, dtype=numpy.int64)
13
- for i in range(1, d + 1):
14
- P[i] = P[i - 1] * p[i - 1]
15
-
16
- # C[i][m] holds the i-th coordinate of leaf m
17
- C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
18
- for i in range(1, d + 1):
19
- for m in range(1, n + 1):
20
- C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
21
-
22
- # D[i][l][m] computes the leaf connected to m in section i when inserting l
23
- D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
24
- for i in range(1, d + 1):
25
- for l in range(1, n + 1):
26
- for m in range(1, l + 1):
27
- delta = C[i][l] - C[i][m]
28
- if delta % 2 == 0:
29
- # If delta is even
30
- if C[i][m] == 1:
31
- D[i][l][m] = m
32
- else:
33
- D[i][l][m] = m - P[i - 1]
34
- else:
35
- # If delta is odd
36
- if C[i][m] == p[i - 1] or m + P[i - 1] > l:
37
- D[i][l][m] = m
38
- else:
39
- D[i][l][m] = m + P[i - 1]
40
- # Initialize arrays/lists
41
- A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
42
- B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
43
- count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
44
- gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
45
- gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
46
-
47
-
48
- # Initialize variables for backtracking
49
- total_count = 0 # Total number of foldings
50
- g = 0 # Gap index
51
- l = 1 # Current leaf
52
-
53
- # Start backtracking loop
54
- while l > 0:
55
- # If we have processed all leaves, increment total count
56
- if l > n:
57
- total_count += 1
58
- else:
59
- dd = 0 # Number of sections where leaf l is unconstrained
60
- gg = g # Temporary gap index
61
- g = gapter[l - 1] # Reset gap index for current leaf
62
-
63
- # Count possible gaps for leaf l in each section
64
- for i in range(1, d + 1):
65
- if D[i][l][l] == l:
66
- dd += 1
67
- else:
68
- m = D[i][l][l]
69
- while m != l:
70
- if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
71
- gap[gg] = m
72
- if count[m] == 0:
73
- gg += 1
74
- count[m] += 1
75
- m = D[i][l][B[m]]
76
-
77
- # If leaf l is unconstrained in all sections, it can be inserted anywhere
78
- if dd == d:
79
- for m in range(l):
80
- gap[gg] = m
81
- gg += 1
82
-
83
- # Filter gaps that are common to all sections
84
- for j in range(g, gg):
85
- gap[g] = gap[j]
86
- if count[gap[j]] == d - dd:
87
- g += 1
88
- count[gap[j]] = 0 # Reset count for next iteration
89
-
90
- # Recursive backtracking steps
91
- while l > 0 and g == gapter[l - 1]:
92
- l -= 1
93
- B[A[l]] = B[l]
94
- A[B[l]] = A[l]
95
-
96
- if l > 0:
97
- g -= 1
98
- A[l] = gap[g]
99
- B[l] = B[A[l]]
100
- B[A[l]] = l
101
- A[B[l]] = l
102
- gapter[l] = g # Save current gap index
103
- l += 1 # Move to next leaf
104
-
105
- return total_count
29
+ n: int = 1 # Total number of leaves
30
+ for dimension in p:
31
+ n *= dimension
32
+
33
+ d = len(p) # Number of dimensions
34
+ # Compute arrays P, C, D as per the algorithm
35
+ P = numpy.ones(d + 1, dtype=numpy.int64)
36
+ for i in range(1, d + 1):
37
+ P[i] = P[i - 1] * p[i - 1]
38
+
39
+ # C[i][m] holds the i-th coordinate of leaf m
40
+ C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
41
+ for i in range(1, d + 1):
42
+ for m in range(1, n + 1):
43
+ C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
44
+
45
+ # D[i][l][m] computes the leaf connected to m in section i when inserting l
46
+ D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
47
+ for i in range(1, d + 1):
48
+ for l in range(1, n + 1):
49
+ for m in range(1, l + 1):
50
+ delta = C[i][l] - C[i][m]
51
+ if delta % 2 == 0:
52
+ # If delta is even
53
+ if C[i][m] == 1:
54
+ D[i][l][m] = m
55
+ else:
56
+ D[i][l][m] = m - P[i - 1]
57
+ else:
58
+ # If delta is odd
59
+ if C[i][m] == p[i - 1] or m + P[i - 1] > l:
60
+ D[i][l][m] = m
61
+ else:
62
+ D[i][l][m] = m + P[i - 1]
63
+ # Initialize arrays/lists
64
+ A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
65
+ B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
66
+ count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
67
+ gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
68
+ gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
69
+
70
+
71
+ # Initialize variables for backtracking
72
+ total_count = 0 # Total number of foldings
73
+ g = 0 # Gap index
74
+ l = 1 # Current leaf
75
+
76
+ # Start backtracking loop
77
+ while l > 0:
78
+ # If we have processed all leaves, increment total count
79
+ if l > n:
80
+ total_count += 1
81
+ else:
82
+ dd = 0 # Number of sections where leaf l is unconstrained
83
+ gg = g # Temporary gap index
84
+ g = gapter[l - 1] # Reset gap index for current leaf
85
+
86
+ # Count possible gaps for leaf l in each section
87
+ for i in range(1, d + 1):
88
+ if D[i][l][l] == l:
89
+ dd += 1
90
+ else:
91
+ m = D[i][l][l]
92
+ while m != l:
93
+ if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
94
+ gap[gg] = m
95
+ if count[m] == 0:
96
+ gg += 1
97
+ count[m] += 1
98
+ m = D[i][l][B[m]]
99
+
100
+ # If leaf l is unconstrained in all sections, it can be inserted anywhere
101
+ if dd == d:
102
+ for m in range(l):
103
+ gap[gg] = m
104
+ gg += 1
105
+
106
+ # Filter gaps that are common to all sections
107
+ for j in range(g, gg):
108
+ gap[g] = gap[j]
109
+ if count[gap[j]] == d - dd:
110
+ g += 1
111
+ count[gap[j]] = 0 # Reset count for next iteration
112
+
113
+ # Recursive backtracking steps
114
+ while l > 0 and g == gapter[l - 1]:
115
+ l -= 1
116
+ B[A[l]] = B[l]
117
+ A[B[l]] = A[l]
118
+
119
+ if l > 0:
120
+ g -= 1
121
+ A[l] = gap[g]
122
+ B[l] = B[A[l]]
123
+ B[A[l]] = l
124
+ A[B[l]] = l
125
+ gapter[l] = g # Save current gap index
126
+ l += 1 # Move to next leaf
127
+
128
+ return total_count
106
129
 
107
130
  @njit(cache=True)
108
131
  def foldings(p: list[int], computationDivisions: int = 0, computationIndex: int = 0) -> int:
109
- n: int = 1 # Total number of leaves
110
- for dimension in p:
111
- n *= dimension
112
-
113
- d = len(p) # Number of dimensions
114
- # Compute arrays P, C, D as per the algorithm
115
- P = numpy.ones(d + 1, dtype=numpy.int64)
116
- for i in range(1, d + 1):
117
- P[i] = P[i - 1] * p[i - 1]
118
-
119
- # C[i][m] holds the i-th coordinate of leaf m
120
- C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
121
- for i in range(1, d + 1):
122
- for m in range(1, n + 1):
123
- C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
124
- # C[i][m] = ((m - 1) // P[i - 1]) % p[i - 1] + 1 # NOTE different, but either one works
125
-
126
- # D[i][l][m] computes the leaf connected to m in section i when inserting l
127
- D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
128
- for i in range(1, d + 1):
129
- for l in range(1, n + 1):
130
- for m in range(1, l + 1):
131
- delta = C[i][l] - C[i][m]
132
- if delta % 2 == 0:
133
- # If delta is even
134
- if C[i][m] == 1:
135
- D[i][l][m] = m
136
- else:
137
- D[i][l][m] = m - P[i - 1]
138
- else:
139
- # If delta is odd
140
- if C[i][m] == p[i - 1] or m + P[i - 1] > l:
141
- D[i][l][m] = m
142
- else:
143
- D[i][l][m] = m + P[i - 1]
144
- # Initialize arrays/lists
145
- A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
146
- B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
147
- count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
148
- gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
149
- gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
150
-
151
-
152
- # Initialize variables for backtracking
153
- total_count = 0 # Total number of foldings
154
- g = 0 # Gap index
155
- l = 1 # Current leaf
156
-
157
- # Start backtracking loop
158
- while l > 0:
159
- if l <= 1 or B[0] == 1: # NOTE different
160
- # NOTE the above `if` statement encloses the the if/else block below
161
- # NOTE these changes increase the throughput by more than an order of magnitude
162
- if l > n:
163
- total_count += n
164
- else:
165
- dd = 0 # Number of sections where leaf l is unconstrained
166
- gg = gapter[l - 1] # Track possible gaps # NOTE different, but not important
167
- g = gg # NOTE different, but not important
168
-
169
- # Count possible gaps for leaf l in each section
170
- for i in range(1, d + 1):
171
- if D[i][l][l] == l:
172
- dd += 1
173
- else:
174
- m = D[i][l][l]
175
- while m != l:
176
- if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
177
- gap[gg] = m
178
- if count[m] == 0:
179
- gg += 1
180
- count[m] += 1
181
- m = D[i][l][B[m]]
182
-
183
- # If leaf l is unconstrained in all sections, it can be inserted anywhere
184
- if dd == d:
185
- for m in range(l):
186
- gap[gg] = m
187
- gg += 1
188
-
189
- # Filter gaps that are common to all sections
190
- for j in range(g, gg):
191
- gap[g] = gap[j]
192
- if count[gap[j]] == d - dd:
193
- g += 1
194
- count[gap[j]] = 0 # Reset count for next iteration
195
-
196
- # Recursive backtracking steps
197
- while l > 0 and g == gapter[l - 1]:
198
- l -= 1
199
- B[A[l]] = B[l]
200
- A[B[l]] = A[l]
201
-
202
- if l > 0:
203
- g -= 1
204
- A[l] = gap[g]
205
- B[l] = B[A[l]]
206
- B[A[l]] = l
207
- A[B[l]] = l
208
- gapter[l] = g # Save current gap index
209
- l += 1 # Move to next leaf
210
-
211
- return total_count
132
+ n: int = 1 # Total number of leaves
133
+ for dimension in p:
134
+ n *= dimension
135
+
136
+ d = len(p) # Number of dimensions
137
+ # Compute arrays P, C, D as per the algorithm
138
+ P = numpy.ones(d + 1, dtype=numpy.int64)
139
+ for i in range(1, d + 1):
140
+ P[i] = P[i - 1] * p[i - 1]
141
+
142
+ # C[i][m] holds the i-th coordinate of leaf m
143
+ C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
144
+ for i in range(1, d + 1):
145
+ for m in range(1, n + 1):
146
+ C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
147
+ # C[i][m] = ((m - 1) // P[i - 1]) % p[i - 1] + 1 # NOTE different, but either one works
148
+
149
+ # D[i][l][m] computes the leaf connected to m in section i when inserting l
150
+ D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
151
+ for i in range(1, d + 1):
152
+ for l in range(1, n + 1):
153
+ for m in range(1, l + 1):
154
+ delta = C[i][l] - C[i][m]
155
+ if delta % 2 == 0:
156
+ # If delta is even
157
+ if C[i][m] == 1:
158
+ D[i][l][m] = m
159
+ else:
160
+ D[i][l][m] = m - P[i - 1]
161
+ else:
162
+ # If delta is odd
163
+ if C[i][m] == p[i - 1] or m + P[i - 1] > l:
164
+ D[i][l][m] = m
165
+ else:
166
+ D[i][l][m] = m + P[i - 1]
167
+ # Initialize arrays/lists
168
+ A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
169
+ B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
170
+ count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
171
+ gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
172
+ gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
173
+
174
+
175
+ # Initialize variables for backtracking
176
+ total_count = 0 # Total number of foldings
177
+ g = 0 # Gap index
178
+ l = 1 # Current leaf
179
+
180
+ # Start backtracking loop
181
+ while l > 0:
182
+ if l <= 1 or B[0] == 1: # NOTE different
183
+ # NOTE the above `if` statement encloses the the if/else block below
184
+ # NOTE these changes increase the throughput by more than an order of magnitude
185
+ if l > n:
186
+ total_count += n
187
+ else:
188
+ dd = 0 # Number of sections where leaf l is unconstrained
189
+ gg = gapter[l - 1] # Track possible gaps # NOTE different, but not important
190
+ g = gg # NOTE different, but not important
191
+
192
+ # Count possible gaps for leaf l in each section
193
+ for i in range(1, d + 1):
194
+ if D[i][l][l] == l:
195
+ dd += 1
196
+ else:
197
+ m = D[i][l][l]
198
+ while m != l:
199
+ if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
200
+ gap[gg] = m
201
+ if count[m] == 0:
202
+ gg += 1
203
+ count[m] += 1
204
+ m = D[i][l][B[m]]
205
+
206
+ # If leaf l is unconstrained in all sections, it can be inserted anywhere
207
+ if dd == d:
208
+ for m in range(l):
209
+ gap[gg] = m
210
+ gg += 1
211
+
212
+ # Filter gaps that are common to all sections
213
+ for j in range(g, gg):
214
+ gap[g] = gap[j]
215
+ if count[gap[j]] == d - dd:
216
+ g += 1
217
+ count[gap[j]] = 0 # Reset count for next iteration
218
+
219
+ # Recursive backtracking steps
220
+ while l > 0 and g == gapter[l - 1]:
221
+ l -= 1
222
+ B[A[l]] = B[l]
223
+ A[B[l]] = A[l]
224
+
225
+ if l > 0:
226
+ g -= 1
227
+ A[l] = gap[g]
228
+ B[l] = B[A[l]]
229
+ B[A[l]] = l
230
+ A[B[l]] = l
231
+ gapter[l] = g # Save current gap index
232
+ l += 1 # Move to next leaf
233
+
234
+ return total_count
@@ -1,3 +1,29 @@
1
+ """
2
+ Code transformation framework for algorithmic optimization.
3
+
4
+ This package implements a comprehensive framework for programmatically analyzing,
5
+ transforming, and generating Python code. It enables sophisticated algorithm optimization
6
+ through abstract syntax tree (AST) manipulation, allowing algorithms to be transformed
7
+ from a readable, functional implementation into highly-optimized variants tailored for
8
+ different execution environments or specific computational tasks.
9
+
10
+ Core capabilities:
11
+ 1. AST Pattern Recognition - Precisely identify and match code patterns using composable predicates
12
+ 2. Algorithm Transformation - Convert functional state-based implementations to primitive operations
13
+ 3. Dataclass "Shattering" - Decompose complex state objects into primitive components
14
+ 4. Performance Optimization - Apply domain-specific optimizations for numerical computation
15
+ 5. Code Generation - Generate specialized implementations with appropriate imports and syntax
16
+
17
+ The transformation pipeline supports multiple optimization targets, from general-purpose
18
+ acceleration to generating highly-specialized variants optimized for specific input parameters.
19
+ This multi-level transformation approach allows for both development flexibility and
20
+ runtime performance, preserving algorithm readability in the source while enabling
21
+ maximum execution speed in production.
22
+
23
+ These tools were developed for map folding computation optimization but are designed as
24
+ general-purpose utilities applicable to a wide range of code transformation scenarios,
25
+ particularly for numerically-intensive algorithms that benefit from just-in-time compilation.
26
+ """
1
27
  from mapFolding.someAssemblyRequired.transformationTools import (
2
28
  ast_Identifier as ast_Identifier,
3
29
  extractClassDef as extractClassDef,
@@ -5,6 +31,7 @@ from mapFolding.someAssemblyRequired.transformationTools import (
5
31
  ifThis as ifThis,
6
32
  IngredientsFunction as IngredientsFunction,
7
33
  IngredientsModule as IngredientsModule,
34
+ inlineThisFunctionWithTheseValues as inlineThisFunctionWithTheseValues,
8
35
  LedgerOfImports as LedgerOfImports,
9
36
  Make as Make,
10
37
  makeDictionaryReplacementStatements as makeDictionaryReplacementStatements,
@@ -13,5 +40,7 @@ from mapFolding.someAssemblyRequired.transformationTools import (
13
40
  RecipeSynthesizeFlow as RecipeSynthesizeFlow,
14
41
  strDotStrCuzPyStoopid as strDotStrCuzPyStoopid,
15
42
  Then as Then,
43
+ write_astModule as write_astModule,
16
44
  Z0Z_executeActionUnlessDescendantMatches as Z0Z_executeActionUnlessDescendantMatches,
45
+ Z0Z_replaceMatchingASTnodes as Z0Z_replaceMatchingASTnodes,
17
46
  )
@@ -1,20 +1,38 @@
1
+ """
2
+ Utility for extracting LLVM IR from compiled Python modules.
3
+
4
+ This module provides functionality to extract and save the LLVM Intermediate Representation (IR)
5
+ generated when Numba compiles Python functions. It implements a simple interface that:
6
+
7
+ 1. Imports a specified Python module from its file path
8
+ 2. Extracts the LLVM IR from a specified function within that module
9
+ 3. Writes the IR to a file with the same base name but with the .ll extension
10
+
11
+ The extracted LLVM IR can be valuable for debugging, optimization analysis, or educational
12
+ purposes, as it provides a view into how high-level Python code is translated into
13
+ lower-level representations for machine execution.
14
+
15
+ While originally part of a tighter integration with the code generation pipeline,
16
+ this module now operates as a standalone utility that can be applied to any module
17
+ containing Numba-compiled functions.
18
+ """
1
19
  from importlib.machinery import ModuleSpec
20
+ from pathlib import Path
2
21
  from types import ModuleType
3
22
  import importlib.util
4
23
  import llvmlite.binding
5
- import pathlib
6
24
 
7
- def writeModuleLLVM(pathFilename: pathlib.Path, identifierCallable: str) -> pathlib.Path:
8
- """Import the generated module directly and get its LLVM IR."""
9
- specTarget: ModuleSpec | None = importlib.util.spec_from_file_location("generatedModule", pathFilename)
10
- if specTarget is None or specTarget.loader is None:
11
- raise ImportError(f"Could not create module spec or loader for {pathFilename}")
12
- moduleTarget: ModuleType = importlib.util.module_from_spec(specTarget)
13
- specTarget.loader.exec_module(moduleTarget)
25
+ def writeModuleLLVM(pathFilename: Path, identifierCallable: str) -> Path:
26
+ """Import the generated module directly and get its LLVM IR."""
27
+ specTarget: ModuleSpec | None = importlib.util.spec_from_file_location("generatedModule", pathFilename)
28
+ if specTarget is None or specTarget.loader is None:
29
+ raise ImportError(f"Could not create module spec or loader for {pathFilename}")
30
+ moduleTarget: ModuleType = importlib.util.module_from_spec(specTarget)
31
+ specTarget.loader.exec_module(moduleTarget)
14
32
 
15
- # Get LLVM IR and write to file
16
- linesLLVM = moduleTarget.__dict__[identifierCallable].inspect_llvm()[()]
17
- moduleLLVM: llvmlite.binding.ModuleRef = llvmlite.binding.module.parse_assembly(linesLLVM)
18
- pathFilenameLLVM: pathlib.Path = pathFilename.with_suffix(".ll")
19
- pathFilenameLLVM.write_text(str(moduleLLVM))
20
- return pathFilenameLLVM
33
+ # Get LLVM IR and write to file
34
+ linesLLVM = moduleTarget.__dict__[identifierCallable].inspect_llvm()[()]
35
+ moduleLLVM: llvmlite.binding.ModuleRef = llvmlite.binding.module.parse_assembly(linesLLVM)
36
+ pathFilenameLLVM: Path = pathFilename.with_suffix(".ll")
37
+ pathFilenameLLVM.write_text(str(moduleLLVM))
38
+ return pathFilenameLLVM
@@ -1,7 +1,28 @@
1
+ """
2
+ Numba-specific ingredients for optimized code generation.
3
+
4
+ This module provides specialized tools, constants, and types specifically designed
5
+ for transforming Python code into Numba-accelerated implementations. It implements:
6
+
7
+ 1. A range of Numba jit decorator configurations for different optimization scenarios
8
+ 2. Functions to identify and manipulate Numba decorators in abstract syntax trees
9
+ 3. Utilities for applying appropriate Numba typing to transformed code
10
+ 4. Parameter management for Numba compilation options
11
+
12
+ The configurations range from conservative options that prioritize compatibility and
13
+ error detection to aggressive optimizations that maximize performance at the cost of
14
+ flexibility. While this module specifically targets Numba, its design follows the pattern
15
+ of generic code transformation tools in the package, allowing similar approaches to be
16
+ applied to other acceleration technologies.
17
+
18
+ This module works in conjunction with transformation tools to convert the general-purpose
19
+ algorithm implementation into a highly-optimized Numba version.
20
+ """
21
+
1
22
  from collections.abc import Callable, Sequence
2
23
  from mapFolding.someAssemblyRequired import ifThis, IngredientsFunction, Make
3
24
  from numba.core.compiler import CompilerBase as numbaCompilerBase
4
- from typing import Any, TYPE_CHECKING, Final, cast
25
+ from typing import Any, cast, Final, TYPE_CHECKING
5
26
  import ast
6
27
 
7
28
  try: