mapFolding 0.7.1__py3-none-any.whl → 0.8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. mapFolding/__init__.py +33 -4
  2. mapFolding/basecamp.py +14 -0
  3. mapFolding/beDRY.py +93 -82
  4. mapFolding/filesystem.py +124 -90
  5. mapFolding/noHomeYet.py +14 -2
  6. mapFolding/oeis.py +18 -3
  7. mapFolding/reference/flattened.py +46 -45
  8. mapFolding/reference/hunterNumba.py +4 -4
  9. mapFolding/reference/irvineJavaPort.py +1 -1
  10. mapFolding/reference/lunnanNumpy.py +3 -4
  11. mapFolding/reference/lunnanWhile.py +5 -7
  12. mapFolding/reference/rotatedEntryPoint.py +2 -3
  13. mapFolding/someAssemblyRequired/__init__.py +33 -3
  14. mapFolding/someAssemblyRequired/getLLVMforNoReason.py +32 -15
  15. mapFolding/someAssemblyRequired/ingredientsNumba.py +108 -2
  16. mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py +196 -0
  17. mapFolding/someAssemblyRequired/{synthesizeNumbaJob.py → synthesizeNumbaJobVESTIGIAL.py} +19 -23
  18. mapFolding/someAssemblyRequired/transformDataStructures.py +162 -0
  19. mapFolding/someAssemblyRequired/transformationTools.py +607 -252
  20. mapFolding/syntheticModules/numbaCount_doTheNeedful.py +197 -12
  21. mapFolding/theDao.py +37 -16
  22. mapFolding/theSSOT.py +47 -44
  23. {mapfolding-0.7.1.dist-info → mapfolding-0.8.1.dist-info}/METADATA +51 -46
  24. mapfolding-0.8.1.dist-info/RECORD +39 -0
  25. {mapfolding-0.7.1.dist-info → mapfolding-0.8.1.dist-info}/WHEEL +1 -1
  26. tests/conftest.py +2 -3
  27. tests/test_filesystem.py +0 -2
  28. tests/test_other.py +2 -3
  29. tests/test_tasks.py +0 -4
  30. mapFolding/reference/lunnan.py +0 -153
  31. mapFolding/someAssemblyRequired/Z0Z_workbench.py +0 -33
  32. mapFolding/someAssemblyRequired/synthesizeCountingFunctions.py +0 -7
  33. mapFolding/someAssemblyRequired/synthesizeDataConverters.py +0 -135
  34. mapFolding/someAssemblyRequired/synthesizeNumba.py +0 -91
  35. mapFolding/someAssemblyRequired/synthesizeNumbaModules.py +0 -91
  36. mapFolding/someAssemblyRequired/whatWillBe.py +0 -357
  37. mapFolding/syntheticModules/dataNamespaceFlattened.py +0 -30
  38. mapFolding/syntheticModules/multiprocessingCount_doTheNeedful.py +0 -216
  39. mapFolding/syntheticModules/numbaCount.py +0 -90
  40. mapFolding/syntheticModules/numbaCountExample.py +0 -158
  41. mapFolding/syntheticModules/numbaCountSequential.py +0 -111
  42. mapFolding/syntheticModules/numba_doTheNeedful.py +0 -12
  43. mapFolding/syntheticModules/numba_doTheNeedfulExample.py +0 -13
  44. mapfolding-0.7.1.dist-info/RECORD +0 -51
  45. /mapFolding/{syntheticModules → reference}/__init__.py +0 -0
  46. {mapfolding-0.7.1.dist-info → mapfolding-0.8.1.dist-info}/entry_points.txt +0 -0
  47. {mapfolding-0.7.1.dist-info → mapfolding-0.8.1.dist-info/licenses}/LICENSE +0 -0
  48. {mapfolding-0.7.1.dist-info → mapfolding-0.8.1.dist-info}/top_level.txt +0 -0
@@ -1,14 +1,15 @@
1
1
  """The algorithm flattened into semantic sections.
2
2
  This version is not maintained, so you may see differences from the current version."""
3
+ from collections.abc import Sequence
3
4
  from numpy import integer
4
5
  from numpy.typing import NDArray
5
- from typing import List, Any, Final, Optional, Union, Sequence, Tuple, Type, TypedDict
6
+ from typing import Any, Final, TypedDict
6
7
  import enum
7
8
  import numpy
8
9
  import sys
9
10
 
10
- def countFolds(listDimensions: Sequence[int], computationDivisions = None, CPUlimit: Optional[Union[int, float, bool]] = None):
11
- def doWhile():
11
+ def countFolds(listDimensions: Sequence[int], computationDivisions: int | str | None = None, CPUlimit: int | float | bool | None = None) -> int:
12
+ def doWhile() -> None:
12
13
 
13
14
  while activeLeafGreaterThan0Condition():
14
15
 
@@ -49,99 +50,99 @@ def countFolds(listDimensions: Sequence[int], computationDivisions = None, CPUli
49
50
  if placeLeafCondition():
50
51
  placeLeaf()
51
52
 
52
- def activeGapIncrement():
53
+ def activeGapIncrement() -> None:
53
54
  my[indexMy.gap1ndex] += 1
54
55
 
55
- def activeLeafGreaterThan0Condition():
56
+ def activeLeafGreaterThan0Condition() -> bool:
56
57
  return my[indexMy.leaf1ndex] > 0
57
58
 
58
- def activeLeafGreaterThanLeavesTotalCondition():
59
+ def activeLeafGreaterThanLeavesTotalCondition() -> bool:
59
60
  return my[indexMy.leaf1ndex] > the[indexThe.leavesTotal]
60
61
 
61
- def activeLeafIsTheFirstLeafCondition():
62
+ def activeLeafIsTheFirstLeafCondition() -> bool:
62
63
  return my[indexMy.leaf1ndex] <= 1
63
64
 
64
- def activeLeafNotEqualToTaskDivisionsCondition():
65
+ def activeLeafNotEqualToTaskDivisionsCondition() -> bool:
65
66
  return my[indexMy.leaf1ndex] != the[indexThe.taskDivisions]
66
67
 
67
- def allDimensionsAreUnconstrained():
68
+ def allDimensionsAreUnconstrained() -> bool:
68
69
  return my[indexMy.dimensionsUnconstrained] == the[indexThe.dimensionsTotal]
69
70
 
70
- def backtrack():
71
+ def backtrack() -> None:
71
72
  my[indexMy.leaf1ndex] -= 1
72
73
  track[indexTrack.leafBelow, track[indexTrack.leafAbove, my[indexMy.leaf1ndex]]] = track[indexTrack.leafBelow, my[indexMy.leaf1ndex]]
73
74
  track[indexTrack.leafAbove, track[indexTrack.leafBelow, my[indexMy.leaf1ndex]]] = track[indexTrack.leafAbove, my[indexMy.leaf1ndex]]
74
75
 
75
- def backtrackCondition():
76
+ def backtrackCondition() -> bool:
76
77
  return my[indexMy.leaf1ndex] > 0 and my[indexMy.gap1ndex] == track[indexTrack.gapRangeStart, my[indexMy.leaf1ndex] - 1]
77
78
 
78
- def computationDivisionsCondition():
79
+ def computationDivisionsCondition() -> bool:
79
80
  return the[indexThe.taskDivisions] == int(False)
80
81
 
81
- def countGaps():
82
+ def countGaps() -> None:
82
83
  gapsWhere[my[indexMy.gap1ndexCeiling]] = my[indexMy.leafConnectee]
83
84
  if track[indexTrack.countDimensionsGapped, my[indexMy.leafConnectee]] == 0:
84
85
  gap1ndexCeilingIncrement()
85
86
  track[indexTrack.countDimensionsGapped, my[indexMy.leafConnectee]] += 1
86
87
 
87
- def dimension1ndexIncrement():
88
+ def dimension1ndexIncrement() -> None:
88
89
  my[indexMy.dimension1ndex] += 1
89
90
 
90
- def dimensionsUnconstrainedCondition():
91
+ def dimensionsUnconstrainedCondition() -> bool:
91
92
  return connectionGraph[my[indexMy.dimension1ndex], my[indexMy.leaf1ndex], my[indexMy.leaf1ndex]] == my[indexMy.leaf1ndex]
92
93
 
93
- def dimensionsUnconstrainedIncrement():
94
+ def dimensionsUnconstrainedIncrement() -> None:
94
95
  my[indexMy.dimensionsUnconstrained] += 1
95
96
 
96
- def filterCommonGaps():
97
+ def filterCommonGaps() -> None:
97
98
  gapsWhere[my[indexMy.gap1ndex]] = gapsWhere[my[indexMy.indexMiniGap]]
98
99
  if track[indexTrack.countDimensionsGapped, gapsWhere[my[indexMy.indexMiniGap]]] == the[indexThe.dimensionsTotal] - my[indexMy.dimensionsUnconstrained]:
99
100
  activeGapIncrement()
100
101
  track[indexTrack.countDimensionsGapped, gapsWhere[my[indexMy.indexMiniGap]]] = 0
101
102
 
102
- def findGapsInitializeVariables():
103
+ def findGapsInitializeVariables() -> None:
103
104
  my[indexMy.dimensionsUnconstrained] = 0
104
105
  my[indexMy.gap1ndexCeiling] = track[indexTrack.gapRangeStart, my[indexMy.leaf1ndex] - 1]
105
106
  my[indexMy.dimension1ndex] = 1
106
107
 
107
- def foldsSubTotalsIncrement():
108
+ def foldsSubTotalsIncrement() -> None:
108
109
  foldsSubTotals[my[indexMy.taskIndex]] += the[indexThe.leavesTotal]
109
110
 
110
- def gap1ndexCeilingIncrement():
111
+ def gap1ndexCeilingIncrement() -> None:
111
112
  my[indexMy.gap1ndexCeiling] += 1
112
113
 
113
- def indexMiniGapIncrement():
114
+ def indexMiniGapIncrement() -> None:
114
115
  my[indexMy.indexMiniGap] += 1
115
116
 
116
- def indexMiniGapInitialization():
117
+ def indexMiniGapInitialization() -> None:
117
118
  my[indexMy.indexMiniGap] = my[indexMy.gap1ndex]
118
119
 
119
- def insertUnconstrainedLeaf():
120
+ def insertUnconstrainedLeaf() -> None:
120
121
  my[indexMy.indexLeaf] = 0
121
122
  while my[indexMy.indexLeaf] < my[indexMy.leaf1ndex]:
122
123
  gapsWhere[my[indexMy.gap1ndexCeiling]] = my[indexMy.indexLeaf]
123
124
  my[indexMy.gap1ndexCeiling] += 1
124
125
  my[indexMy.indexLeaf] += 1
125
126
 
126
- def leafBelowSentinelIs1Condition():
127
+ def leafBelowSentinelIs1Condition() -> bool:
127
128
  return track[indexTrack.leafBelow, 0] == 1
128
129
 
129
- def leafConnecteeInitialization():
130
+ def leafConnecteeInitialization() -> None:
130
131
  my[indexMy.leafConnectee] = connectionGraph[my[indexMy.dimension1ndex], my[indexMy.leaf1ndex], my[indexMy.leaf1ndex]]
131
132
 
132
- def leafConnecteeUpdate():
133
+ def leafConnecteeUpdate() -> None:
133
134
  my[indexMy.leafConnectee] = connectionGraph[my[indexMy.dimension1ndex], my[indexMy.leaf1ndex], track[indexTrack.leafBelow, my[indexMy.leafConnectee]]]
134
135
 
135
- def loopingLeavesConnectedToActiveLeaf():
136
+ def loopingLeavesConnectedToActiveLeaf() -> bool:
136
137
  return my[indexMy.leafConnectee] != my[indexMy.leaf1ndex]
137
138
 
138
- def loopingTheDimensions():
139
+ def loopingTheDimensions() -> bool:
139
140
  return my[indexMy.dimension1ndex] <= the[indexThe.dimensionsTotal]
140
141
 
141
- def loopingToActiveGapCeiling():
142
+ def loopingToActiveGapCeiling() -> bool:
142
143
  return my[indexMy.indexMiniGap] < my[indexMy.gap1ndexCeiling]
143
144
 
144
- def placeLeaf():
145
+ def placeLeaf() -> None:
145
146
  my[indexMy.gap1ndex] -= 1
146
147
  track[indexTrack.leafAbove, my[indexMy.leaf1ndex]] = gapsWhere[my[indexMy.gap1ndex]]
147
148
  track[indexTrack.leafBelow, my[indexMy.leaf1ndex]] = track[indexTrack.leafBelow, track[indexTrack.leafAbove, my[indexMy.leaf1ndex]]]
@@ -150,13 +151,13 @@ def countFolds(listDimensions: Sequence[int], computationDivisions = None, CPUli
150
151
  track[indexTrack.gapRangeStart, my[indexMy.leaf1ndex]] = my[indexMy.gap1ndex]
151
152
  my[indexMy.leaf1ndex] += 1
152
153
 
153
- def placeLeafCondition():
154
+ def placeLeafCondition() -> bool:
154
155
  return my[indexMy.leaf1ndex] > 0
155
156
 
156
- def taskIndexCondition():
157
+ def taskIndexCondition() -> bool:
157
158
  return my[indexMy.leafConnectee] % the[indexThe.taskDivisions] == my[indexMy.taskIndex]
158
159
 
159
- def thereAreComputationDivisionsYouMightSkip():
160
+ def thereAreComputationDivisionsYouMightSkip() -> bool:
160
161
  if computationDivisionsCondition():
161
162
  return True
162
163
  if activeLeafNotEqualToTaskDivisionsCondition():
@@ -166,11 +167,11 @@ def countFolds(listDimensions: Sequence[int], computationDivisions = None, CPUli
166
167
  return False
167
168
 
168
169
  stateUniversal = outfitFoldings(listDimensions, computationDivisions=computationDivisions, CPUlimit=CPUlimit)
169
- connectionGraph: Final[numpy.ndarray] = stateUniversal['connectionGraph']
170
+ connectionGraph: Final[NDArray[numpy.integer[Any]]] = stateUniversal['connectionGraph']
170
171
  foldsSubTotals = stateUniversal['foldsSubTotals']
171
172
  gapsWhere = stateUniversal['gapsWhere']
172
173
  my = stateUniversal['my']
173
- the: Final[numpy.ndarray] = stateUniversal['the']
174
+ the: Final[NDArray[numpy.integer[Any]]] = stateUniversal['the']
174
175
  track = stateUniversal['track']
175
176
 
176
177
  if the[indexThe.taskDivisions] == int(False):
@@ -192,7 +193,7 @@ def countFolds(listDimensions: Sequence[int], computationDivisions = None, CPUli
192
193
  class EnumIndices(enum.IntEnum):
193
194
  """Base class for index enums."""
194
195
  @staticmethod
195
- def _generate_next_value_(name, start, count, last_values):
196
+ def _generate_next_value_(name: str, start: int, count: int, last_values: list[int]) -> int:
196
197
  """0-indexed."""
197
198
  return count
198
199
 
@@ -229,7 +230,7 @@ class indexTrack(EnumIndices):
229
230
  class computationState(TypedDict):
230
231
  connectionGraph: NDArray[integer[Any]]
231
232
  foldsSubTotals: NDArray[integer[Any]]
232
- mapShape: Tuple[int, ...]
233
+ mapShape: tuple[int, ...]
233
234
  my: NDArray[integer[Any]]
234
235
  gapsWhere: NDArray[integer[Any]]
235
236
  the: NDArray[integer[Any]]
@@ -262,7 +263,7 @@ def getLeavesTotal(listDimensions: Sequence[int]) -> int:
262
263
 
263
264
  return productDimensions
264
265
 
265
- def getTaskDivisions(computationDivisions: Optional[Union[int, str]], concurrencyLimit: int, CPUlimit: Optional[Union[bool, float, int]], listDimensions: Sequence[int]):
266
+ def getTaskDivisions(computationDivisions: int | str | None, concurrencyLimit: int, CPUlimit: bool | float | int | None, listDimensions: Sequence[int]) -> int:
266
267
  if not computationDivisions:
267
268
  return 0
268
269
  else:
@@ -284,7 +285,7 @@ def getTaskDivisions(computationDivisions: Optional[Union[int, str]], concurrenc
284
285
 
285
286
  return taskDivisions
286
287
 
287
- def makeConnectionGraph(listDimensions: Sequence[int], **keywordArguments: Optional[Type]) -> NDArray[integer[Any]]:
288
+ def makeConnectionGraph(listDimensions: Sequence[int], **keywordArguments: type | None) -> NDArray[integer[Any]]:
288
289
  datatype = keywordArguments.get('datatype', dtypeMedium)
289
290
  mapShape = validateListDimensions(listDimensions)
290
291
  leavesTotal = getLeavesTotal(mapShape)
@@ -316,12 +317,12 @@ def makeConnectionGraph(listDimensions: Sequence[int], **keywordArguments: Optio
316
317
  connectionGraph[dimension1ndex, activeLeaf1ndex, connectee1ndex] = connectee1ndex
317
318
  return connectionGraph
318
319
 
319
- def makeDataContainer(shape, datatype: Optional[Type] = None):
320
+ def makeDataContainer(shape: int | Sequence[int], datatype: type | None = None) -> NDArray[integer[Any]]:
320
321
  if datatype is None:
321
322
  datatype = dtypeMedium
322
323
  return numpy.zeros(shape, dtype=datatype)
323
324
 
324
- def outfitFoldings(listDimensions: Sequence[int], computationDivisions: Optional[Union[int, str]] = None, CPUlimit: Optional[Union[bool, float, int]] = None, **keywordArguments: Optional[Type]) -> computationState:
325
+ def outfitFoldings(listDimensions: Sequence[int], computationDivisions: int | str | None = None, CPUlimit: bool | float | int | None = None, **keywordArguments: type | None) -> computationState:
325
326
  datatypeMedium = keywordArguments.get('datatypeMedium', dtypeMedium)
326
327
  datatypeLarge = keywordArguments.get('datatypeLarge', dtypeLarge)
327
328
 
@@ -346,10 +347,10 @@ def outfitFoldings(listDimensions: Sequence[int], computationDivisions: Optional
346
347
  stateInitialized['my'][indexMy.leaf1ndex] = 1
347
348
  return stateInitialized
348
349
 
349
- def parseDimensions(dimensions: Sequence[int], parameterName: str = 'unnamed parameter') -> List[int]:
350
+ def parseDimensions(dimensions: Sequence[int], parameterName: str = 'unnamed parameter') -> list[int]:
350
351
  # listValidated = intInnit(dimensions, parameterName)
351
352
  listNOTValidated = dimensions if isinstance(dimensions, (list, tuple)) else list(dimensions)
352
- listNonNegative = []
353
+ listNonNegative: list[int] = []
353
354
  for dimension in listNOTValidated:
354
355
  if dimension < 0:
355
356
  raise ValueError(f"Dimension {dimension} must be non-negative")
@@ -358,7 +359,7 @@ def parseDimensions(dimensions: Sequence[int], parameterName: str = 'unnamed par
358
359
  raise ValueError("At least one dimension must be non-negative")
359
360
  return listNonNegative
360
361
 
361
- def setCPUlimit(CPUlimit: Union[bool, float, int, None]) -> int:
362
+ def setCPUlimit(CPUlimit: bool | float | int | None) -> int:
362
363
  # if not (CPUlimit is None or isinstance(CPUlimit, (bool, int, float))):
363
364
  # CPUlimit = oopsieKwargsie(CPUlimit)
364
365
  # concurrencyLimit = defineConcurrencyLimit(CPUlimit)
@@ -367,7 +368,7 @@ def setCPUlimit(CPUlimit: Union[bool, float, int, None]) -> int:
367
368
  concurrencyLimit = concurrencyLimitHARDCODED
368
369
  return concurrencyLimit
369
370
 
370
- def validateListDimensions(listDimensions: Sequence[int]) -> List[int]:
371
+ def validateListDimensions(listDimensions: Sequence[int]) -> list[int]:
371
372
  if not listDimensions:
372
373
  raise ValueError(f"listDimensions is a required parameter.")
373
374
  listNonNegative = parseDimensions(listDimensions, 'listDimensions')
@@ -1,9 +1,9 @@
1
- from typing import List
1
+ from typing import Any
2
2
  import numba
3
3
  import numpy
4
4
 
5
5
  @numba.jit(cache=True, nopython=True, fastmath=True)
6
- def countFolds(listDimensions: List[int]) -> int:
6
+ def countFolds(listDimensions: list[int]) -> int:
7
7
  """
8
8
  Count the number of distinct ways to fold a map with at least two positive dimensions.
9
9
 
@@ -13,10 +13,10 @@ def countFolds(listDimensions: List[int]) -> int:
13
13
  Returns:
14
14
  foldsTotal: The total number of distinct folds for the given map dimensions.
15
15
  """
16
- def integerSmall(value) -> numpy.uint8:
16
+ def integerSmall(value: numpy.integer[Any] | Any) -> numpy.uint8:
17
17
  return numpy.uint8(value)
18
18
 
19
- def integerLarge(value) -> numpy.uint64:
19
+ def integerLarge(value: numpy.integer[Any] | Any) -> numpy.uint64:
20
20
  return numpy.uint64(value)
21
21
 
22
22
  dtypeMedium = numpy.uint8
@@ -2,7 +2,7 @@
2
2
  Ported from the Java version by Sean A. Irvine:
3
3
  https://github.com/archmageirvine/joeis/blob/80e3e844b11f149704acbab520bc3a3a25ac34ff/src/irvine/oeis/a001/A001415.java
4
4
 
5
- Citation: mapFolding/citations/jOEIS.bibtex
5
+ Citation: https://github.com/hunterhogan/mapFolding/blob/134f2e6ecdf59fb6f6829c775475544a6aaaa800/citations/jOEIS.bibtex
6
6
  """
7
7
  def foldings(p: list[int], res: int = 0, mod: int = 0) -> int:
8
8
  """
@@ -2,10 +2,9 @@
2
2
  A generally faithful translation of the original Atlas Autocode code by W. F. Lunnon to Python using NumPy.
3
3
  W. F. Lunnon, Multi-dimensional map-folding, The Computer Journal, Volume 14, Issue 1, 1971, Pages 75-80, https://doi.org/10.1093/comjnl/14.1.75
4
4
  """
5
- from typing import List
6
5
  import numpy
7
6
 
8
- def foldings(p: List[int]) -> int:
7
+ def foldings(p: list[int]) -> int:
9
8
  """
10
9
  Run loop with (A, B) on each folding of a p[1] x ... x p[d] map, where A and B are the above and below vectors.
11
10
 
@@ -66,7 +65,7 @@ def foldings(p: List[int]) -> int:
66
65
  # D[i][l][m] = leaf connected to m in section i when inserting l;
67
66
 
68
67
  G: int = 0
69
- l: int = 1
68
+ l = 1
70
69
 
71
70
  # kick off with null folding
72
71
  while l > 0:
@@ -86,7 +85,7 @@ def foldings(p: List[int]) -> int:
86
85
  if D[i][l][l] == l:
87
86
  dd = dd + 1
88
87
  else:
89
- m: int = D[i][l][l]
88
+ m = D[i][l][l]
90
89
  while m != l:
91
90
  gap[gg] = m
92
91
  if count[m] == 0:
@@ -2,9 +2,7 @@
2
2
  A largely faithful translation of the original Atlas Autocode code by W. F. Lunnon to Python using `while`.
3
3
  W. F. Lunnon, Multi-dimensional map-folding, The Computer Journal, Volume 14, Issue 1, 1971, Pages 75-80, https://doi.org/10.1093/comjnl/14.1.75
4
4
  """
5
- from typing import Sequence
6
-
7
- def foldings(p: Sequence[int]) -> int:
5
+ def foldings(p: list[int]) -> int:
8
6
  """
9
7
  Run loop with (A, B) on each folding of a p[1] x ... x p[d] map, where A and B are the above and below vectors.
10
8
 
@@ -38,8 +36,8 @@ def foldings(p: Sequence[int]) -> int:
38
36
  # and later gap[gapter[l]] is the gap where leaf l is currently inserted
39
37
 
40
38
  P = [1] * (d + 1)
41
- C = [[0] * (n + 1) for dimension1 in range(d + 1)]
42
- D = [[[0] * (n + 1) for dimension2 in range(n + 1)] for dimension1 in range(d + 1)]
39
+ C = [[0] * (n + 1) for _dimension1 in range(d + 1)]
40
+ D = [[[0] * (n + 1) for _dimension2 in range(n + 1)] for _dimension1 in range(d + 1)]
43
41
 
44
42
  for i in range(1, d + 1):
45
43
  P[i] = P[i - 1] * p[i - 1]
@@ -65,7 +63,7 @@ def foldings(p: Sequence[int]) -> int:
65
63
  # D[i][l][m] = leaf connected to m in section i when inserting l;
66
64
 
67
65
  G: int = 0
68
- l: int = 1
66
+ l = 1
69
67
 
70
68
  # kick off with null folding
71
69
  while l > 0:
@@ -84,7 +82,7 @@ def foldings(p: Sequence[int]) -> int:
84
82
  if D[i][l][l] == l:
85
83
  dd = dd + 1
86
84
  else:
87
- m: int = D[i][l][l]
85
+ m = D[i][l][l]
88
86
  while m != l:
89
87
  gap[gg] = m
90
88
  if count[m] == 0:
@@ -1,6 +1,5 @@
1
1
  from mapFolding import outfitFoldings
2
2
  from numba import njit
3
- from typing import List
4
3
  import numpy
5
4
  from numpy.typing import NDArray
6
5
 
@@ -42,7 +41,7 @@ tricky = [
42
41
 
43
42
  COUNTindicesStatic = len(tricky)
44
43
 
45
- def countFolds(listDimensions: List[int]):
44
+ def countFolds(listDimensions: list[int]):
46
45
  static = numpy.zeros(COUNTindicesStatic, dtype=numpy.int64)
47
46
 
48
47
  listDimensions, static[leavesTotal], D, track,gapsWhere = outfitFoldings(listDimensions)
@@ -55,7 +54,7 @@ def countFolds(listDimensions: List[int]):
55
54
  return foldingsTotal
56
55
 
57
56
  # @recordBenchmarks()
58
- def _sherpa(track: NDArray, gap: NDArray, static: NDArray, D: NDArray, p: List[int]):
57
+ def _sherpa(track: NDArray, gap: NDArray, static: NDArray, D: NDArray, p: list[int]):
59
58
  """Performance critical section that counts foldings.
60
59
 
61
60
  Parameters:
@@ -1,16 +1,46 @@
1
- from mapFolding.someAssemblyRequired.whatWillBe import (
1
+ """
2
+ Code transformation framework for algorithmic optimization.
3
+
4
+ This package implements a comprehensive framework for programmatically analyzing,
5
+ transforming, and generating Python code. It enables sophisticated algorithm optimization
6
+ through abstract syntax tree (AST) manipulation, allowing algorithms to be transformed
7
+ from a readable, functional implementation into highly-optimized variants tailored for
8
+ different execution environments or specific computational tasks.
9
+
10
+ Core capabilities:
11
+ 1. AST Pattern Recognition - Precisely identify and match code patterns using composable predicates
12
+ 2. Algorithm Transformation - Convert functional state-based implementations to primitive operations
13
+ 3. Dataclass "Shattering" - Decompose complex state objects into primitive components
14
+ 4. Performance Optimization - Apply domain-specific optimizations for numerical computation
15
+ 5. Code Generation - Generate specialized implementations with appropriate imports and syntax
16
+
17
+ The transformation pipeline supports multiple optimization targets, from general-purpose
18
+ acceleration to generating highly-specialized variants optimized for specific input parameters.
19
+ This multi-level transformation approach allows for both development flexibility and
20
+ runtime performance, preserving algorithm readability in the source while enabling
21
+ maximum execution speed in production.
22
+
23
+ These tools were developed for map folding computation optimization but are designed as
24
+ general-purpose utilities applicable to a wide range of code transformation scenarios,
25
+ particularly for numerically-intensive algorithms that benefit from just-in-time compilation.
26
+ """
27
+ from mapFolding.someAssemblyRequired.transformationTools import (
2
28
  ast_Identifier as ast_Identifier,
3
29
  extractClassDef as extractClassDef,
4
30
  extractFunctionDef as extractFunctionDef,
5
- executeActionUnlessDescendantMatches as executeActionUnlessDescendantMatches,
6
31
  ifThis as ifThis,
7
32
  IngredientsFunction as IngredientsFunction,
8
33
  IngredientsModule as IngredientsModule,
34
+ inlineThisFunctionWithTheseValues as inlineThisFunctionWithTheseValues,
9
35
  LedgerOfImports as LedgerOfImports,
10
- listNumbaCallableDispatchees as listNumbaCallableDispatchees,
11
36
  Make as Make,
37
+ makeDictionaryReplacementStatements as makeDictionaryReplacementStatements,
12
38
  NodeCollector as NodeCollector,
13
39
  NodeReplacer as NodeReplacer,
40
+ RecipeSynthesizeFlow as RecipeSynthesizeFlow,
14
41
  strDotStrCuzPyStoopid as strDotStrCuzPyStoopid,
15
42
  Then as Then,
43
+ write_astModule as write_astModule,
44
+ Z0Z_executeActionUnlessDescendantMatches as Z0Z_executeActionUnlessDescendantMatches,
45
+ Z0Z_replaceMatchingASTnodes as Z0Z_replaceMatchingASTnodes,
16
46
  )
@@ -1,21 +1,38 @@
1
- import importlib
1
+ """
2
+ Utility for extracting LLVM IR from compiled Python modules.
3
+
4
+ This module provides functionality to extract and save the LLVM Intermediate Representation (IR)
5
+ generated when Numba compiles Python functions. It implements a simple interface that:
6
+
7
+ 1. Imports a specified Python module from its file path
8
+ 2. Extracts the LLVM IR from a specified function within that module
9
+ 3. Writes the IR to a file with the same base name but with the .ll extension
10
+
11
+ The extracted LLVM IR can be valuable for debugging, optimization analysis, or educational
12
+ purposes, as it provides a view into how high-level Python code is translated into
13
+ lower-level representations for machine execution.
14
+
15
+ While originally part of a tighter integration with the code generation pipeline,
16
+ this module now operates as a standalone utility that can be applied to any module
17
+ containing Numba-compiled functions.
18
+ """
2
19
  from importlib.machinery import ModuleSpec
20
+ from pathlib import Path
3
21
  from types import ModuleType
4
22
  import importlib.util
5
23
  import llvmlite.binding
6
- import pathlib
7
24
 
8
- def writeModuleLLVM(pathFilename: pathlib.Path, identifierCallable: str) -> pathlib.Path:
9
- """Import the generated module directly and get its LLVM IR."""
10
- specTarget: ModuleSpec | None = importlib.util.spec_from_file_location("generatedModule", pathFilename)
11
- if specTarget is None or specTarget.loader is None:
12
- raise ImportError(f"Could not create module spec or loader for {pathFilename}")
13
- moduleTarget: ModuleType = importlib.util.module_from_spec(specTarget)
14
- specTarget.loader.exec_module(moduleTarget)
25
+ def writeModuleLLVM(pathFilename: Path, identifierCallable: str) -> Path:
26
+ """Import the generated module directly and get its LLVM IR."""
27
+ specTarget: ModuleSpec | None = importlib.util.spec_from_file_location("generatedModule", pathFilename)
28
+ if specTarget is None or specTarget.loader is None:
29
+ raise ImportError(f"Could not create module spec or loader for {pathFilename}")
30
+ moduleTarget: ModuleType = importlib.util.module_from_spec(specTarget)
31
+ specTarget.loader.exec_module(moduleTarget)
15
32
 
16
- # Get LLVM IR and write to file
17
- linesLLVM = moduleTarget.__dict__[identifierCallable].inspect_llvm()[()]
18
- moduleLLVM: llvmlite.binding.ModuleRef = llvmlite.binding.module.parse_assembly(linesLLVM)
19
- pathFilenameLLVM: pathlib.Path = pathFilename.with_suffix(".ll")
20
- pathFilenameLLVM.write_text(str(moduleLLVM))
21
- return pathFilenameLLVM
33
+ # Get LLVM IR and write to file
34
+ linesLLVM = moduleTarget.__dict__[identifierCallable].inspect_llvm()[()]
35
+ moduleLLVM: llvmlite.binding.ModuleRef = llvmlite.binding.module.parse_assembly(linesLLVM)
36
+ pathFilenameLLVM: Path = pathFilename.with_suffix(".ll")
37
+ pathFilenameLLVM.write_text(str(moduleLLVM))
38
+ return pathFilenameLLVM
@@ -1,6 +1,29 @@
1
- from collections.abc import Callable
1
+ """
2
+ Numba-specific ingredients for optimized code generation.
3
+
4
+ This module provides specialized tools, constants, and types specifically designed
5
+ for transforming Python code into Numba-accelerated implementations. It implements:
6
+
7
+ 1. A range of Numba jit decorator configurations for different optimization scenarios
8
+ 2. Functions to identify and manipulate Numba decorators in abstract syntax trees
9
+ 3. Utilities for applying appropriate Numba typing to transformed code
10
+ 4. Parameter management for Numba compilation options
11
+
12
+ The configurations range from conservative options that prioritize compatibility and
13
+ error detection to aggressive optimizations that maximize performance at the cost of
14
+ flexibility. While this module specifically targets Numba, its design follows the pattern
15
+ of generic code transformation tools in the package, allowing similar approaches to be
16
+ applied to other acceleration technologies.
17
+
18
+ This module works in conjunction with transformation tools to convert the general-purpose
19
+ algorithm implementation into a highly-optimized Numba version.
20
+ """
21
+
22
+ from collections.abc import Callable, Sequence
23
+ from mapFolding.someAssemblyRequired import ifThis, IngredientsFunction, Make
2
24
  from numba.core.compiler import CompilerBase as numbaCompilerBase
3
- from typing import Any, TYPE_CHECKING, Final
25
+ from typing import Any, cast, Final, TYPE_CHECKING
26
+ import ast
4
27
 
5
28
  try:
6
29
  from typing import NotRequired
@@ -98,3 +121,86 @@ parametersNumbaMinimum: Final[ParametersNumba] = {
98
121
  'nopython': False,
99
122
  'forceobj': True,
100
123
  'parallel': False, }
124
+
125
+ Z0Z_numbaDataTypeModule = 'numba'
126
+ Z0Z_decoratorCallable = 'jit'
127
+
128
+ def thisIsNumbaDotJit(Ima: ast.AST) -> bool:
129
+ return ifThis.isCallNamespace_Identifier(Z0Z_numbaDataTypeModule, Z0Z_decoratorCallable)(Ima)
130
+
131
+ def thisIsJit(Ima: ast.AST) -> bool:
132
+ return ifThis.isCall_Identifier(Z0Z_decoratorCallable)(Ima)
133
+
134
+ def thisIsAnyNumbaJitDecorator(Ima: ast.AST) -> bool:
135
+ return thisIsNumbaDotJit(Ima) or thisIsJit(Ima)
136
+
137
+ def decorateCallableWithNumba(ingredientsFunction: IngredientsFunction, parametersNumba: ParametersNumba | None = None) -> IngredientsFunction:
138
+ def Z0Z_UnhandledDecorators(astCallable: ast.FunctionDef) -> ast.FunctionDef:
139
+ # TODO: more explicit handling of decorators. I'm able to ignore this because I know `algorithmSource` doesn't have any decorators.
140
+ for decoratorItem in astCallable.decorator_list.copy():
141
+ import warnings
142
+ astCallable.decorator_list.remove(decoratorItem)
143
+ warnings.warn(f"Removed decorator {ast.unparse(decoratorItem)} from {astCallable.name}")
144
+ return astCallable
145
+
146
+ def makeSpecialSignatureForNumba(signatureElement: ast.arg) -> ast.Subscript | ast.Name | None:
147
+ if isinstance(signatureElement.annotation, ast.Subscript) and isinstance(signatureElement.annotation.slice, ast.Tuple):
148
+ annotationShape: ast.expr = signatureElement.annotation.slice.elts[0]
149
+ if isinstance(annotationShape, ast.Subscript) and isinstance(annotationShape.slice, ast.Tuple):
150
+ shapeAsListSlices: list[ast.Slice] = [ast.Slice() for _axis in range(len(annotationShape.slice.elts))]
151
+ shapeAsListSlices[-1] = ast.Slice(step=ast.Constant(value=1))
152
+ shapeAST: ast.Slice | ast.Tuple = ast.Tuple(elts=list(shapeAsListSlices), ctx=ast.Load())
153
+ else:
154
+ shapeAST = ast.Slice(step=ast.Constant(value=1))
155
+
156
+ annotationDtype: ast.expr = signatureElement.annotation.slice.elts[1]
157
+ if (isinstance(annotationDtype, ast.Subscript) and isinstance(annotationDtype.slice, ast.Attribute)):
158
+ datatypeAST = annotationDtype.slice.attr
159
+ else:
160
+ datatypeAST = None
161
+
162
+ ndarrayName = signatureElement.arg
163
+ Z0Z_hacky_dtype: str = ndarrayName
164
+ datatype_attr = datatypeAST or Z0Z_hacky_dtype
165
+ ingredientsFunction.imports.addImportFromStr(datatypeModuleDecorator, datatype_attr)
166
+ datatypeNumba = ast.Name(id=datatype_attr, ctx=ast.Load())
167
+
168
+ return ast.Subscript(value=datatypeNumba, slice=shapeAST, ctx=ast.Load())
169
+
170
+ elif isinstance(signatureElement.annotation, ast.Name):
171
+ return signatureElement.annotation
172
+ return None
173
+
174
+ datatypeModuleDecorator: str = Z0Z_numbaDataTypeModule
175
+ list_argsDecorator: Sequence[ast.expr] = []
176
+
177
+ list_arg4signature_or_function: list[ast.expr] = []
178
+ for parameter in ingredientsFunction.astFunctionDef.args.args:
179
+ # Efficient translation of Python scalar types to Numba types https://github.com/hunterhogan/mapFolding/issues/8
180
+ # For now, let Numba infer them.
181
+ continue
182
+ signatureElement: ast.Subscript | ast.Name | None = makeSpecialSignatureForNumba(parameter)
183
+ if signatureElement:
184
+ list_arg4signature_or_function.append(signatureElement)
185
+
186
+ if ingredientsFunction.astFunctionDef.returns and isinstance(ingredientsFunction.astFunctionDef.returns, ast.Name):
187
+ theReturn: ast.Name = ingredientsFunction.astFunctionDef.returns
188
+ list_argsDecorator = [cast(ast.expr, ast.Call(func=ast.Name(id=theReturn.id, ctx=ast.Load())
189
+ , args=list_arg4signature_or_function if list_arg4signature_or_function else [], keywords=[] ) )]
190
+ elif list_arg4signature_or_function:
191
+ list_argsDecorator = [cast(ast.expr, ast.Tuple(elts=list_arg4signature_or_function, ctx=ast.Load()))]
192
+
193
+ ingredientsFunction.astFunctionDef = Z0Z_UnhandledDecorators(ingredientsFunction.astFunctionDef)
194
+ if parametersNumba is None:
195
+ parametersNumba = parametersNumbaDEFAULT
196
+ listDecoratorKeywords: list[ast.keyword] = [Make.ast_keyword(parameterName, Make.astConstant(parameterValue)) for parameterName, parameterValue in parametersNumba.items()]
197
+
198
+ decoratorModule: str = Z0Z_numbaDataTypeModule
199
+ decoratorCallable: str = Z0Z_decoratorCallable
200
+ ingredientsFunction.imports.addImportFromStr(decoratorModule, decoratorCallable)
201
+ # Leave this line in so that global edits will change it.
202
+ astDecorator: ast.Call = Make.astCall(Make.astName(decoratorCallable), list_argsDecorator, listDecoratorKeywords)
203
+ astDecorator: ast.Call = Make.astCall(Make.astName(decoratorCallable), list_astKeywords=listDecoratorKeywords) # type: ignore[no-redef]
204
+
205
+ ingredientsFunction.astFunctionDef.decorator_list = [astDecorator]
206
+ return ingredientsFunction