mapFolding 0.2.4__py3-none-any.whl → 0.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mapFolding/__init__.py CHANGED
@@ -1,6 +1,6 @@
  from .theSSOT import *
  from Z0Z_tools import defineConcurrencyLimit, intInnit, oopsieKwargsie
- from .beDRY import getFilenameFoldsTotal, outfitCountFolds
+ from .beDRY import getFilenameFoldsTotal, getPathFilenameFoldsTotal, outfitCountFolds, saveFoldsTotal
  from .startHere import countFolds
  from .oeis import oeisIDfor_n, getOEISids, clearOEIScache

mapFolding/babbage.py CHANGED
@@ -1,5 +1,5 @@
  from mapFolding.importSelector import countSequential, countParallel, countInitialize
- from mapFolding import indexThe
+ from mapFolding import indexMy
  from numpy import integer
  from numpy.typing import NDArray
  from typing import Any, Tuple
@@ -7,7 +7,7 @@ import numba
  import numpy

  @numba.jit(cache=True)
- def _countFolds(connectionGraph: NDArray[integer[Any]], foldsSubTotals: NDArray[integer[Any]], gapsWhere: NDArray[integer[Any]], mapShape: Tuple[int, ...], my: NDArray[integer[Any]], the: NDArray[integer[Any]], track: NDArray[integer[Any]]):
+ def _countFolds(connectionGraph: NDArray[integer[Any]], foldGroups: NDArray[integer[Any]], gapsWhere: NDArray[integer[Any]], mapShape: Tuple[int, ...], my: NDArray[integer[Any]], track: NDArray[integer[Any]]):
  """
  What in tarnation is this stupid module and function?

@@ -27,9 +27,9 @@ def _countFolds(connectionGraph: NDArray[integer[Any]], foldsSubTotals: NDArray[

  """
  # print("babbage")
- countInitialize(connectionGraph=connectionGraph, gapsWhere=gapsWhere, my=my, the=the, track=track)
+ countInitialize(connectionGraph=connectionGraph, gapsWhere=gapsWhere, my=my, track=track)

- if the[indexThe.taskDivisions.value] > 0:
- countParallel(connectionGraph=connectionGraph, foldsSubTotals=foldsSubTotals, gapsWherePARALLEL=gapsWhere, myPARALLEL=my, the=the, trackPARALLEL=track)
+ if my[indexMy.taskDivisions.value] > 0:
+ countParallel(connectionGraph=connectionGraph, foldGroups=foldGroups, gapsWherePARALLEL=gapsWhere, myPARALLEL=my, trackPARALLEL=track)
  else:
- countSequential(connectionGraph=connectionGraph, foldsSubTotals=foldsSubTotals, gapsWhere=gapsWhere, my=my, the=the, track=track)
+ countSequential(connectionGraph=connectionGraph, foldGroups=foldGroups, gapsWhere=gapsWhere, my=my, track=track)
mapFolding/beDRY.py CHANGED
@@ -1,16 +1,18 @@
  """A relatively stable API for oft-needed functionality."""
- from mapFolding import dtypeDefault, dtypeLarge
- from mapFolding import indexMy, indexThe, indexTrack, computationState
+ from mapFolding import dtypeDefault, dtypeLarge, dtypeSmall, pathJobDEFAULT
+ from mapFolding import indexMy, indexTrack, computationState
  from mapFolding import intInnit, defineConcurrencyLimit, oopsieKwargsie
  from numpy import integer
  from numpy.typing import NDArray
  from typing import Any, List, Optional, Sequence, Type, Union
  import numba
  import numpy
+ import os
+ import pathlib
  import sys

  def getFilenameFoldsTotal(listDimensions: Sequence[int]) -> str:
- return str(sorted(listDimensions)).replace(' ', '') + '.foldsTotal'
+ return str(sorted(listDimensions)).replace(', ', 'x').replace('[', 'p').replace(']', '') + '.foldsTotal'

  def getLeavesTotal(listDimensions: Sequence[int]) -> int:
  """
@@ -36,6 +38,14 @@ def getLeavesTotal(listDimensions: Sequence[int]) -> int:

  return productDimensions

+ def getPathFilenameFoldsTotal(listDimensions: Sequence[int], pathishWriteFoldsTotal: Optional[Union[str, os.PathLike[str]]] = None) -> pathlib.Path:
+ pathFilenameFoldsTotal = pathlib.Path(pathishWriteFoldsTotal) if pathishWriteFoldsTotal is not None else pathJobDEFAULT
+ if pathFilenameFoldsTotal.is_dir():
+ filenameFoldsTotalDEFAULT = getFilenameFoldsTotal(listDimensions)
+ pathFilenameFoldsTotal = pathFilenameFoldsTotal / filenameFoldsTotalDEFAULT
+ pathFilenameFoldsTotal.parent.mkdir(parents=True, exist_ok=True)
+ return pathFilenameFoldsTotal
+
  def getTaskDivisions(computationDivisions: Optional[Union[int, str]], concurrencyLimit: int, CPUlimit: Optional[Union[bool, float, int]], listDimensions: Sequence[int]):
  """
  Determines whether or how to divide the computation into tasks.
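A usage sketch for the new getPathFilenameFoldsTotal helper, inferred from the hunk above (pathJobDEFAULT is assumed to be the package's default job directory, as the import suggests): an existing directory gets the default filename appended, any other path is used as given, and parent directories are created either way.

    from mapFolding.beDRY import getPathFilenameFoldsTotal

    # If '/tmp/mapFoldingJobs' already exists as a directory, the default filename is appended, e.g. .../p2x3.foldsTotal
    pathA = getPathFilenameFoldsTotal([3, 2], '/tmp/mapFoldingJobs')
    # Explicit file path: used as-is, parent directories created if needed
    pathB = getPathFilenameFoldsTotal([3, 2], '/tmp/mapFoldingJobs/run1.foldsTotal')
    # No argument: falls back to pathJobDEFAULT
    pathC = getPathFilenameFoldsTotal([3, 2])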
@@ -91,10 +101,11 @@ def makeConnectionGraph(listDimensions: Sequence[int], **keywordArguments: Optio
  Constructs a multi-dimensional connection graph representing the connections between the leaves of a map with the given dimensions.
  Also called a Cartesian product decomposition or dimensional product mapping.

- Parameters:
+ Parameters
  listDimensions: A sequence of integers representing the dimensions of the map.
- Returns:
- connectionGraph: A 3D numpy array with shape of (dimensionsTotal + 1, leavesTotal + 1, leavesTotal + 1).
+
+ Returns
+ connectionGraph: A 3D numpy array with shape of (dimensionsTotal, leavesTotal + 1, leavesTotal + 1).
  """
  datatype = keywordArguments.get('datatype', dtypeDefault)
  mapShape = validateListDimensions(listDimensions)
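The dimension axis is now 0-indexed, so the connection graph drops the unused zeroth plane that 0.2.4 allocated. A minimal sketch of the visible effect, assuming makeConnectionGraph is imported from mapFolding.beDRY as defined in this file:

    from mapFolding.beDRY import makeConnectionGraph

    connectionGraph = makeConnectionGraph([2, 2])
    # 0.2.6: (dimensionsTotal, leavesTotal + 1, leavesTotal + 1) == (2, 5, 5)
    # 0.2.4 allocated (dimensionsTotal + 1, 5, 5) and left plane 0 as zeros
    print(connectionGraph.shape)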
@@ -106,39 +117,37 @@ def makeConnectionGraph(listDimensions: Sequence[int], **keywordArguments: Optio
  cumulativeProduct = numpy.multiply.accumulate([1] + mapShape, dtype=datatype)

  # Step 2: create a coordinate system
- coordinateSystem = numpy.zeros((dimensionsTotal + 1, leavesTotal + 1), dtype=datatype)
+ coordinateSystem = numpy.zeros((dimensionsTotal, leavesTotal + 1), dtype=datatype)

- for dimension1ndex in range(1, dimensionsTotal + 1):
+ for indexDimension in range(dimensionsTotal):
  for leaf1ndex in range(1, leavesTotal + 1):
- coordinateSystem[dimension1ndex, leaf1ndex] = (
- ((leaf1ndex - 1) // cumulativeProduct[dimension1ndex - 1]) %
- arrayDimensions[dimension1ndex - 1] + 1
+ coordinateSystem[indexDimension, leaf1ndex] = (
+ ((leaf1ndex - 1) // cumulativeProduct[indexDimension]) %
+ arrayDimensions[indexDimension] + 1
  )

  # Step 3: create and fill the connection graph
- connectionGraph = numpy.zeros((dimensionsTotal + 1, leavesTotal + 1, leavesTotal + 1), dtype=datatype)
+ connectionGraph = numpy.zeros((dimensionsTotal, leavesTotal + 1, leavesTotal + 1), dtype=datatype)

- for dimension1ndex in range(1, dimensionsTotal + 1):
+ for indexDimension in range(dimensionsTotal):
  for activeLeaf1ndex in range(1, leavesTotal + 1):
  for connectee1ndex in range(1, activeLeaf1ndex + 1):
  # Base coordinate conditions
- isFirstCoord = coordinateSystem[dimension1ndex, connectee1ndex] == 1
- isLastCoord = coordinateSystem[dimension1ndex, connectee1ndex] == arrayDimensions[dimension1ndex - 1]
- exceedsActive = connectee1ndex + cumulativeProduct[dimension1ndex - 1] > activeLeaf1ndex
+ isFirstCoord = coordinateSystem[indexDimension, connectee1ndex] == 1
+ isLastCoord = coordinateSystem[indexDimension, connectee1ndex] == arrayDimensions[indexDimension]
+ exceedsActive = connectee1ndex + cumulativeProduct[indexDimension] > activeLeaf1ndex

  # Parity check
- isEvenParity = (coordinateSystem[dimension1ndex, activeLeaf1ndex] & 1) == \
- (coordinateSystem[dimension1ndex, connectee1ndex] & 1)
+ isEvenParity = (coordinateSystem[indexDimension, activeLeaf1ndex] & 1) == \
+ (coordinateSystem[indexDimension, connectee1ndex] & 1)

  # Determine connection value
  if (isEvenParity and isFirstCoord) or (not isEvenParity and (isLastCoord or exceedsActive)):
- connectionGraph[dimension1ndex, activeLeaf1ndex, connectee1ndex] = connectee1ndex
+ connectionGraph[indexDimension, activeLeaf1ndex, connectee1ndex] = connectee1ndex
  elif isEvenParity and not isFirstCoord:
- connectionGraph[dimension1ndex, activeLeaf1ndex, connectee1ndex] = connectee1ndex - cumulativeProduct[dimension1ndex - 1]
+ connectionGraph[indexDimension, activeLeaf1ndex, connectee1ndex] = connectee1ndex - cumulativeProduct[indexDimension]
  elif not isEvenParity and not (isLastCoord or exceedsActive):
- connectionGraph[dimension1ndex, activeLeaf1ndex, connectee1ndex] = connectee1ndex + cumulativeProduct[dimension1ndex - 1]
- else:
- connectionGraph[dimension1ndex, activeLeaf1ndex, connectee1ndex] = connectee1ndex
+ connectionGraph[indexDimension, activeLeaf1ndex, connectee1ndex] = connectee1ndex + cumulativeProduct[indexDimension]

  return connectionGraph

@@ -148,7 +157,7 @@ def makeDataContainer(shape, datatype: Optional[Type] = None):
  datatype = dtypeDefault
  return numpy.zeros(shape, dtype=datatype)

- def outfitCountFolds(listDimensions: Sequence[int], computationDivisions: Optional[Union[int, str]] = None, CPUlimit: Optional[Union[bool, float, int]] = None, **keywordArguments: Optional[Type]) -> computationState:
+ def outfitCountFolds(listDimensions: Sequence[int], computationDivisions: Optional[Union[int, str]] = None, CPUlimit: Optional[Union[bool, float, int]] = None, **keywordArguments: Optional[Type[Any]]) -> computationState:
  """
  Initializes and configures the computation state for map folding computations.

@@ -187,36 +196,38 @@
  """
  datatypeDefault = keywordArguments.get('datatypeDefault', dtypeDefault)
  datatypeLarge = keywordArguments.get('datatypeLarge', dtypeLarge)
+ datatypeSmall = keywordArguments.get('datatypeSmall', dtypeSmall)

- the = makeDataContainer(len(indexThe), datatypeDefault)
+ my = makeDataContainer(len(indexMy), datatypeDefault)

  mapShape = tuple(sorted(validateListDimensions(listDimensions)))
- the[indexThe.leavesTotal] = getLeavesTotal(mapShape)
- the[indexThe.dimensionsTotal] = len(mapShape)
  concurrencyLimit = setCPUlimit(CPUlimit)
- the[indexThe.taskDivisions] = getTaskDivisions(computationDivisions, concurrencyLimit, CPUlimit, listDimensions)
+ my[indexMy.taskDivisions] = getTaskDivisions(computationDivisions, concurrencyLimit, CPUlimit, mapShape)

+ foldGroups = makeDataContainer(max(my[indexMy.taskDivisions] + 1, 2), datatypeLarge)
+ foldGroups[-1] = leavesTotal = getLeavesTotal(mapShape)
+
+ my[indexMy.dimensionsTotal] = len(mapShape)
+ my[indexMy.leaf1ndex] = 1
  stateInitialized = computationState(
- connectionGraph = makeConnectionGraph(mapShape, datatype=datatypeDefault),
- foldsSubTotals = makeDataContainer(the[indexThe.leavesTotal], datatypeLarge),
+ connectionGraph = makeConnectionGraph(mapShape, datatype=datatypeSmall),
+ foldGroups = foldGroups,
  mapShape = mapShape,
- my = makeDataContainer(len(indexMy), datatypeLarge),
- gapsWhere = makeDataContainer(int(the[indexThe.leavesTotal]) * int(the[indexThe.leavesTotal]) + 1, datatypeDefault),
- the = the,
- track = makeDataContainer((len(indexTrack), the[indexThe.leavesTotal] + 1), datatypeLarge)
+ my = my,
+ gapsWhere = makeDataContainer(int(leavesTotal) * int(leavesTotal) + 1, datatypeSmall),
+ track = makeDataContainer((len(indexTrack), leavesTotal + 1), datatypeDefault)
  )

- stateInitialized['my'][indexMy.leaf1ndex.value] = 1

  return stateInitialized

- def parseDimensions(dimensions: Sequence[int], parameterName: str = 'unnamed parameter') -> List[int]:
+ def parseDimensions(dimensions: Sequence[int], parameterName: str = 'listDimensions') -> List[int]:
  """
  Parse and validate dimensions are non-negative integers.

  Parameters:
  dimensions: Sequence of integers representing dimensions
- parameterName ('unnamed parameter'): Name of the parameter for error messages. Defaults to 'unnamed parameter'
+ parameterName ('listDimensions'): Name of the parameter for error messages. Defaults to 'listDimensions'
  Returns:
  listNonNegative: List of validated non-negative integers
  Raises:
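The outfitCountFolds hunk above folds everything the retired `the` vector used to carry (leavesTotal, dimensionsTotal, taskDivisions) into the single `my` vector and the new foldGroups array. A hedged sketch of inspecting the resulting state, assuming computationState still supports item access the way the removed stateInitialized['my'] line implies:

    from mapFolding import indexMy
    from mapFolding.beDRY import outfitCountFolds

    state = outfitCountFolds([2, 2])
    my = state['my']
    print(my[indexMy.dimensionsTotal.value], my[indexMy.leaf1ndex.value], my[indexMy.taskDivisions.value])
    print(state['foldGroups'][-1])  # last slot holds leavesTotal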
@@ -232,6 +243,33 @@ def parseDimensions(dimensions: Sequence[int], parameterName: str = 'unnamed par

  return listNonNegative

+ def saveFoldsTotal(pathFilename: Union[str, os.PathLike[str]], foldsTotal: int) -> None:
+ """
+ Save foldsTotal with multiple fallback mechanisms.
+
+ Parameters:
+ pathFilename: Target save location
+ foldsTotal: Critical computed value to save
+ """
+ try:
+ pathFilenameFoldsTotal = pathlib.Path(pathFilename)
+ pathFilenameFoldsTotal.parent.mkdir(parents=True, exist_ok=True)
+ pathFilenameFoldsTotal.write_text(str(foldsTotal))
+ except Exception as ERRORmessage:
+ try:
+ print(f"\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n\n{foldsTotal=}\n\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n")
+ print(ERRORmessage)
+ print(f"\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n\n{foldsTotal=}\n\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n")
+ randomnessPlanB = (int(str(foldsTotal).strip()[-1]) + 1) * ['YO_']
+ filenameInfixUnique = ''.join(randomnessPlanB)
+ pathFilenamePlanB = os.path.join(os.getcwd(), 'foldsTotal' + filenameInfixUnique + '.txt')
+ writeStreamFallback = open(pathFilenamePlanB, 'w')
+ writeStreamFallback.write(str(foldsTotal))
+ writeStreamFallback.close()
+ print(str(pathFilenamePlanB))
+ except:
+ print(foldsTotal)
+
  def setCPUlimit(CPUlimit: Union[bool, float, int, None]) -> int:
  """Sets CPU limit for Numba concurrent operations. Note that it can only affect Numba-jitted functions that have not yet been imported.

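Taken together with getPathFilenameFoldsTotal, the intended calling pattern for the new saveFoldsTotal appears to be the following (a sketch only; the real wiring lives in startHere.countFolds, which this diff does not show):

    from mapFolding import getPathFilenameFoldsTotal, saveFoldsTotal

    pathFilename = getPathFilenameFoldsTotal([3, 2])  # defaults to pathJobDEFAULT / 'p2x3.foldsTotal'
    foldsTotal = 60  # illustrative value, not computed here
    saveFoldsTotal(pathFilename, foldsTotal)  # on failure, falls back to a file in the cwd, then to stdout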
@@ -1,12 +1,7 @@
- # useLovelace = True
- useLovelace = False
+ from mapFolding.lovelace import countSequential
+ from mapFolding.lovelace import countParallel
+ from mapFolding.lovelace import countInitialize

- if useLovelace:
- from mapFolding.lovelace import countSequential
- from mapFolding.lovelace import countParallel
- from mapFolding.lovelace import countInitialize
-
- else:
- from mapFolding.countSequential import countSequential
- from mapFolding.countParallel import countParallel
- from mapFolding.countInitialize import countInitialize
+ # from mapFolding.someAssemblyRequired.countSequential import countSequential
+ # from mapFolding.someAssemblyRequired.countParallel import countParallel
+ # from mapFolding.someAssemblyRequired.countInitialize import countInitialize
mapFolding/lovelace.py CHANGED
@@ -1,4 +1,4 @@
- from mapFolding import indexMy, indexThe, indexTrack
+ from mapFolding import indexMy, indexTrack
  import numba

  @numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
@@ -10,16 +10,16 @@ def activeLeafGreaterThan0Condition(my):
  return my[indexMy.leaf1ndex.value] > 0

  @numba.jit((numba.int64[::1],numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
- def activeLeafGreaterThanLeavesTotalCondition(my, the):
- return my[indexMy.leaf1ndex.value] > the[indexThe.leavesTotal.value]
+ def activeLeafGreaterThanLeavesTotalCondition(foldGroups, my):
+ return my[indexMy.leaf1ndex.value] > foldGroups[-1] # leavesTotal

  @numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
  def activeLeafIsTheFirstLeafCondition(my):
  return my[indexMy.leaf1ndex.value] <= 1

  @numba.jit((numba.int64[::1],numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
- def allDimensionsAreUnconstrained(my, the):
- return my[indexMy.dimensionsUnconstrained.value] == the[indexThe.dimensionsTotal.value]
+ def allDimensionsAreUnconstrained(my):
+ return my[indexMy.dimensionsUnconstrained.value] == my[indexMy.dimensionsTotal.value]

  @numba.jit((numba.int64[::1],numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
  def backtrack(my, track):
@@ -44,20 +44,20 @@ def countGaps(gapsWhere, my, track):

  @numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
  def dimension1ndexIncrement(my):
- my[indexMy.dimension1ndex.value] += 1
+ my[indexMy.indexDimension.value] += 1

  @numba.jit((numba.int64[:,:,::1], numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
  def dimensionsUnconstrainedCondition(connectionGraph, my):
- return connectionGraph[my[indexMy.dimension1ndex.value], my[indexMy.leaf1ndex.value], my[indexMy.leaf1ndex.value]] == my[indexMy.leaf1ndex.value]
+ return connectionGraph[my[indexMy.indexDimension.value], my[indexMy.leaf1ndex.value], my[indexMy.leaf1ndex.value]] == my[indexMy.leaf1ndex.value]

  @numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
  def dimensionsUnconstrainedIncrement(my):
  my[indexMy.dimensionsUnconstrained.value] += 1

  @numba.jit((numba.int64[::1],numba.int64[::1],numba.int64[::1],numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
- def filterCommonGaps(gapsWhere, my, the, track):
+ def filterCommonGaps(gapsWhere, my, track):
  gapsWhere[my[indexMy.gap1ndex.value]] = gapsWhere[my[indexMy.indexMiniGap.value]]
- if track[indexTrack.countDimensionsGapped.value, gapsWhere[my[indexMy.indexMiniGap.value]]] == the[indexThe.dimensionsTotal.value] - my[indexMy.dimensionsUnconstrained.value]:
+ if track[indexTrack.countDimensionsGapped.value, gapsWhere[my[indexMy.indexMiniGap.value]]] == my[indexMy.dimensionsTotal.value] - my[indexMy.dimensionsUnconstrained.value]:
  activeGapIncrement(my=my)
  track[indexTrack.countDimensionsGapped.value, gapsWhere[my[indexMy.indexMiniGap.value]]] = 0

@@ -65,11 +65,11 @@ def filterCommonGaps(gapsWhere, my, the, track):
  def findGapsInitializeVariables(my, track):
  my[indexMy.dimensionsUnconstrained.value] = 0
  my[indexMy.gap1ndexCeiling.value] = track[indexTrack.gapRangeStart.value, my[indexMy.leaf1ndex.value] - 1]
- my[indexMy.dimension1ndex.value] = 1
+ my[indexMy.indexDimension.value] = 0

  @numba.jit((numba.int64[::1],numba.int64[::1],numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
- def foldsSubTotalIncrement(foldsSubTotals, my, the):
- foldsSubTotals[my[indexMy.taskIndex.value]] += the[indexThe.leavesTotal.value]
+ def foldsSubTotalIncrement(foldGroups, my):
+ foldGroups[my[indexMy.taskIndex.value]] += 1

  @numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
  def indexMiniGapIncrement(my):
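The single foldGroups array replaces both foldsSubTotals and the leavesTotal entry of the retired `the` vector, following the convention set in outfitCountFolds: slots 0 through taskDivisions - 1 accumulate per-task group counts, and the final slot carries leavesTotal. A hedged illustration of that layout (not package code):

    import numpy

    taskDivisions, leavesTotal = 4, 6
    foldGroups = numpy.zeros(max(taskDivisions + 1, 2), dtype=numpy.int64)
    foldGroups[-1] = leavesTotal   # sentinel read by activeLeafGreaterThanLeavesTotalCondition
    foldGroups[2] += 1             # what foldsSubTotalIncrement now does for taskIndex == 2
    # 0.2.4 added leavesTotal per hit; 0.2.6 adds 1, so the caller presumably
    # recombines the counts as foldGroups[0:-1].sum() * foldGroups[-1]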
@@ -93,19 +93,19 @@ def leafBelowSentinelIs1Condition(track):

  @numba.jit((numba.int64[:,:,::1], numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
  def leafConnecteeInitialization(connectionGraph, my):
- my[indexMy.leafConnectee.value] = connectionGraph[my[indexMy.dimension1ndex.value], my[indexMy.leaf1ndex.value], my[indexMy.leaf1ndex.value]]
+ my[indexMy.leafConnectee.value] = connectionGraph[my[indexMy.indexDimension.value], my[indexMy.leaf1ndex.value], my[indexMy.leaf1ndex.value]]

  @numba.jit((numba.int64[:,:,::1], numba.int64[::1],numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
  def leafConnecteeUpdate(connectionGraph, my, track):
- my[indexMy.leafConnectee.value] = connectionGraph[my[indexMy.dimension1ndex.value], my[indexMy.leaf1ndex.value], track[indexTrack.leafBelow.value, my[indexMy.leafConnectee.value]]]
+ my[indexMy.leafConnectee.value] = connectionGraph[my[indexMy.indexDimension.value], my[indexMy.leaf1ndex.value], track[indexTrack.leafBelow.value, my[indexMy.leafConnectee.value]]]

  @numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
  def loopingLeavesConnectedToActiveLeaf(my):
  return my[indexMy.leafConnectee.value] != my[indexMy.leaf1ndex.value]

  @numba.jit((numba.int64[::1],numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
- def loopingTheDimensions(my, the):
- return my[indexMy.dimension1ndex.value] <= the[indexThe.dimensionsTotal.value]
+ def loopingTheDimensions(my):
+ return my[indexMy.indexDimension.value] < my[indexMy.dimensionsTotal.value]

  @numba.jit((numba.int64[::1],), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
  def loopingToActiveGapCeiling(my):
@@ -126,15 +126,15 @@ def placeLeafCondition(my):
  return my[indexMy.leaf1ndex.value] > 0

  @numba.jit((numba.int64[::1],numba.int64[::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
- def thereAreComputationDivisionsYouMightSkip(my, the):
- return my[indexMy.leaf1ndex.value] != the[indexThe.taskDivisions.value] or my[indexMy.leafConnectee.value] % the[indexThe.taskDivisions.value] == my[indexMy.taskIndex.value]
+ def thereAreComputationDivisionsYouMightSkip(my):
+ return my[indexMy.leaf1ndex.value] != my[indexMy.taskDivisions.value] or my[indexMy.leafConnectee.value] % my[indexMy.taskDivisions.value] == my[indexMy.taskIndex.value]

- @numba.jit((numba.int64[:,:,::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
- def countInitialize(connectionGraph, gapsWhere, my, the, track):
+ @numba.jit((numba.int64[:,:,::1], numba.int64[::1], numba.int64[::1], numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+ def countInitialize(connectionGraph, gapsWhere, my, track):
  while activeLeafGreaterThan0Condition(my=my):
  if activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track):
  findGapsInitializeVariables(my=my, track=track)
- while loopingTheDimensions(my=my, the=the):
+ while loopingTheDimensions(my=my):
  if dimensionsUnconstrainedCondition(connectionGraph=connectionGraph, my=my):
  dimensionsUnconstrainedIncrement(my=my)
  else:
@@ -143,26 +143,26 @@ def countInitialize(connectionGraph, gapsWhere, my, the, track):
  countGaps(gapsWhere=gapsWhere, my=my, track=track)
  leafConnecteeUpdate(connectionGraph=connectionGraph, my=my, track=track)
  dimension1ndexIncrement(my=my)
- if allDimensionsAreUnconstrained(my=my, the=the):
+ if allDimensionsAreUnconstrained(my=my):
  insertUnconstrainedLeaf(gapsWhere=gapsWhere, my=my)
  indexMiniGapInitialization(my=my)
  while loopingToActiveGapCeiling(my=my):
- filterCommonGaps(gapsWhere=gapsWhere, my=my, the=the, track=track)
+ filterCommonGaps(gapsWhere=gapsWhere, my=my, track=track)
  indexMiniGapIncrement(my=my)
  if placeLeafCondition(my=my):
  placeLeaf(gapsWhere=gapsWhere, my=my, track=track)
  if my[indexMy.gap1ndex.value] > 0:
  return

- @numba.jit((numba.int64[:,:,::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
- def countSequential(connectionGraph, foldsSubTotals, gapsWhere, my, the, track):
+ @numba.jit((numba.int64[:,:,::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[:,::1]), parallel=False, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+ def countSequential(connectionGraph, foldGroups, gapsWhere, my, track):
  while activeLeafGreaterThan0Condition(my=my):
  if activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track):
- if activeLeafGreaterThanLeavesTotalCondition(my=my, the=the):
- foldsSubTotalIncrement(foldsSubTotals=foldsSubTotals, my=my, the=the)
+ if activeLeafGreaterThanLeavesTotalCondition(foldGroups=foldGroups, my=my):
+ foldsSubTotalIncrement(foldGroups=foldGroups, my=my)
  else:
  findGapsInitializeVariables(my=my, track=track)
- while loopingTheDimensions(my=my, the=the):
+ while loopingTheDimensions(my=my):
  if dimensionsUnconstrainedCondition(connectionGraph=connectionGraph, my=my):
  dimensionsUnconstrainedIncrement(my=my)
  else:
@@ -173,39 +173,39 @@ def countSequential(connectionGraph, foldsSubTotals, gapsWhere, my, the, track):
  dimension1ndexIncrement(my=my)
  indexMiniGapInitialization(my=my)
  while loopingToActiveGapCeiling(my=my):
- filterCommonGaps(gapsWhere=gapsWhere, my=my, the=the, track=track)
+ filterCommonGaps(gapsWhere=gapsWhere, my=my, track=track)
  indexMiniGapIncrement(my=my)
  while backtrackCondition(my=my, track=track):
  backtrack(my=my, track=track)
  if placeLeafCondition(my=my):
  placeLeaf(gapsWhere=gapsWhere, my=my, track=track)

- @numba.jit((numba.int64[:,:,::1], numba.int64[::1], numba.int64[::1],numba.int64[::1],numba.int64[::1],numba.int64[:,::1]), parallel=True, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
- def countParallel(connectionGraph, foldsSubTotals, gapsWherePARALLEL, myPARALLEL, the, trackPARALLEL):
- for indexSherpa in numba.prange(the[indexThe.taskDivisions.value]):
+ @numba.jit((numba.int64[:,:,::1], numba.int64[::1], numba.int64[::1], numba.int64[::1], numba.int64[:,::1]), parallel=True, boundscheck=False, error_model='numpy', fastmath=True, looplift=False, nogil=True, nopython=True)
+ def countParallel(connectionGraph, foldGroups, gapsWherePARALLEL, myPARALLEL, trackPARALLEL):
+ for indexSherpa in numba.prange(myPARALLEL[indexMy.taskDivisions.value]):
  gapsWhere = gapsWherePARALLEL.copy()
  my = myPARALLEL.copy()
  my[indexMy.taskIndex.value] = indexSherpa
  track = trackPARALLEL.copy()
  while activeLeafGreaterThan0Condition(my=my):
  if activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track):
- if activeLeafGreaterThanLeavesTotalCondition(my=my, the=the):
- foldsSubTotalIncrement(foldsSubTotals=foldsSubTotals, my=my, the=the)
+ if activeLeafGreaterThanLeavesTotalCondition(foldGroups=foldGroups, my=my):
+ foldsSubTotalIncrement(foldGroups=foldGroups, my=my)
  else:
  findGapsInitializeVariables(my=my, track=track)
- while loopingTheDimensions(my=my, the=the):
+ while loopingTheDimensions(my=my):
  if dimensionsUnconstrainedCondition(connectionGraph=connectionGraph, my=my):
  dimensionsUnconstrainedIncrement(my=my)
  else:
  leafConnecteeInitialization(connectionGraph=connectionGraph, my=my)
  while loopingLeavesConnectedToActiveLeaf(my=my):
- if thereAreComputationDivisionsYouMightSkip(my=my, the=the):
+ if thereAreComputationDivisionsYouMightSkip(my=my):
  countGaps(gapsWhere=gapsWhere, my=my, track=track)
  leafConnecteeUpdate(connectionGraph=connectionGraph, my=my, track=track)
  dimension1ndexIncrement(my=my)
  indexMiniGapInitialization(my=my)
  while loopingToActiveGapCeiling(my=my):
- filterCommonGaps(gapsWhere=gapsWhere, my=my, the=the, track=track)
+ filterCommonGaps(gapsWhere=gapsWhere, my=my, track=track)
  indexMiniGapIncrement(my=my)
  while backtrackCondition(my=my, track=track):
  backtrack(my=my, track=track)
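countParallel keeps the same per-task shape as 0.2.4: each prange iteration copies the shared state, brands the copy with its taskIndex, and accumulates only into its own foldGroups slot; the difference is that taskDivisions and leavesTotal now come from myPARALLEL and foldGroups[-1] instead of the retired `the` array. A generic, self-contained sketch of that prange idiom (illustration only, not the package's kernel):

    import numba
    import numpy

    @numba.jit(parallel=True, nopython=True)
    def accumulatePerTask(sharedState, results):
        for indexTask in numba.prange(results.shape[0] - 1):
            state = sharedState.copy()      # private copy per task, as countParallel does
            state[0] = indexTask            # brand the copy with its task index
            results[indexTask] += state[0]  # each task writes only its own slot

    sharedState = numpy.zeros(3, dtype=numpy.int64)
    results = numpy.zeros(5, dtype=numpy.int64)  # last slot reserved, mirroring foldGroups[-1]
    accumulatePerTask(sharedState, results)
    print(results)  # [0 1 2 3 0]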
@@ -1,3 +1,5 @@
+ """I was able to implement the algorithm with JAX, but I didn't see an advantage and it's a pain in the ass.
+ I don't maintain this module."""
  from mapFolding import validateListDimensions, getLeavesTotal, makeConnectionGraph
  from typing import List, Tuple
  import jax
@@ -1,19 +1,22 @@
  from mapFolding import indexMy, indexThe, indexTrack
  import ast
+ import copy
  import pathlib

- dictionaryEnumValues = {}
- for enumIndex in [indexMy, indexThe, indexTrack]:
- for memberName, memberValue in enumIndex._member_map_.items():
- dictionaryEnumValues[f"{enumIndex.__name__}.{memberName}.value"] = memberValue.value
+ def getDictionaryEnumValues():
+ dictionaryEnumValues = {}
+ for enumIndex in [indexMy, indexThe, indexTrack]:
+ for memberName, memberValue in enumIndex._member_map_.items():
+ dictionaryEnumValues[f"{enumIndex.__name__}.{memberName}.value"] = memberValue.value
+ return dictionaryEnumValues

- class RecursiveInliner(ast.NodeTransformer):
+ class RecursiveInlinerWithEnum(ast.NodeTransformer):
  def __init__(self, dictionaryFunctions, dictionaryEnumValues):
  self.dictionaryFunctions = dictionaryFunctions
  self.dictionaryEnumValues = dictionaryEnumValues
  self.processed = set() # Track processed functions to avoid infinite recursion

- def inline_function_body(self, functionName):
+ def inlineFunctionBody(self, functionName):
  if functionName in self.processed:
  return None

@@ -35,7 +38,7 @@ class RecursiveInliner(ast.NodeTransformer):
  def visit_Call(self, node):
  callNode = self.generic_visit(node)
  if isinstance(callNode, ast.Call) and isinstance(callNode.func, ast.Name) and callNode.func.id in self.dictionaryFunctions:
- inlineDefinition = self.inline_function_body(callNode.func.id)
+ inlineDefinition = self.inlineFunctionBody(callNode.func.id)
  if inlineDefinition and inlineDefinition.body:
  lastStmt = inlineDefinition.body[-1]
  if isinstance(lastStmt, ast.Return) and lastStmt.value is not None:
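Alongside the call inlining shown above, the renamed RecursiveInlinerWithEnum presumably also folds enum lookups into literal integers; that is what the dictionaryEnumValues argument and the WithEnum suffix suggest, with keys such as 'indexMy.leaf1ndex.value' coming from getDictionaryEnumValues. A standalone sketch of that enum-folding idea (an illustration with a hypothetical value, not the class's actual method):

    import ast

    dictionaryEnumValues = {'indexMy.leaf1ndex.value': 7}  # hypothetical mapping for illustration

    class EnumValueFolder(ast.NodeTransformer):
        def visit_Attribute(self, node):
            self.generic_visit(node)
            dotted = ast.unparse(node)
            if dotted in dictionaryEnumValues:
                return ast.copy_location(ast.Constant(dictionaryEnumValues[dotted]), node)
            return node

    tree = ast.parse("my[indexMy.leaf1ndex.value] += 1")
    print(ast.unparse(EnumValueFolder().visit(tree)))  # my[7] += 1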
@@ -48,23 +51,23 @@ class RecursiveInliner(ast.NodeTransformer):
  def visit_Expr(self, node):
  if isinstance(node.value, ast.Call):
  if isinstance(node.value.func, ast.Name) and node.value.func.id in self.dictionaryFunctions:
- inlineDefinition = self.inline_function_body(node.value.func.id)
+ inlineDefinition = self.inlineFunctionBody(node.value.func.id)
  if inlineDefinition:
  return [self.visit(stmt) for stmt in inlineDefinition.body]
  return self.generic_visit(node)

- def find_required_imports(node):
- """Find all modules that need to be imported based on AST analysis."""
+ def findRequiredImports(node):
+ """Find all modules that need to be imported based on AST analysis.
+ NOTE: due to hardcoding, this is a glorified regex. No, wait, this is less versatile than regex."""
  requiredImports = set()

  class ImportFinder(ast.NodeVisitor):
  def visit_Name(self, node):
- # Common modules we might need
  if node.id in {'numba'}:
  requiredImports.add(node.id)
  self.generic_visit(node)

- def visit_Decorator(self, node):
+ def visitDecorator(self, node):
  if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
  if node.func.id == 'jit':
  requiredImports.add('numba')
@@ -73,7 +76,7 @@ def find_required_imports(node):
  ImportFinder().visit(node)
  return requiredImports

- def generate_imports(requiredImports):
+ def generateImports(requiredImports):
  """Generate import statements based on required modules."""
  importStatements = []

@@ -88,7 +91,7 @@ def generate_imports(requiredImports):

  return '\n'.join(importStatements)

- def inline_functions(sourceCode, targetFunctionName, dictionaryEnumValues):
+ def inlineFunctions(sourceCode, targetFunctionName, dictionaryEnumValues):
  dictionaryParsed = ast.parse(sourceCode)
  dictionaryFunctions = {
  element.name: element
@@ -96,29 +99,54 @@ def inline_functions(sourceCode, targetFunctionName, dictionaryEnumValues):
  if isinstance(element, ast.FunctionDef)
  }
  nodeTarget = dictionaryFunctions[targetFunctionName]
- nodeInliner = RecursiveInliner(dictionaryFunctions, dictionaryEnumValues)
+ nodeInliner = RecursiveInlinerWithEnum(dictionaryFunctions, dictionaryEnumValues)
  nodeInlined = nodeInliner.visit(nodeTarget)
  ast.fix_missing_locations(nodeInlined)

  # Generate imports
- requiredImports = find_required_imports(nodeInlined)
- importStatements = generate_imports(requiredImports)
+ requiredImports = findRequiredImports(nodeInlined)
+ importStatements = generateImports(requiredImports)

  # Combine imports with inlined code
  inlinedCode = importStatements + '\n\n' + ast.unparse(ast.Module(body=[nodeInlined], type_ignores=[]))
  return inlinedCode

- pathFilenameSource = pathlib.Path("/apps/mapFolding/mapFolding/lovelace.py")
- codeSource = pathFilenameSource.read_text()
-
- listCallables = [
- 'countSequential',
- 'countParallel',
- 'countInitialize',
- ]
- listPathFilenamesDestination = []
- for callableTarget in listCallables:
- pathFilenameDestination = pathFilenameSource.with_stem(callableTarget)
- codeInlined = inline_functions(codeSource, callableTarget, dictionaryEnumValues)
- pathFilenameDestination.write_text(codeInlined)
- listPathFilenamesDestination.append(pathFilenameDestination)
+ def Z0Z_inlineMapFolding():
+ dictionaryEnumValues = getDictionaryEnumValues()
+
+ pathFilenameSource = pathlib.Path("/apps/mapFolding/mapFolding/lovelace.py")
+ codeSource = pathFilenameSource.read_text()
+
+ listCallables = [
+ 'countInitialize',
+ 'countParallel',
+ 'countSequential',
+ ]
+
+ listPathFilenamesDestination: list[pathlib.Path] = []
+ for callableTarget in listCallables:
+ pathFilenameDestination = pathFilenameSource.parent / "someAssemblyRequired" / pathFilenameSource.with_stem(callableTarget).name
+ codeInlined = inlineFunctions(codeSource, callableTarget, dictionaryEnumValues)
+ pathFilenameDestination.write_text(codeInlined)
+ listPathFilenamesDestination.append(pathFilenameDestination)
+
+ listNoNumba = [
+ 'countInitialize',
+ 'countSequential',
+ ]
+
+ listPathFilenamesNoNumba = []
+ for pathFilename in listPathFilenamesDestination:
+ if pathFilename.stem in listNoNumba:
+ pathFilenameNoNumba = pathFilename.with_name(pathFilename.stem + 'NoNumba' + pathFilename.suffix)
+ else:
+ continue
+ codeNoNumba = pathFilename.read_text()
+ for codeLine in copy.copy(codeNoNumba.splitlines()):
+ if 'numba' in codeLine:
+ codeNoNumba = codeNoNumba.replace(codeLine, '')
+ pathFilenameNoNumba.write_text(codeNoNumba)
+ listPathFilenamesNoNumba.append(pathFilenameNoNumba)
+
+ if __name__ == '__main__':
+ Z0Z_inlineMapFolding()
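The NoNumba pass above removes every source line that mentions numba by plain string replacement. A minimal illustration of what that loop leaves behind (not from the package):

    code = "import numba\n@numba.jit(cache=True)\ndef f(x):\n    return x + 1\n"
    for codeLine in code.splitlines():
        if 'numba' in codeLine:
            code = code.replace(codeLine, '')
    print(code)  # blank lines remain where the import and decorator were; the function body survives as plain Python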