mapFolding-0.7.0-py3-none-any.whl → mapFolding-0.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. mapFolding/__init__.py +1 -1
  2. mapFolding/basecamp.py +2 -2
  3. mapFolding/beDRY.py +88 -85
  4. mapFolding/filesystem.py +37 -29
  5. mapFolding/noHomeYet.py +2 -2
  6. mapFolding/oeis.py +2 -2
  7. mapFolding/someAssemblyRequired/Z0Z_workbench.py +347 -31
  8. mapFolding/someAssemblyRequired/__init__.py +4 -3
  9. mapFolding/someAssemblyRequired/getLLVMforNoReason.py +0 -1
  10. mapFolding/someAssemblyRequired/ingredientsNumba.py +87 -2
  11. mapFolding/someAssemblyRequired/synthesizeDataConverters.py +34 -52
  12. mapFolding/someAssemblyRequired/{synthesizeNumbaJob.py → synthesizeNumbaJobVESTIGIAL.py} +18 -21
  13. mapFolding/someAssemblyRequired/transformationTools.py +547 -209
  14. mapFolding/syntheticModules/numbaCount_doTheNeedful.py +197 -12
  15. mapFolding/theDao.py +57 -39
  16. mapFolding/theSSOT.py +59 -59
  17. {mapfolding-0.7.0.dist-info → mapfolding-0.8.0.dist-info}/METADATA +6 -7
  18. mapfolding-0.8.0.dist-info/RECORD +41 -0
  19. {mapfolding-0.7.0.dist-info → mapfolding-0.8.0.dist-info}/WHEEL +1 -1
  20. tests/conftest.py +2 -3
  21. tests/test_computations.py +9 -5
  22. tests/test_filesystem.py +0 -2
  23. tests/test_other.py +2 -3
  24. tests/test_tasks.py +7 -5
  25. mapFolding/someAssemblyRequired/synthesizeCountingFunctions.py +0 -7
  26. mapFolding/someAssemblyRequired/synthesizeNumba.py +0 -91
  27. mapFolding/someAssemblyRequired/synthesizeNumbaModules.py +0 -91
  28. mapFolding/someAssemblyRequired/whatWillBe.py +0 -311
  29. mapFolding/syntheticModules/__init__.py +0 -0
  30. mapFolding/syntheticModules/dataNamespaceFlattened.py +0 -30
  31. mapFolding/syntheticModules/numbaCount.py +0 -90
  32. mapFolding/syntheticModules/numbaCountSequential.py +0 -110
  33. mapFolding/syntheticModules/numba_doTheNeedful.py +0 -12
  34. mapfolding-0.7.0.dist-info/RECORD +0 -50
  35. /mapFolding/syntheticModules/{numbaCountExample.py → numbaCountHistoricalExample.py} +0 -0
  36. /mapFolding/syntheticModules/{numba_doTheNeedfulExample.py → numba_doTheNeedfulHistoricalExample.py} +0 -0
  37. {mapfolding-0.7.0.dist-info → mapfolding-0.8.0.dist-info}/LICENSE +0 -0
  38. {mapfolding-0.7.0.dist-info → mapfolding-0.8.0.dist-info}/entry_points.txt +0 -0
  39. {mapfolding-0.7.0.dist-info → mapfolding-0.8.0.dist-info}/top_level.txt +0 -0
mapFolding/syntheticModules/numbaCount_doTheNeedful.py CHANGED
@@ -1,13 +1,198 @@
- from mapFolding import indexMy
- from mapFolding.syntheticModules.numbaCount import countInitialize, countSequential, countParallel
- from numba import uint16, int64, jit
- from numpy import ndarray, dtype, integer
- from typing import Any
-
- @jit((uint16[:, :, ::1], int64[::1], uint16[::1], uint16[::1], uint16[::1], uint16[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=False, no_cpython_wrapper=False, nopython=True, parallel=False)
- def doTheNeedful(connectionGraph: ndarray[tuple[int, int, int], dtype[integer[Any]]], foldGroups: ndarray[tuple[int], dtype[integer[Any]]], gapsWhere: ndarray[tuple[int], dtype[integer[Any]]], mapShape: ndarray[tuple[int], dtype[integer[Any]]], my: ndarray[tuple[int], dtype[integer[Any]]], track: ndarray[tuple[int, int], dtype[integer[Any]]]) -> None:
- countInitialize(connectionGraph, gapsWhere, my, track)
- if my[indexMy.taskDivisions] > 0:
- countParallel(connectionGraph, foldGroups, gapsWhere, my, track)
+ from concurrent.futures import Future as ConcurrentFuture, ProcessPoolExecutor
+ from copy import deepcopy
+ from mapFolding.theSSOT import Array1DElephino, Array1DFoldsTotal, Array1DLeavesTotal, Array3D, ComputationState, DatatypeElephino, DatatypeFoldsTotal, DatatypeLeavesTotal
+ from numba import jit
+
+ def countInitialize(state: ComputationState) -> ComputationState:
+ while state.leaf1ndex > 0:
+ if state.leaf1ndex <= 1 or state.leafBelow[0] == 1:
+ state.dimensionsUnconstrained = state.dimensionsTotal
+ state.gap1ndexCeiling = state.gapRangeStart[state.leaf1ndex - 1]
+ state.indexDimension = 0
+ while state.indexDimension < state.dimensionsTotal:
+ state.leafConnectee = state.connectionGraph[state.indexDimension, state.leaf1ndex, state.leaf1ndex]
+ if state.leafConnectee == state.leaf1ndex:
+ state.dimensionsUnconstrained -= 1
+ else:
+ while state.leafConnectee != state.leaf1ndex:
+ state.gapsWhere[state.gap1ndexCeiling] = state.leafConnectee
+ if state.countDimensionsGapped[state.leafConnectee] == 0:
+ state.gap1ndexCeiling += 1
+ state.countDimensionsGapped[state.leafConnectee] += 1
+ state.leafConnectee = state.connectionGraph[state.indexDimension, state.leaf1ndex, state.leafBelow[state.leafConnectee]]
+ state.indexDimension += 1
+ if not state.dimensionsUnconstrained:
+ indexLeaf = 0
+ while indexLeaf < state.leaf1ndex:
+ state.gapsWhere[state.gap1ndexCeiling] = indexLeaf
+ state.gap1ndexCeiling += 1
+ indexLeaf += 1
+ state.indexMiniGap = state.gap1ndex
+ while state.indexMiniGap < state.gap1ndexCeiling:
+ state.gapsWhere[state.gap1ndex] = state.gapsWhere[state.indexMiniGap]
+ if state.countDimensionsGapped[state.gapsWhere[state.indexMiniGap]] == state.dimensionsUnconstrained:
+ state.gap1ndex += 1
+ state.countDimensionsGapped[state.gapsWhere[state.indexMiniGap]] = 0
+ state.indexMiniGap += 1
+ if state.leaf1ndex > 0:
+ state.gap1ndex -= 1
+ state.leafAbove[state.leaf1ndex] = state.gapsWhere[state.gap1ndex]
+ state.leafBelow[state.leaf1ndex] = state.leafBelow[state.leafAbove[state.leaf1ndex]]
+ state.leafBelow[state.leafAbove[state.leaf1ndex]] = state.leaf1ndex
+ state.leafAbove[state.leafBelow[state.leaf1ndex]] = state.leaf1ndex
+ state.gapRangeStart[state.leaf1ndex] = state.gap1ndex
+ state.leaf1ndex += 1
+ if state.gap1ndex > 0:
+ break
+ return state
+
+ @jit(_nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=False, no_cpython_wrapper=False, nopython=True, parallel=False)
+ def countParallel(mapShape: tuple[DatatypeLeavesTotal, ...], leavesTotal: DatatypeLeavesTotal, taskDivisions: DatatypeLeavesTotal, concurrencyLimit: DatatypeElephino, connectionGraph: Array3D, dimensionsTotal: DatatypeLeavesTotal, countDimensionsGapped: Array1DLeavesTotal, dimensionsUnconstrained: DatatypeLeavesTotal, gapRangeStart: Array1DElephino, gapsWhere: Array1DLeavesTotal, leafAbove: Array1DLeavesTotal, leafBelow: Array1DLeavesTotal, foldGroups: Array1DFoldsTotal, foldsTotal: DatatypeFoldsTotal, gap1ndex: DatatypeLeavesTotal, gap1ndexCeiling: DatatypeElephino, groupsOfFolds: DatatypeFoldsTotal, indexDimension: DatatypeLeavesTotal, indexLeaf: DatatypeLeavesTotal, indexMiniGap: DatatypeElephino, leaf1ndex: DatatypeElephino, leafConnectee: DatatypeElephino, taskIndex: DatatypeLeavesTotal) -> DatatypeFoldsTotal:
+ while leaf1ndex > 0:
+ if leaf1ndex <= 1 or leafBelow[0] == 1:
+ if leaf1ndex > leavesTotal:
+ groupsOfFolds += 1
+ else:
+ dimensionsUnconstrained = dimensionsTotal
+ gap1ndexCeiling = gapRangeStart[leaf1ndex - 1]
+ indexDimension = 0
+ while indexDimension < dimensionsTotal:
+ if connectionGraph[indexDimension, leaf1ndex, leaf1ndex] == leaf1ndex:
+ dimensionsUnconstrained -= 1
+ else:
+ leafConnectee = connectionGraph[indexDimension, leaf1ndex, leaf1ndex]
+ while leafConnectee != leaf1ndex:
+ if leaf1ndex != taskDivisions or leafConnectee % taskDivisions == taskIndex:
+ gapsWhere[gap1ndexCeiling] = leafConnectee
+ if countDimensionsGapped[leafConnectee] == 0:
+ gap1ndexCeiling += 1
+ countDimensionsGapped[leafConnectee] += 1
+ leafConnectee = connectionGraph[indexDimension, leaf1ndex, leafBelow[leafConnectee]]
+ indexDimension += 1
+ indexMiniGap = gap1ndex
+ while indexMiniGap < gap1ndexCeiling:
+ gapsWhere[gap1ndex] = gapsWhere[indexMiniGap]
+ if countDimensionsGapped[gapsWhere[indexMiniGap]] == dimensionsUnconstrained:
+ gap1ndex += 1
+ countDimensionsGapped[gapsWhere[indexMiniGap]] = 0
+ indexMiniGap += 1
+ while leaf1ndex > 0 and gap1ndex == gapRangeStart[leaf1ndex - 1]:
+ leaf1ndex -= 1
+ leafBelow[leafAbove[leaf1ndex]] = leafBelow[leaf1ndex]
+ leafAbove[leafBelow[leaf1ndex]] = leafAbove[leaf1ndex]
+ if leaf1ndex > 0:
+ gap1ndex -= 1
+ leafAbove[leaf1ndex] = gapsWhere[gap1ndex]
+ leafBelow[leaf1ndex] = leafBelow[leafAbove[leaf1ndex]]
+ leafBelow[leafAbove[leaf1ndex]] = leaf1ndex
+ leafAbove[leafBelow[leaf1ndex]] = leaf1ndex
+ gapRangeStart[leaf1ndex] = gap1ndex
+ leaf1ndex += 1
+ foldGroups[taskIndex] = groupsOfFolds
+ return groupsOfFolds
+
+ @jit(_nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=False, no_cpython_wrapper=False, nopython=True, parallel=False)
+ def countSequential(mapShape: tuple[DatatypeLeavesTotal, ...], leavesTotal: DatatypeLeavesTotal, taskDivisions: DatatypeLeavesTotal, concurrencyLimit: DatatypeElephino, connectionGraph: Array3D, dimensionsTotal: DatatypeLeavesTotal, countDimensionsGapped: Array1DLeavesTotal, dimensionsUnconstrained: DatatypeLeavesTotal, gapRangeStart: Array1DElephino, gapsWhere: Array1DLeavesTotal, leafAbove: Array1DLeavesTotal, leafBelow: Array1DLeavesTotal, foldGroups: Array1DFoldsTotal, foldsTotal: DatatypeFoldsTotal, gap1ndex: DatatypeLeavesTotal, gap1ndexCeiling: DatatypeElephino, groupsOfFolds: DatatypeFoldsTotal, indexDimension: DatatypeLeavesTotal, indexLeaf: DatatypeLeavesTotal, indexMiniGap: DatatypeElephino, leaf1ndex: DatatypeElephino, leafConnectee: DatatypeElephino, taskIndex: DatatypeLeavesTotal) -> tuple[tuple[DatatypeLeavesTotal, ...], DatatypeLeavesTotal, DatatypeLeavesTotal, DatatypeElephino, Array3D, DatatypeLeavesTotal, Array1DLeavesTotal, DatatypeLeavesTotal, Array1DElephino, Array1DLeavesTotal, Array1DLeavesTotal, Array1DLeavesTotal, Array1DFoldsTotal, DatatypeFoldsTotal, DatatypeLeavesTotal, DatatypeElephino, DatatypeFoldsTotal, DatatypeLeavesTotal, DatatypeLeavesTotal, DatatypeElephino, DatatypeElephino, DatatypeElephino, DatatypeLeavesTotal]:
+ while leaf1ndex > 0:
+ if leaf1ndex <= 1 or leafBelow[0] == 1:
+ if leaf1ndex > leavesTotal:
+ groupsOfFolds += 1
+ else:
+ dimensionsUnconstrained = dimensionsTotal
+ gap1ndexCeiling = gapRangeStart[leaf1ndex - 1]
+ indexDimension = 0
+ while indexDimension < dimensionsTotal:
+ leafConnectee = connectionGraph[indexDimension, leaf1ndex, leaf1ndex]
+ if leafConnectee == leaf1ndex:
+ dimensionsUnconstrained -= 1
+ else:
+ while leafConnectee != leaf1ndex:
+ gapsWhere[gap1ndexCeiling] = leafConnectee
+ if countDimensionsGapped[leafConnectee] == 0:
+ gap1ndexCeiling += 1
+ countDimensionsGapped[leafConnectee] += 1
+ leafConnectee = connectionGraph[indexDimension, leaf1ndex, leafBelow[leafConnectee]]
+ indexDimension += 1
+ indexMiniGap = gap1ndex
+ while indexMiniGap < gap1ndexCeiling:
+ gapsWhere[gap1ndex] = gapsWhere[indexMiniGap]
+ if countDimensionsGapped[gapsWhere[indexMiniGap]] == dimensionsUnconstrained:
+ gap1ndex += 1
+ countDimensionsGapped[gapsWhere[indexMiniGap]] = 0
+ indexMiniGap += 1
+ while leaf1ndex > 0 and gap1ndex == gapRangeStart[leaf1ndex - 1]:
+ leaf1ndex -= 1
+ leafBelow[leafAbove[leaf1ndex]] = leafBelow[leaf1ndex]
+ leafAbove[leafBelow[leaf1ndex]] = leafAbove[leaf1ndex]
+ if leaf1ndex > 0:
+ gap1ndex -= 1
+ leafAbove[leaf1ndex] = gapsWhere[gap1ndex]
+ leafBelow[leaf1ndex] = leafBelow[leafAbove[leaf1ndex]]
+ leafBelow[leafAbove[leaf1ndex]] = leaf1ndex
+ leafAbove[leafBelow[leaf1ndex]] = leaf1ndex
+ gapRangeStart[leaf1ndex] = gap1ndex
+ leaf1ndex += 1
+ foldGroups[taskIndex] = groupsOfFolds
+ return (mapShape, leavesTotal, taskDivisions, concurrencyLimit, connectionGraph, dimensionsTotal, countDimensionsGapped, dimensionsUnconstrained, gapRangeStart, gapsWhere, leafAbove, leafBelow, foldGroups, foldsTotal, gap1ndex, gap1ndexCeiling, groupsOfFolds, indexDimension, indexLeaf, indexMiniGap, leaf1ndex, leafConnectee, taskIndex)
+
+ def doTheNeedful(state: ComputationState) -> ComputationState:
+ state = countInitialize(state)
+ if state.taskDivisions > 0:
+ dictionaryConcurrency: dict[int, ConcurrentFuture[ComputationState]] = {}
+ with ProcessPoolExecutor(state.concurrencyLimit) as concurrencyManager:
+ for indexSherpa in range(state.taskDivisions):
+ stateParallel = deepcopy(state)
+ stateParallel.taskIndex = indexSherpa
+ mapShape: tuple[DatatypeLeavesTotal, ...] = stateParallel.mapShape
+ leavesTotal: DatatypeLeavesTotal = stateParallel.leavesTotal
+ taskDivisions: DatatypeLeavesTotal = stateParallel.taskDivisions
+ concurrencyLimit: DatatypeElephino = stateParallel.concurrencyLimit
+ connectionGraph: Array3D = stateParallel.connectionGraph
+ dimensionsTotal: DatatypeLeavesTotal = stateParallel.dimensionsTotal
+ countDimensionsGapped: Array1DLeavesTotal = stateParallel.countDimensionsGapped
+ dimensionsUnconstrained: DatatypeLeavesTotal = stateParallel.dimensionsUnconstrained
+ gapRangeStart: Array1DElephino = stateParallel.gapRangeStart
+ gapsWhere: Array1DLeavesTotal = stateParallel.gapsWhere
+ leafAbove: Array1DLeavesTotal = stateParallel.leafAbove
+ leafBelow: Array1DLeavesTotal = stateParallel.leafBelow
+ foldGroups: Array1DFoldsTotal = stateParallel.foldGroups
+ foldsTotal: DatatypeFoldsTotal = stateParallel.foldsTotal
+ gap1ndex: DatatypeLeavesTotal = stateParallel.gap1ndex
+ gap1ndexCeiling: DatatypeElephino = stateParallel.gap1ndexCeiling
+ groupsOfFolds: DatatypeFoldsTotal = stateParallel.groupsOfFolds
+ indexDimension: DatatypeLeavesTotal = stateParallel.indexDimension
+ indexLeaf: DatatypeLeavesTotal = stateParallel.indexLeaf
+ indexMiniGap: DatatypeElephino = stateParallel.indexMiniGap
+ leaf1ndex: DatatypeElephino = stateParallel.leaf1ndex
+ leafConnectee: DatatypeElephino = stateParallel.leafConnectee
+ taskIndex: DatatypeLeavesTotal = stateParallel.taskIndex
+ dictionaryConcurrency[indexSherpa] = concurrencyManager.submit(countParallel, mapShape, leavesTotal, taskDivisions, concurrencyLimit, connectionGraph, dimensionsTotal, countDimensionsGapped, dimensionsUnconstrained, gapRangeStart, gapsWhere, leafAbove, leafBelow, foldGroups, foldsTotal, gap1ndex, gap1ndexCeiling, groupsOfFolds, indexDimension, indexLeaf, indexMiniGap, leaf1ndex, leafConnectee, taskIndex)
+ for indexSherpa in range(state.taskDivisions):
+ state.foldGroups[indexSherpa] = dictionaryConcurrency[indexSherpa].result()
  else:
- countSequential(connectionGraph, foldGroups, gapsWhere, my, track)
+ mapShape: tuple[DatatypeLeavesTotal, ...] = state.mapShape
+ leavesTotal: DatatypeLeavesTotal = state.leavesTotal
+ taskDivisions: DatatypeLeavesTotal = state.taskDivisions
+ concurrencyLimit: DatatypeElephino = state.concurrencyLimit
+ connectionGraph: Array3D = state.connectionGraph
+ dimensionsTotal: DatatypeLeavesTotal = state.dimensionsTotal
+ countDimensionsGapped: Array1DLeavesTotal = state.countDimensionsGapped
+ dimensionsUnconstrained: DatatypeLeavesTotal = state.dimensionsUnconstrained
+ gapRangeStart: Array1DElephino = state.gapRangeStart
+ gapsWhere: Array1DLeavesTotal = state.gapsWhere
+ leafAbove: Array1DLeavesTotal = state.leafAbove
+ leafBelow: Array1DLeavesTotal = state.leafBelow
+ foldGroups: Array1DFoldsTotal = state.foldGroups
+ foldsTotal: DatatypeFoldsTotal = state.foldsTotal
+ gap1ndex: DatatypeLeavesTotal = state.gap1ndex
+ gap1ndexCeiling: DatatypeElephino = state.gap1ndexCeiling
+ groupsOfFolds: DatatypeFoldsTotal = state.groupsOfFolds
+ indexDimension: DatatypeLeavesTotal = state.indexDimension
+ indexLeaf: DatatypeLeavesTotal = state.indexLeaf
+ indexMiniGap: DatatypeElephino = state.indexMiniGap
+ leaf1ndex: DatatypeElephino = state.leaf1ndex
+ leafConnectee: DatatypeElephino = state.leafConnectee
+ taskIndex: DatatypeLeavesTotal = state.taskIndex
+ mapShape, leavesTotal, taskDivisions, concurrencyLimit, connectionGraph, dimensionsTotal, countDimensionsGapped, dimensionsUnconstrained, gapRangeStart, gapsWhere, leafAbove, leafBelow, foldGroups, foldsTotal, gap1ndex, gap1ndexCeiling, groupsOfFolds, indexDimension, indexLeaf, indexMiniGap, leaf1ndex, leafConnectee, taskIndex = countSequential(mapShape, leavesTotal, taskDivisions, concurrencyLimit, connectionGraph, dimensionsTotal, countDimensionsGapped, dimensionsUnconstrained, gapRangeStart, gapsWhere, leafAbove, leafBelow, foldGroups, foldsTotal, gap1ndex, gap1ndexCeiling, groupsOfFolds, indexDimension, indexLeaf, indexMiniGap, leaf1ndex, leafConnectee, taskIndex)
+ state = ComputationState(mapShape=mapShape, leavesTotal=leavesTotal, taskDivisions=taskDivisions, concurrencyLimit=concurrencyLimit, countDimensionsGapped=countDimensionsGapped, dimensionsUnconstrained=dimensionsUnconstrained, gapRangeStart=gapRangeStart, gapsWhere=gapsWhere, leafAbove=leafAbove, leafBelow=leafBelow, foldGroups=foldGroups, foldsTotal=foldsTotal, gap1ndex=gap1ndex, gap1ndexCeiling=gap1ndexCeiling, groupsOfFolds=groupsOfFolds, indexDimension=indexDimension, indexLeaf=indexLeaf, indexMiniGap=indexMiniGap, leaf1ndex=leaf1ndex, leafConnectee=leafConnectee, taskIndex=taskIndex)
+ return state
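In 0.8.0 the synthesized module passes a single `ComputationState` through `doTheNeedful` instead of the loose arrays of 0.7.0. A minimal sketch of driving it directly, assuming the required init fields shown in the `theSSOT.py` diff below; the literal values and the direct construction are illustrative only, since the package normally prepares the state for you:

```python
# Hypothetical driver, not package code: field names match ComputationState in
# theSSOT.py; the values are only an example.
from mapFolding.theSSOT import ComputationState
from mapFolding.syntheticModules.numbaCount_doTheNeedful import doTheNeedful

# taskDivisions=0 selects the sequential branch of doTheNeedful.
state = ComputationState(mapShape=(2, 2), leavesTotal=4, taskDivisions=0, concurrencyLimit=1)
state = doTheNeedful(state)
state.getFoldsTotal()          # foldGroups[0:-1].sum() * leavesTotal
print(state.foldsTotal)        # a 2x2 map has 8 distinct foldings (OEIS A001418)
```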
mapFolding/theDao.py CHANGED
@@ -1,5 +1,18 @@
+ from concurrent.futures import Future as ConcurrentFuture, ProcessPoolExecutor
+ from copy import deepcopy
  from mapFolding.theSSOT import ComputationState
- import copy
+ from multiprocessing import set_start_method as multiprocessing_set_start_method
+
+ """
+ - A "leaf" is a unit square in the map
+ - A "gap" is a potential position where a new leaf can be folded
+ - Connections track how leaves can connect above/below each other
+ - Leaves are enumerated starting from 1, not 0; hence, leaf1ndex not leafIndex
+ """
+
+ # When to use multiprocessing.set_start_method https://github.com/hunterhogan/mapFolding/issues/6
+ if __name__ == '__main__':
+ multiprocessing_set_start_method('spawn')

  def activeLeafConnectedToItself(state: ComputationState) -> bool:
  return state.leafConnectee == state.leaf1ndex
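The docstring added above fixes the vocabulary the rest of the module uses. A small illustration, not package code, of what the 1-indexing implies for the arrays that `ComputationState.__post_init__` allocates later in this diff:

```python
# Illustration only: leaf bookkeeping for a 2x2 map (leavesTotal = 4).
# leafAbove and leafBelow get leavesTotal + 1 slots so leaf1ndex 1..4 are valid
# indices and slot 0 serves as the sentinel tested by leafBelowSentinelIs1 (leafBelow[0] == 1).
import numpy

leavesTotal = 4
leafAbove = numpy.zeros(leavesTotal + 1, dtype=numpy.int16)  # dtype here is a guess
leafBelow = numpy.zeros(leavesTotal + 1, dtype=numpy.int16)
# leaf1ndex counts placed leaves starting at 1; index 0 is never a real leaf.
```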
@@ -16,7 +29,7 @@ def activeLeafIsTheFirstLeaf(state: ComputationState) -> bool:
  def allDimensionsAreUnconstrained(state: ComputationState) -> bool:
  return not state.dimensionsUnconstrained

- def backtrack(state: ComputationState) -> ComputationState:
+ def undoLastLeafPlacement(state: ComputationState) -> ComputationState:
  state.leaf1ndex -= 1
  state.leafBelow[state.leafAbove[state.leaf1ndex]] = state.leafBelow[state.leaf1ndex]
  state.leafAbove[state.leafBelow[state.leaf1ndex]] = state.leafAbove[state.leaf1ndex]
@@ -96,7 +109,7 @@ def loopUpToDimensionsTotal(state: ComputationState) -> bool:
  def noGapsHere(state: ComputationState) -> bool:
  return (state.leaf1ndex > 0) and (state.gap1ndex == state.gapRangeStart[state.leaf1ndex - 1])

- def placeLeaf(state: ComputationState) -> ComputationState:
+ def insertLeafAtGap(state: ComputationState) -> ComputationState:
  state.gap1ndex -= 1
  state.leafAbove[state.leaf1ndex] = state.gapsWhere[state.gap1ndex]
  state.leafBelow[state.leaf1ndex] = state.leafBelow[state.leafAbove[state.leaf1ndex]]
@@ -136,42 +149,38 @@ def countInitialize(state: ComputationState) -> ComputationState:
  state = filterCommonGaps(state)
  state = incrementIndexMiniGap(state)
  if thereIsAnActiveLeaf(state):
- state = placeLeaf(state)
+ state = insertLeafAtGap(state)
  if state.gap1ndex > 0:
  break
  return state

- def countParallel(statePARALLEL: ComputationState) -> ComputationState:
- stateCOMPLETE = copy.deepcopy(statePARALLEL)
- for indexSherpa in range(statePARALLEL.taskDivisions):
- state = copy.deepcopy(statePARALLEL)
- state.taskIndex = indexSherpa
- while activeLeafGreaterThan0(state):
- if activeLeafIsTheFirstLeaf(state) or leafBelowSentinelIs1(state):
- if activeLeafGreaterThanLeavesTotal(state):
- state.groupsOfFolds += 1
- else:
- state = initializeVariablesToFindGaps(state)
- while loopUpToDimensionsTotal(state):
- if dimensionsUnconstrainedCondition(state):
- state = decrementDimensionsUnconstrained(state)
- else:
- state = initializeLeafConnectee(state)
- while loopingLeavesConnectedToActiveLeaf(state):
- if thisIsMyTaskIndex(state):
- state = countGaps(state)
- state = updateLeafConnectee(state)
- state = incrementIndexDimension(state)
- state = initializeIndexMiniGap(state)
- while loopingToActiveGapCeiling(state):
- state = filterCommonGaps(state)
- state = incrementIndexMiniGap(state)
- while noGapsHere(state):
- state = backtrack(state)
- if thereIsAnActiveLeaf(state):
- state = placeLeaf(state)
- stateCOMPLETE.foldGroups[state.taskIndex] = state.groupsOfFolds
- return stateCOMPLETE
+ def countParallel(state: ComputationState) -> ComputationState:
+ while activeLeafGreaterThan0(state):
+ if activeLeafIsTheFirstLeaf(state) or leafBelowSentinelIs1(state):
+ if activeLeafGreaterThanLeavesTotal(state):
+ state.groupsOfFolds += 1
+ else:
+ state = initializeVariablesToFindGaps(state)
+ while loopUpToDimensionsTotal(state):
+ if dimensionsUnconstrainedCondition(state):
+ state = decrementDimensionsUnconstrained(state)
+ else:
+ state = initializeLeafConnectee(state)
+ while loopingLeavesConnectedToActiveLeaf(state):
+ if thisIsMyTaskIndex(state):
+ state = countGaps(state)
+ state = updateLeafConnectee(state)
+ state = incrementIndexDimension(state)
+ state = initializeIndexMiniGap(state)
+ while loopingToActiveGapCeiling(state):
+ state = filterCommonGaps(state)
+ state = incrementIndexMiniGap(state)
+ while noGapsHere(state):
+ state = undoLastLeafPlacement(state)
+ if thereIsAnActiveLeaf(state):
+ state = insertLeafAtGap(state)
+ state.foldGroups[state.taskIndex] = state.groupsOfFolds
+ return state

  def countSequential(state: ComputationState) -> ComputationState:
  while activeLeafGreaterThan0(state):
@@ -194,15 +203,24 @@ def countSequential(state: ComputationState) -> ComputationState:
  state = filterCommonGaps(state)
  state = incrementIndexMiniGap(state)
  while noGapsHere(state):
- state = backtrack(state)
+ state = undoLastLeafPlacement(state)
  if thereIsAnActiveLeaf(state):
- state = placeLeaf(state)
+ state = insertLeafAtGap(state)
  state.foldGroups[state.taskIndex] = state.groupsOfFolds
  return state

  def doTheNeedful(state: ComputationState) -> ComputationState:
  state = countInitialize(state)
  if state.taskDivisions > 0:
- return countParallel(state)
+ dictionaryConcurrency: dict[int, ConcurrentFuture[ComputationState]] = {}
+ with ProcessPoolExecutor(state.concurrencyLimit) as concurrencyManager:
+ for indexSherpa in range(state.taskDivisions):
+ stateParallel = deepcopy(state)
+ stateParallel.taskIndex = indexSherpa
+ dictionaryConcurrency[indexSherpa] = concurrencyManager.submit(countParallel, stateParallel)
+ for indexSherpa in range(state.taskDivisions):
+ state.foldGroups[indexSherpa] = dictionaryConcurrency[indexSherpa].result().foldGroups[indexSherpa]
  else:
- return countSequential(state)
+ state = countSequential(state)
+
+ return state
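In the new `doTheNeedful`, each worker receives a deep copy of the state, restricts its search with `thisIsMyTaskIndex` (in the synthesized module: `leafConnectee % taskDivisions == taskIndex`), and records its count in `foldGroups[taskIndex]`; the parent keeps only each worker's own slot and later sums them. A sketch of the recombination arithmetic, with placeholder numbers that are not real fold counts:

```python
# Sketch of the aggregation step only; the per-task counts below are made up.
import numpy

leavesTotal = 6
taskDivisions = 3
foldGroups = numpy.zeros(taskDivisions + 1, dtype=numpy.int64)  # size is an assumption
foldGroups[0:taskDivisions] = (10, 11, 12)  # placeholder groupsOfFolds per taskIndex

# getFoldsTotal(): every slot except the last contributes, scaled by leavesTotal.
foldsTotal = int(foldGroups[0:-1].sum() * leavesTotal)
```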
mapFolding/theSSOT.py CHANGED
@@ -1,3 +1,4 @@
+ from collections.abc import Callable
  from importlib import import_module as importlib_import_module
  from inspect import getfile as inspect_getfile
  from numpy import dtype, int64 as numpy_int64, int16 as numpy_int16, ndarray, signedinteger
@@ -19,33 +20,33 @@ Identifiers: scope and resolution, LEGB (Local, Enclosing, Global, Builtin)
  """

  # I _think_, in theSSOT, I have abstracted the flow settings to only these couple of lines:
+ # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
  packageFlowSynthetic = 'numba'
- Z0Z_packageFlow = 'algorithm'
- # Z0Z_packageFlow = packageFlowSynthetic
-
+ # Z0Z_packageFlow = 'algorithm'
+ Z0Z_packageFlow = packageFlowSynthetic
+ Z0Z_concurrencyPackage = 'multiprocessing'
  # =============================================================================
  # The Wrong Way The Wrong Way The Wrong Way The Wrong Way The Wrong Way
  # Evaluate When Packaging Evaluate When Packaging Evaluate When Packaging

  sourceAlgorithmPACKAGING: str = 'theDao'
  datatypePackagePACKAGING: Final[str] = 'numpy'
- dispatcherCallableAsStrPACKAGING: str = 'doTheNeedful'
+ dispatcherCallablePACKAGING: str = 'doTheNeedful'
  moduleOfSyntheticModulesPACKAGING: Final[str] = 'syntheticModules'

- dataclassModuleAsStrPACKAGING: str = 'theSSOT'
- dataclassIdentifierAsStrPACKAGING: str = 'ComputationState'
- dataclassInstanceAsStrPACKAGING: str = 'state'
- dataclassInstance_Pre_ParallelAsStrPACKAGING = dataclassInstanceAsStrPACKAGING + 'PARALLEL'
- dataclassInstance_Post_ParallelAsStrPACKAGING = dataclassInstanceAsStrPACKAGING + 'COMPLETE'
+ dataclassModulePACKAGING: str = 'theSSOT'
+ dataclassIdentifierPACKAGING: str = 'ComputationState'
+ dataclassInstancePACKAGING: str = 'state'
+ dataclassInstanceTaskDistributionPACKAGING = dataclassInstancePACKAGING + 'Parallel'

- Z0Z_initializeCallableAsStrPACKAGING = 'countInitialize'
- Z0Z_sequentialCallableAsStrPACKAGING = 'countSequential'
- Z0Z_parallelCallableAsStrPACKAGING = 'countParallel'
+ sourceInitializeCallablePACKAGING = 'countInitialize'
+ sourceSequentialCallablePACKAGING = 'countSequential'
+ sourceParallelCallablePACKAGING = 'countParallel'

  try:
- thePackageNameIsPACKAGING: str = tomli_load(Path("../pyproject.toml").open('rb'))["project"]["name"]
+ thePackageNamePACKAGING: str = tomli_load(Path("../pyproject.toml").open('rb'))["project"]["name"]
  except Exception:
- thePackageNameIsPACKAGING: str = "mapFolding"
+ thePackageNamePACKAGING = "mapFolding"

  # =============================================================================
  # The Wrong Way The Wrong Way The Wrong Way The Wrong Way The Wrong Way
@@ -54,21 +55,16 @@ except Exception:
  fileExtensionINSTALLING: str = '.py'

  def getPathPackageINSTALLING() -> Path:
- pathPackage: Path = Path(inspect_getfile(importlib_import_module(thePackageNameIsPACKAGING)))
+ pathPackage: Path = Path(inspect_getfile(importlib_import_module(thePackageNamePACKAGING)))
  if pathPackage.is_file():
  pathPackage = pathPackage.parent
  return pathPackage

- # =============================================================================
- # The Wrong Way The Wrong Way The Wrong Way The Wrong Way The Wrong Way
- # Hardcoding Hardcoding Hardcoding Hardcoding Hardcoding Hardcoding Hardcoding
-
  # =============================================================================
  # The right way, perhaps.

- # =====================
  # Create enduring identifiers from the hopefully transient identifiers above.
- thePackageName: Final[str] = thePackageNameIsPACKAGING
+ thePackageName: Final[str] = thePackageNamePACKAGING
  thePathPackage: Path = getPathPackageINSTALLING()

  """
@@ -81,27 +77,27 @@ NOTE on semiotics: `theIdentifier` vs `identifier`
  """

  theSourceAlgorithm: str = sourceAlgorithmPACKAGING
+ theSourceInitializeCallable = sourceInitializeCallablePACKAGING
+ theSourceSequentialCallable = sourceSequentialCallablePACKAGING
+ theSourceParallelCallable = sourceParallelCallablePACKAGING
  theDatatypePackage: Final[str] = datatypePackagePACKAGING

- theDispatcherCallableAsStr: str = dispatcherCallableAsStrPACKAGING
+ theDispatcherCallable: str = dispatcherCallablePACKAGING

- theDataclassModuleAsStr: str = dataclassModuleAsStrPACKAGING
- theDataclassIdentifierAsStr: str = dataclassIdentifierAsStrPACKAGING
- theDataclassInstanceAsStr: str = dataclassInstanceAsStrPACKAGING
- theDataclassInstance_Pre_ParallelAsStr: str = dataclassInstance_Pre_ParallelAsStrPACKAGING
- theDataclassInstance_Post_ParallelAsStr: str = dataclassInstance_Post_ParallelAsStrPACKAGING
+ theDataclassModule: str = dataclassModulePACKAGING
+ theDataclassIdentifier: str = dataclassIdentifierPACKAGING
+ theDataclassInstance: str = dataclassInstancePACKAGING
+ theDataclassInstanceTaskDistribution: str = dataclassInstanceTaskDistributionPACKAGING

  theFileExtension: str = fileExtensionINSTALLING

  theModuleOfSyntheticModules: Final[str] = moduleOfSyntheticModulesPACKAGING

- Z0Z_initializeCallableAsStr = Z0Z_initializeCallableAsStrPACKAGING
- Z0Z_sequentialCallableAsStr = Z0Z_sequentialCallableAsStrPACKAGING
- Z0Z_parallelCallableAsStr = Z0Z_parallelCallableAsStrPACKAGING
-
  # =============================================================================
- # The right way.
+
+ # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
  concurrencyPackage: str = Z0Z_packageFlow
+ concurrencyPackage = Z0Z_concurrencyPackage

  # =============================================================================
  # The relatively flexible type system needs a different paradigm, but I don't
@@ -137,17 +133,18 @@ class ComputationState:
  mapShape: tuple[DatatypeLeavesTotal, ...]
  leavesTotal: DatatypeLeavesTotal
  taskDivisions: DatatypeLeavesTotal
+ concurrencyLimit: DatatypeElephino

- connectionGraph: Array3D = dataclasses.field(init=False, metadata={'description': 'A 3D array representing the connection graph of the map.'})
+ connectionGraph: Array3D = dataclasses.field(init=False)
  dimensionsTotal: DatatypeLeavesTotal = dataclasses.field(init=False)

- countDimensionsGapped: Array1DLeavesTotal = dataclasses.field(default=None) # pyright: ignore[reportAssignmentType]
- dimensionsUnconstrained: DatatypeLeavesTotal = dataclasses.field(default=None) # pyright: ignore[reportAssignmentType]
- gapRangeStart: Array1DElephino = dataclasses.field(default=None) # pyright: ignore[reportAssignmentType]
- gapsWhere: Array1DLeavesTotal = dataclasses.field(default=None) # pyright: ignore[reportAssignmentType]
- leafAbove: Array1DLeavesTotal = dataclasses.field(default=None) # pyright: ignore[reportAssignmentType]
- leafBelow: Array1DLeavesTotal = dataclasses.field(default=None) # pyright: ignore[reportAssignmentType]
- foldGroups: Array1DFoldsTotal = dataclasses.field(default=None) # pyright: ignore[reportAssignmentType]
+ countDimensionsGapped: Array1DLeavesTotal = dataclasses.field(default=None, init=True) # type: ignore[arg-type, reportAssignmentType]
+ dimensionsUnconstrained: DatatypeLeavesTotal = dataclasses.field(default=None, init=True) # type: ignore[assignment, reportAssignmentType]
+ gapRangeStart: Array1DElephino = dataclasses.field(default=None, init=True) # type: ignore[arg-type, reportAssignmentType]
+ gapsWhere: Array1DLeavesTotal = dataclasses.field(default=None, init=True) # type: ignore[arg-type, reportAssignmentType]
+ leafAbove: Array1DLeavesTotal = dataclasses.field(default=None, init=True) # type: ignore[arg-type, reportAssignmentType]
+ leafBelow: Array1DLeavesTotal = dataclasses.field(default=None, init=True) # type: ignore[arg-type, reportAssignmentType]
+ foldGroups: Array1DFoldsTotal = dataclasses.field(default=None, init=True) # type: ignore[arg-type, reportAssignmentType]

  foldsTotal: DatatypeFoldsTotal = DatatypeFoldsTotal(0)
  gap1ndex: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
@@ -158,10 +155,10 @@ class ComputationState:
  indexMiniGap: DatatypeElephino = DatatypeElephino(0)
  leaf1ndex: DatatypeElephino = DatatypeElephino(1)
  leafConnectee: DatatypeElephino = DatatypeElephino(0)
- taskIndex: DatatypeLeavesTotal = dataclasses.field(default=DatatypeLeavesTotal(0), metadata={'myType': DatatypeLeavesTotal})
- # taskIndex: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
+ taskIndex: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
+ # Efficient translation of Python scalar types to Numba types https://github.com/hunterhogan/mapFolding/issues/8

- def __post_init__(self):
+ def __post_init__(self) -> None:
  from mapFolding.beDRY import makeConnectionGraph, makeDataContainer
  self.dimensionsTotal = DatatypeLeavesTotal(len(self.mapShape))
  self.connectionGraph = makeConnectionGraph(self.mapShape, self.leavesTotal, numpyLeavesTotal)
@@ -186,29 +183,23 @@ class ComputationState:
  if self.leafBelow is None:
  self.leafBelow = makeDataContainer(leavesTotalAsInt + 1, numpyLeavesTotal)

- def getFoldsTotal(self):
+ def getFoldsTotal(self) -> None:
  self.foldsTotal = DatatypeFoldsTotal(self.foldGroups[0:-1].sum() * self.leavesTotal)

- # factory? constructor?
- # state.taskIndex = state.taskIndex.type(indexSherpa)
- # self.fieldName = self.fieldName.fieldType(indexSherpa)
- # state.taskIndex.toMyType(indexSherpa)
-
  # =============================================================================
  # The most right way I know how to implement.

  theLogicalPathModuleSourceAlgorithm: str = '.'.join([thePackageName, theSourceAlgorithm])
  theLogicalPathModuleDispatcher: str = theLogicalPathModuleSourceAlgorithm
- theLogicalPathModuleDataclass: str = '.'.join([thePackageName, theDataclassModuleAsStr])
+ theLogicalPathModuleDataclass: str = '.'.join([thePackageName, theDataclassModule])

  def getSourceAlgorithm() -> ModuleType:
  moduleImported: ModuleType = importlib_import_module(theLogicalPathModuleSourceAlgorithm)
  return moduleImported

- def getAlgorithmDispatcher():
+ def getAlgorithmDispatcher() -> Callable[[ComputationState], ComputationState]:
  moduleImported: ModuleType = getSourceAlgorithm()
- # TODO I think I need to use `inspect` to type the return value
- dispatcherCallable = getattr(moduleImported, theDispatcherCallableAsStr)
+ dispatcherCallable = getattr(moduleImported, theDispatcherCallable)
  return dispatcherCallable

  def getPathSyntheticModules() -> Path:
@@ -235,29 +226,36 @@ def getNumpyDtypeDefault() -> type[signedinteger[Any]]:
  # =============================================================================
  # The coping way.

- class FREAKOUT(Exception): pass
+ class raiseIfNoneGitHubIssueNumber3(Exception): pass

  # =============================================================================
  # Temporary or transient or something; probably still the wrong way

  # THIS IS A STUPID SYSTEM BUT I CAN'T FIGURE OUT AN IMPROVEMENT
+ # NOTE This section for _default_ values probably has value
+ # https://github.com/hunterhogan/mapFolding/issues/4
  theFormatStrModuleSynthetic = "{packageFlow}Count"
  theFormatStrModuleForCallableSynthetic = theFormatStrModuleSynthetic + "_{callableTarget}"

- theModuleDispatcherSynthetic: str = theFormatStrModuleForCallableSynthetic.format(packageFlow=packageFlowSynthetic, callableTarget=theDispatcherCallableAsStr)
+ theModuleDispatcherSynthetic: str = theFormatStrModuleForCallableSynthetic.format(packageFlow=packageFlowSynthetic, callableTarget=theDispatcherCallable)
  theLogicalPathModuleDispatcherSynthetic: str = '.'.join([thePackageName, theModuleOfSyntheticModules, theModuleDispatcherSynthetic])

  # =============================================================================
  # The most right way I know how to implement.

+ # https://github.com/hunterhogan/mapFolding/issues/4
  if Z0Z_packageFlow == packageFlowSynthetic: # pyright: ignore [reportUnnecessaryComparison]
+ # NOTE this as a default value _might_ have value
  theLogicalPathModuleDispatcher = theLogicalPathModuleDispatcherSynthetic

- def getPackageDispatcher():
+ # https://github.com/hunterhogan/mapFolding/issues/4
+ # dynamically set the return type https://github.com/hunterhogan/mapFolding/issues/5
+ def getPackageDispatcher() -> Callable[[ComputationState], ComputationState]:
+ # NOTE but this part, if the package flow is synthetic, probably needs to be delegated
+ # to the authority for creating _that_ synthetic flow.
+
  moduleImported: ModuleType = importlib_import_module(theLogicalPathModuleDispatcher)
- dispatcherCallable = getattr(moduleImported, theDispatcherCallableAsStr)
- from mapFolding.syntheticModules.numbaCountSequential import flattenData
- dispatcherCallable = flattenData
+ dispatcherCallable = getattr(moduleImported, theDispatcherCallable)
  return dispatcherCallable

  """Technical concepts I am likely using and likely want to use more effectively:
@@ -266,8 +264,10 @@ def getPackageDispatcher():
  - Lazy Initialization
  - Separate configuration from business logic

+ ----
  theSSOT and yourSSOT

+ ----
  delay realization/instantiation until a concrete value is desired
  moment of truth: when the value is needed, not when the value is defined
  """
{mapfolding-0.7.0.dist-info → mapfolding-0.8.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: mapFolding
- Version: 0.7.0
+ Version: 0.8.0
  Summary: Count distinct ways to fold a map (or a strip of stamps)
  Author-email: Hunter Hogan <HunterHogan@pm.me>
  License: CC-BY-NC-4.0
@@ -38,12 +38,11 @@ Requires-Dist: tomli
  Requires-Dist: Z0Z_tools
  Provides-Extra: testing
  Requires-Dist: mypy; extra == "testing"
+ Requires-Dist: pytest; extra == "testing"
  Requires-Dist: pytest-cov; extra == "testing"
  Requires-Dist: pytest-env; extra == "testing"
  Requires-Dist: pytest-xdist; extra == "testing"
- Requires-Dist: pytest; extra == "testing"
  Requires-Dist: pyupgrade; extra == "testing"
- Requires-Dist: updateCitation; extra == "testing"

  # mapFolding: Algorithms for enumerating distinct map/stamp folding patterns 🗺️

@@ -113,14 +112,14 @@ Available OEIS sequences:

  ### 4. **Customizing your algorithm**

- - mapFolding\someAssemblyRequired\synthesizeNumbaJob.py (and/or synthesizeNumba____.py, as applicable)
+ - Renovations in progress: ~~mapFolding\someAssemblyRequired\synthesizeNumbaJob.py (and/or synthesizeNumba____.py, as applicable)~~
  - Synthesize a Numba-optimized module for a specific mapShape
  - Synthesize _from_ a module in mapFolding\syntheticModules or from any source you select
  - Use the existing transformation options
  - Or create new ways of transforming the algorithm from its source to a specific job
- - mapFolding\someAssemblyRequired\makeJob.py
+ - Renovations in progress: ~~mapFolding\someAssemblyRequired\makeJob.py~~
  - Initialize data for a specific mapShape
- - mapFolding\someAssemblyRequired\synthesizeNumbaModules.py (and/or synthesizeNumba____.py, as applicable)
+ - Renovations in progress: ~~mapFolding\someAssemblyRequired\synthesizeNumbaModules.py (and/or synthesizeNumba____.py, as applicable)~~
  - Synthesize one or more Numba-optimized modules for parallel or sequential computation
  - Overwrite the modules in mapFolding\syntheticModules or save the module(s) to a custom path
  - Synthesize _from_ the algorithm(s) in mapFolding\theDao.py or from any source you select
@@ -131,7 +130,7 @@ Available OEIS sequences:
  - Modify the algorithms for initializing values, parallel computation, and/or sequential computation
  - Use the modified algorithm(s) in synthesizeNumbaModules.py, above, to create Numba-optimized version(s)
  - Then use a Numba-optimized version in synthesizeNumbaJob.py, above, to create a hyper-optimized version for a specific mapShape
- - mapFolding\theSSOT.py (and/or theSSOTnumba.py and/ or theSSOT____.py, if they exist)
+ - mapFolding\theSSOT.py
  - Modify broad settings or find functions to modify broad settings, such as data structures and their data types
  - Create new settings or groups of settings
  - mapFolding\beDRY.py