mapFolding 0.8.2-py3-none-any.whl → 0.8.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. mapFolding/__init__.py +6 -2
  2. mapFolding/basecamp.py +11 -5
  3. mapFolding/filesystem.py +134 -109
  4. mapFolding/oeis.py +1 -1
  5. mapFolding/reference/__init__.py +7 -0
  6. mapFolding/reference/jobsCompleted/[2x19]/p2x19.py +197 -0
  7. mapFolding/reference/jobsCompleted/__init__.py +50 -0
  8. mapFolding/reference/jobsCompleted/p2x19/p2x19.py +29 -0
  9. mapFolding/someAssemblyRequired/__init__.py +37 -18
  10. mapFolding/someAssemblyRequired/_theTypes.py +35 -0
  11. mapFolding/someAssemblyRequired/_tool_Make.py +92 -0
  12. mapFolding/someAssemblyRequired/_tool_Then.py +65 -0
  13. mapFolding/someAssemblyRequired/_toolboxAntecedents.py +326 -0
  14. mapFolding/someAssemblyRequired/_toolboxContainers.py +306 -0
  15. mapFolding/someAssemblyRequired/_toolboxPython.py +76 -0
  16. mapFolding/someAssemblyRequired/getLLVMforNoReason.py +20 -1
  17. mapFolding/someAssemblyRequired/ingredientsNumba.py +17 -24
  18. mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py +112 -149
  19. mapFolding/someAssemblyRequired/synthesizeNumbaJob.py +247 -0
  20. mapFolding/someAssemblyRequired/transformDataStructures.py +167 -100
  21. mapFolding/someAssemblyRequired/transformationTools.py +63 -678
  22. mapFolding/syntheticModules/__init__.py +1 -0
  23. mapFolding/syntheticModules/numbaCount_doTheNeedful.py +36 -33
  24. mapFolding/theDao.py +13 -11
  25. mapFolding/theSSOT.py +69 -119
  26. {mapfolding-0.8.2.dist-info → mapfolding-0.8.4.dist-info}/METADATA +4 -2
  27. mapfolding-0.8.4.dist-info/RECORD +49 -0
  28. {mapfolding-0.8.2.dist-info → mapfolding-0.8.4.dist-info}/WHEEL +1 -1
  29. tests/conftest.py +34 -29
  30. tests/test_computations.py +40 -31
  31. tests/test_filesystem.py +3 -3
  32. tests/test_other.py +4 -3
  33. mapFolding/someAssemblyRequired/synthesizeNumbaJobVESTIGIAL.py +0 -413
  34. mapfolding-0.8.2.dist-info/RECORD +0 -39
  35. {mapfolding-0.8.2.dist-info → mapfolding-0.8.4.dist-info}/entry_points.txt +0 -0
  36. {mapfolding-0.8.2.dist-info → mapfolding-0.8.4.dist-info}/licenses/LICENSE +0 -0
  37. {mapfolding-0.8.2.dist-info → mapfolding-0.8.4.dist-info}/top_level.txt +0 -0
mapFolding/syntheticModules/__init__.py CHANGED
@@ -0,0 +1 @@
+ """Everything in this directory is synthesized by other modules in the package."""
mapFolding/syntheticModules/numbaCount_doTheNeedful.py CHANGED
@@ -2,6 +2,7 @@ from concurrent.futures import Future as ConcurrentFuture, ProcessPoolExecutor
  from copy import deepcopy
  from mapFolding.theSSOT import Array1DElephino, Array1DFoldsTotal, Array1DLeavesTotal, Array3D, ComputationState, DatatypeElephino, DatatypeFoldsTotal, DatatypeLeavesTotal
  from numba import jit
+ from numpy import array, int16, int64
 
  def countInitialize(state: ComputationState) -> ComputationState:
  while state.leaf1ndex > 0:
@@ -22,11 +23,11 @@ def countInitialize(state: ComputationState) -> ComputationState:
  state.leafConnectee = state.connectionGraph[state.indexDimension, state.leaf1ndex, state.leafBelow[state.leafConnectee]]
  state.indexDimension += 1
  if not state.dimensionsUnconstrained:
- indexLeaf = 0
- while indexLeaf < state.leaf1ndex:
- state.gapsWhere[state.gap1ndexCeiling] = indexLeaf
+ state.indexLeaf = 0
+ while state.indexLeaf < state.leaf1ndex:
+ state.gapsWhere[state.gap1ndexCeiling] = state.indexLeaf
  state.gap1ndexCeiling += 1
- indexLeaf += 1
+ state.indexLeaf += 1
  state.indexMiniGap = state.gap1ndex
  while state.indexMiniGap < state.gap1ndexCeiling:
  state.gapsWhere[state.gap1ndex] = state.gapsWhere[state.indexMiniGap]
@@ -139,36 +140,38 @@ def doTheNeedful(state: ComputationState) -> ComputationState:
  state = countInitialize(state)
  if state.taskDivisions > 0:
  dictionaryConcurrency: dict[int, ConcurrentFuture[ComputationState]] = {}
- with ProcessPoolExecutor(state.concurrencyLimit) as concurrencyManager:
- for indexSherpa in range(state.taskDivisions):
- stateParallel = deepcopy(state)
- stateParallel.taskIndex = indexSherpa
- mapShape: tuple[DatatypeLeavesTotal, ...] = stateParallel.mapShape
- leavesTotal: DatatypeLeavesTotal = stateParallel.leavesTotal
- taskDivisions: DatatypeLeavesTotal = stateParallel.taskDivisions
- concurrencyLimit: DatatypeElephino = stateParallel.concurrencyLimit
- connectionGraph: Array3D = stateParallel.connectionGraph
- dimensionsTotal: DatatypeLeavesTotal = stateParallel.dimensionsTotal
- countDimensionsGapped: Array1DLeavesTotal = stateParallel.countDimensionsGapped
- dimensionsUnconstrained: DatatypeLeavesTotal = stateParallel.dimensionsUnconstrained
- gapRangeStart: Array1DElephino = stateParallel.gapRangeStart
- gapsWhere: Array1DLeavesTotal = stateParallel.gapsWhere
- leafAbove: Array1DLeavesTotal = stateParallel.leafAbove
- leafBelow: Array1DLeavesTotal = stateParallel.leafBelow
- foldGroups: Array1DFoldsTotal = stateParallel.foldGroups
- foldsTotal: DatatypeFoldsTotal = stateParallel.foldsTotal
- gap1ndex: DatatypeLeavesTotal = stateParallel.gap1ndex
- gap1ndexCeiling: DatatypeElephino = stateParallel.gap1ndexCeiling
- groupsOfFolds: DatatypeFoldsTotal = stateParallel.groupsOfFolds
- indexDimension: DatatypeLeavesTotal = stateParallel.indexDimension
- indexLeaf: DatatypeLeavesTotal = stateParallel.indexLeaf
- indexMiniGap: DatatypeElephino = stateParallel.indexMiniGap
- leaf1ndex: DatatypeElephino = stateParallel.leaf1ndex
- leafConnectee: DatatypeElephino = stateParallel.leafConnectee
- taskIndex: DatatypeLeavesTotal = stateParallel.taskIndex
+ stateParallel = deepcopy(state)
+ with ProcessPoolExecutor(stateParallel.concurrencyLimit) as concurrencyManager:
+ for indexSherpa in range(stateParallel.taskDivisions):
+ state = deepcopy(stateParallel)
+ state.taskIndex = indexSherpa
+ mapShape: tuple[DatatypeLeavesTotal, ...] = state.mapShape
+ leavesTotal: DatatypeLeavesTotal = state.leavesTotal
+ taskDivisions: DatatypeLeavesTotal = state.taskDivisions
+ concurrencyLimit: DatatypeElephino = state.concurrencyLimit
+ connectionGraph: Array3D = state.connectionGraph
+ dimensionsTotal: DatatypeLeavesTotal = state.dimensionsTotal
+ countDimensionsGapped: Array1DLeavesTotal = state.countDimensionsGapped
+ dimensionsUnconstrained: DatatypeLeavesTotal = state.dimensionsUnconstrained
+ gapRangeStart: Array1DElephino = state.gapRangeStart
+ gapsWhere: Array1DLeavesTotal = state.gapsWhere
+ leafAbove: Array1DLeavesTotal = state.leafAbove
+ leafBelow: Array1DLeavesTotal = state.leafBelow
+ foldGroups: Array1DFoldsTotal = state.foldGroups
+ foldsTotal: DatatypeFoldsTotal = state.foldsTotal
+ gap1ndex: DatatypeLeavesTotal = state.gap1ndex
+ gap1ndexCeiling: DatatypeElephino = state.gap1ndexCeiling
+ groupsOfFolds: DatatypeFoldsTotal = state.groupsOfFolds
+ indexDimension: DatatypeLeavesTotal = state.indexDimension
+ indexLeaf: DatatypeLeavesTotal = state.indexLeaf
+ indexMiniGap: DatatypeElephino = state.indexMiniGap
+ leaf1ndex: DatatypeElephino = state.leaf1ndex
+ leafConnectee: DatatypeElephino = state.leafConnectee
+ taskIndex: DatatypeLeavesTotal = state.taskIndex
  dictionaryConcurrency[indexSherpa] = concurrencyManager.submit(countParallel, mapShape, leavesTotal, taskDivisions, concurrencyLimit, connectionGraph, dimensionsTotal, countDimensionsGapped, dimensionsUnconstrained, gapRangeStart, gapsWhere, leafAbove, leafBelow, foldGroups, foldsTotal, gap1ndex, gap1ndexCeiling, groupsOfFolds, indexDimension, indexLeaf, indexMiniGap, leaf1ndex, leafConnectee, taskIndex)
- for indexSherpa in range(state.taskDivisions):
- state.foldGroups[indexSherpa] = dictionaryConcurrency[indexSherpa].result()
+ for indexSherpa in range(stateParallel.taskDivisions):
+ stateParallel.foldGroups[indexSherpa] = dictionaryConcurrency[indexSherpa].result()
+ state = stateParallel
  else:
  mapShape: tuple[DatatypeLeavesTotal, ...] = state.mapShape
  leavesTotal: DatatypeLeavesTotal = state.leavesTotal
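In the rewritten `doTheNeedful` above, the snapshot `stateParallel` is taken once, every task gets its own deep copy, and the dataclass fields are unpacked into plain scalars and NumPy arrays before `concurrencyManager.submit`, so the Numba-jitted `countParallel` receives only primitive arguments. A minimal sketch of that fan-out pattern, using a hypothetical reduced state and a stub worker rather than the package's real `ComputationState` and `countParallel`:

```python
from concurrent.futures import Future, ProcessPoolExecutor
from copy import deepcopy
from dataclasses import dataclass, field

import numpy

@dataclass
class MiniState:
    """Hypothetical stand-in for ComputationState."""
    taskDivisions: int
    concurrencyLimit: int
    taskIndex: int = 0
    foldGroups: numpy.ndarray = field(default_factory=lambda: numpy.zeros(4, dtype=numpy.int64))

def countParallelStub(taskIndex: int, taskDivisions: int) -> int:
    # Stand-in for the jitted worker: it sees only plain values, never the dataclass.
    return taskIndex * 10 + taskDivisions

def doTheNeedful(state: MiniState) -> MiniState:
    stateParallel = deepcopy(state)                          # snapshot taken once
    dictionaryConcurrency: dict[int, Future[int]] = {}
    with ProcessPoolExecutor(stateParallel.concurrencyLimit) as concurrencyManager:
        for indexSherpa in range(stateParallel.taskDivisions):
            state = deepcopy(stateParallel)                  # fresh copy per task
            state.taskIndex = indexSherpa
            # unpack fields into plain arguments before submitting
            dictionaryConcurrency[indexSherpa] = concurrencyManager.submit(
                countParallelStub, state.taskIndex, state.taskDivisions)
        for indexSherpa in range(stateParallel.taskDivisions):
            stateParallel.foldGroups[indexSherpa] = dictionaryConcurrency[indexSherpa].result()
    return stateParallel

if __name__ == '__main__':
    print(doTheNeedful(MiniState(taskDivisions=3, concurrencyLimit=2)).foldGroups)
```

Collecting every `.result()` into the snapshot's `foldGroups` and only then treating the snapshot as the final state keeps per-task mutations out of the aggregate, which appears to be the intent of the `state = stateParallel` reassignment in the diff.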
mapFolding/theDao.py CHANGED
@@ -101,11 +101,11 @@ def initializeVariablesToFindGaps(state: ComputationState) -> ComputationState:
  return state
 
  def insertUnconstrainedLeaf(state: ComputationState) -> ComputationState:
- indexLeaf = 0
- while indexLeaf < state.leaf1ndex:
- state.gapsWhere[state.gap1ndexCeiling] = indexLeaf
+ state.indexLeaf = 0
+ while state.indexLeaf < state.leaf1ndex:
+ state.gapsWhere[state.gap1ndexCeiling] = state.indexLeaf
  state.gap1ndexCeiling += 1
- indexLeaf += 1
+ state.indexLeaf += 1
  return state
 
  def leafBelowSentinelIs1(state: ComputationState) -> bool:
@@ -227,13 +227,15 @@ def doTheNeedful(state: ComputationState) -> ComputationState:
  state = countInitialize(state)
  if state.taskDivisions > 0:
  dictionaryConcurrency: dict[int, ConcurrentFuture[ComputationState]] = {}
- with ProcessPoolExecutor(state.concurrencyLimit) as concurrencyManager:
- for indexSherpa in range(state.taskDivisions):
- stateParallel = deepcopy(state)
- stateParallel.taskIndex = indexSherpa
- dictionaryConcurrency[indexSherpa] = concurrencyManager.submit(countParallel, stateParallel)
- for indexSherpa in range(state.taskDivisions):
- state.foldGroups[indexSherpa] = dictionaryConcurrency[indexSherpa].result().foldGroups[indexSherpa]
+ stateParallel = deepcopy(state)
+ with ProcessPoolExecutor(stateParallel.concurrencyLimit) as concurrencyManager:
+ for indexSherpa in range(stateParallel.taskDivisions):
+ state = deepcopy(stateParallel)
+ state.taskIndex = indexSherpa
+ dictionaryConcurrency[indexSherpa] = concurrencyManager.submit(countParallel, state)
+ for indexSherpa in range(stateParallel.taskDivisions):
+ stateParallel.foldGroups[indexSherpa] = dictionaryConcurrency[indexSherpa].result().foldGroups[indexSherpa]
+ state = stateParallel
  else:
  state = countSequential(state)
 
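Here, as in the synthesized module above, the loop counter `indexLeaf` moves from a local variable onto the dataclass (`state.indexLeaf`), so the counter is itself a `ComputationState` field. A minimal sketch of the rewritten `insertUnconstrainedLeaf`, with a hypothetical reduced state standing in for the real dataclass:

```python
from dataclasses import dataclass, field

import numpy

@dataclass
class MiniState:
    """Hypothetical reduced stand-in for ComputationState."""
    leaf1ndex: int
    gap1ndexCeiling: int = 0
    indexLeaf: int = 0
    gapsWhere: numpy.ndarray = field(default_factory=lambda: numpy.zeros(16, dtype=numpy.int16))

def insertUnconstrainedLeaf(state: MiniState) -> MiniState:
    # The loop counter lives on the state object, not in a local variable.
    state.indexLeaf = 0
    while state.indexLeaf < state.leaf1ndex:
        state.gapsWhere[state.gap1ndexCeiling] = state.indexLeaf
        state.gap1ndexCeiling += 1
        state.indexLeaf += 1
    return state

state = insertUnconstrainedLeaf(MiniState(leaf1ndex=4))
print(state.gapsWhere[:state.gap1ndexCeiling])  # [0 1 2 3]
```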
mapFolding/theSSOT.py CHANGED
@@ -21,19 +21,11 @@ from importlib import import_module as importlib_import_module
  from inspect import getfile as inspect_getfile
  from numpy import dtype, int64 as numpy_int64, int16 as numpy_int16, ndarray
  from pathlib import Path
- from sys import modules as sysModules
  from tomli import load as tomli_load
  from types import ModuleType
  from typing import TypeAlias
  import dataclasses
 
- # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
- # I _think_, in theSSOT, I have abstracted the flow settings to only these couple of lines:
- packageFlowSynthetic = 'numba'
- # Z0Z_packageFlow = 'algorithm'
- Z0Z_packageFlow = packageFlowSynthetic
- Z0Z_concurrencyPackage = 'multiprocessing'
-
  # =============================================================================
  # The Wrong Way: Evaluate When Packaging
 
@@ -50,84 +42,113 @@ def getPathPackageINSTALLING() -> Path:
  pathPackage = pathPackage.parent
  return pathPackage
 
+ # =============================================================================
+ # The Wrong Way: HARDCODED
+ # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
+
+ # from mapFolding.someAssemblyRequired.synthesizeNumbaFlow.theNumbaFlow
+ logicalPathModuleDispatcherHARDCODED: str = 'mapFolding.syntheticModules.numbaCount_doTheNeedful'
+ callableDispatcherHARDCODED: str = 'doTheNeedful'
+ concurrencyPackageHARDCODED = 'multiprocessing'
+
+ # =============================================================================
  # The following is an improvement, but it is not the full solution.
  # I hope that the standardized markers, `metadata={'evaluateWhen': 'packaging'}` will help to automate
  # whatever needs to happen so that the following is well implemented.
  @dataclasses.dataclass(frozen=True)
  class PackageSettings:
- concurrencyPackage = Z0Z_concurrencyPackage
+
+ logicalPathModuleDispatcher: str | None = None
+ callableDispatcher: str | None = None
+ concurrencyPackage: str |None = None
  dataclassIdentifier: str = dataclasses.field(default='ComputationState', metadata={'evaluateWhen': 'packaging'})
  dataclassInstance: str = dataclasses.field(default='state', metadata={'evaluateWhen': 'packaging'})
  dataclassInstanceTaskDistributionSuffix: str = dataclasses.field(default='Parallel', metadata={'evaluateWhen': 'packaging'})
  dataclassModule: str = dataclasses.field(default='theSSOT', metadata={'evaluateWhen': 'packaging'})
  datatypePackage: str = dataclasses.field(default='numpy', metadata={'evaluateWhen': 'packaging'})
- dispatcherCallable: str = dataclasses.field(default='doTheNeedful', metadata={'evaluateWhen': 'packaging'})
  fileExtension: str = dataclasses.field(default='.py', metadata={'evaluateWhen': 'installing'})
- moduleOfSyntheticModules: str = dataclasses.field(default='syntheticModules', metadata={'evaluateWhen': 'packaging'})
  packageName: str = dataclasses.field(default = packageNamePACKAGING, metadata={'evaluateWhen': 'packaging'})
  pathPackage: Path = dataclasses.field(default_factory=getPathPackageINSTALLING, init=False, metadata={'evaluateWhen': 'installing'})
  sourceAlgorithm: str = dataclasses.field(default='theDao', metadata={'evaluateWhen': 'packaging'})
+ sourceCallableDispatcher: str = dataclasses.field(default='doTheNeedful', metadata={'evaluateWhen': 'packaging'})
+ sourceCallableInitialize: str = dataclasses.field(default='countInitialize', metadata={'evaluateWhen': 'packaging'})
+ sourceCallableParallel: str = dataclasses.field(default='countParallel', metadata={'evaluateWhen': 'packaging'})
+ sourceCallableSequential: str = dataclasses.field(default='countSequential', metadata={'evaluateWhen': 'packaging'})
  sourceConcurrencyManagerIdentifier: str = dataclasses.field(default='submit', metadata={'evaluateWhen': 'packaging'})
  sourceConcurrencyManagerNamespace: str = dataclasses.field(default='concurrencyManager', metadata={'evaluateWhen': 'packaging'})
- sourceInitializeCallable: str = dataclasses.field(default='countInitialize', metadata={'evaluateWhen': 'packaging'})
- sourceParallelCallable: str = dataclasses.field(default='countParallel', metadata={'evaluateWhen': 'packaging'})
- sourceSequentialCallable: str = dataclasses.field(default='countSequential', metadata={'evaluateWhen': 'packaging'})
+ sourceConcurrencyPackage: str = dataclasses.field(default='multiprocessing', metadata={'evaluateWhen': 'packaging'})
 
  @property # These are not fields, and that annoys me.
  def dataclassInstanceTaskDistribution(self) -> str:
- """ Compute the task distribution identifier by concatenating dataclassInstance and dataclassInstanceTaskDistributionSuffix. """
+ """ During parallel computation, this identifier helps to create deep copies of the dataclass instance. """
  # it follows that `metadata={'evaluateWhen': 'packaging'}`
  return self.dataclassInstance + self.dataclassInstanceTaskDistributionSuffix
 
- @property # These are not fields, and that annoys me.
- def logicalPathModuleSourceAlgorithm(self) -> str:
- """ Compute the logical path module for the source algorithm by joining packageName and sourceAlgorithm. """
- # it follows that `metadata={'evaluateWhen': 'packaging'}`
- return '.'.join([self.packageName, self.sourceAlgorithm])
-
  @property # These are not fields, and that annoys me.
  def logicalPathModuleDataclass(self) -> str:
- """ Compute the logical path module for the dataclass by joining packageName and dataclassModule. """
+ """ The package.module.name logical path to the dataclass. """
  # it follows that `metadata={'evaluateWhen': 'packaging'}`
  return '.'.join([self.packageName, self.dataclassModule])
 
- The = PackageSettings()
+ @property # These are not fields, and that annoys me.
+ def logicalPathModuleSourceAlgorithm(self) -> str:
+ """ The package.module.name logical path to the source algorithm. """
+ # it follows that `metadata={'evaluateWhen': 'packaging'}`
+ return '.'.join([self.packageName, self.sourceAlgorithm])
 
+ @property # These are not fields, and that annoys me.
+ def dispatcher(self) -> Callable[['ComputationState'], 'ComputationState']:
+ """ _The_ callable that connects `countFolds` to the logic that does the work."""
+ logicalPath: str = self.logicalPathModuleDispatcher or self.logicalPathModuleSourceAlgorithm
+ identifier: str = self.callableDispatcher or self.sourceCallableDispatcher
+ moduleImported: ModuleType = importlib_import_module(logicalPath)
+ return getattr(moduleImported, identifier)
+
+ The = PackageSettings(logicalPathModuleDispatcher=logicalPathModuleDispatcherHARDCODED, callableDispatcher=callableDispatcherHARDCODED, concurrencyPackage=concurrencyPackageHARDCODED)
+
+ # To remove this function, I need to learn how to change "conftest.py" to patch this.
+ def getPackageDispatcher() -> Callable[['ComputationState'], 'ComputationState']:
+ """Get the dispatcher callable for the package.
+
+ This function retrieves the dispatcher callable for the package based on the
+ logical path module and callable dispatcher defined in the PackageSettings.
+ """
+ return The.dispatcher
  # =============================================================================
  # Flexible Data Structure System Needs Enhanced Paradigm https://github.com/hunterhogan/mapFolding/issues/9
+ # Efficient translation of Python scalar types to Numba types https://github.com/hunterhogan/mapFolding/issues/8
 
  DatatypeLeavesTotal: TypeAlias = int
- # this would be uint8, but mapShape (2,2,2,2, 2,2,2,2) has 256 leaves, so generic containers must accommodate at least 256 leaves
- numpyLeavesTotal: TypeAlias = numpy_int16
+ NumPyLeavesTotal: TypeAlias = numpy_int16 # this would be uint8, but mapShape (2,2,2,2, 2,2,2,2) has 256 leaves, so generic containers must accommodate at least 256 leaves
 
  DatatypeElephino: TypeAlias = int
- numpyElephino: TypeAlias = numpy_int16
+ NumPyElephino: TypeAlias = numpy_int16
 
  DatatypeFoldsTotal: TypeAlias = int
- numpyFoldsTotal: TypeAlias = numpy_int64
+ NumPyFoldsTotal: TypeAlias = numpy_int64
 
- Array3D: TypeAlias = ndarray[tuple[int, int, int], dtype[numpyLeavesTotal]]
- Array1DLeavesTotal: TypeAlias = ndarray[tuple[int], dtype[numpyLeavesTotal]]
- Array1DElephino: TypeAlias = ndarray[tuple[int], dtype[numpyElephino]]
- Array1DFoldsTotal: TypeAlias = ndarray[tuple[int], dtype[numpyFoldsTotal]]
+ Array3D: TypeAlias = ndarray[tuple[int, int, int], dtype[NumPyLeavesTotal]]
+ Array1DLeavesTotal: TypeAlias = ndarray[tuple[int], dtype[NumPyLeavesTotal]]
+ Array1DElephino: TypeAlias = ndarray[tuple[int], dtype[NumPyElephino]]
+ Array1DFoldsTotal: TypeAlias = ndarray[tuple[int], dtype[NumPyFoldsTotal]]
 
  @dataclasses.dataclass
  class ComputationState:
- mapShape: tuple[DatatypeLeavesTotal, ...]
+ mapShape: tuple[DatatypeLeavesTotal, ...] = dataclasses.field(init=True, metadata={'elementConstructor': 'DatatypeLeavesTotal'}) # NOTE Python is anti-DRY, again, `DatatypeLeavesTotal` needs to match the type
  leavesTotal: DatatypeLeavesTotal
  taskDivisions: DatatypeLeavesTotal
  concurrencyLimit: DatatypeElephino
 
- connectionGraph: Array3D = dataclasses.field(init=False)
+ connectionGraph: Array3D = dataclasses.field(init=False, metadata={'dtype': Array3D.__args__[1].__args__[0]}) # pyright: ignore[reportAttributeAccessIssue]
  dimensionsTotal: DatatypeLeavesTotal = dataclasses.field(init=False)
 
- countDimensionsGapped: Array1DLeavesTotal = dataclasses.field(default=None, init=True) # type: ignore[arg-type, reportAssignmentType]
+ countDimensionsGapped: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # type: ignore[arg-type, reportAssignmentType]
  dimensionsUnconstrained: DatatypeLeavesTotal = dataclasses.field(default=None, init=True) # type: ignore[assignment, reportAssignmentType]
- gapRangeStart: Array1DElephino = dataclasses.field(default=None, init=True) # type: ignore[arg-type, reportAssignmentType]
- gapsWhere: Array1DLeavesTotal = dataclasses.field(default=None, init=True) # type: ignore[arg-type, reportAssignmentType]
- leafAbove: Array1DLeavesTotal = dataclasses.field(default=None, init=True) # type: ignore[arg-type, reportAssignmentType]
- leafBelow: Array1DLeavesTotal = dataclasses.field(default=None, init=True) # type: ignore[arg-type, reportAssignmentType]
- foldGroups: Array1DFoldsTotal = dataclasses.field(default=None, init=True) # type: ignore[arg-type, reportAssignmentType]
+ gapRangeStart: Array1DElephino = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DElephino.__args__[1].__args__[0]}) # type: ignore[arg-type, reportAssignmentType]
+ gapsWhere: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # type: ignore[arg-type, reportAssignmentType]
+ leafAbove: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # type: ignore[arg-type, reportAssignmentType]
+ leafBelow: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # type: ignore[arg-type, reportAssignmentType]
+ foldGroups: Array1DFoldsTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DFoldsTotal.__args__[1].__args__[0]}) # type: ignore[arg-type, reportAssignmentType]
 
  foldsTotal: DatatypeFoldsTotal = DatatypeFoldsTotal(0)
  gap1ndex: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
@@ -139,102 +160,31 @@ class ComputationState:
  leaf1ndex: DatatypeElephino = DatatypeElephino(1)
  leafConnectee: DatatypeElephino = DatatypeElephino(0)
  taskIndex: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
- # Efficient translation of Python scalar types to Numba types https://github.com/hunterhogan/mapFolding/issues/8
 
  def __post_init__(self) -> None:
  from mapFolding.beDRY import makeConnectionGraph, makeDataContainer
  self.dimensionsTotal = DatatypeLeavesTotal(len(self.mapShape))
- self.connectionGraph = makeConnectionGraph(self.mapShape, self.leavesTotal, numpyLeavesTotal)
+ leavesTotalAsInt = int(self.leavesTotal)
+ self.connectionGraph = makeConnectionGraph(self.mapShape, leavesTotalAsInt, self.__dataclass_fields__['connectionGraph'].metadata['dtype'])
 
- if self.dimensionsUnconstrained is None: # pyright: ignore[reportUnnecessaryComparison]
+ if self.dimensionsUnconstrained is None:
  self.dimensionsUnconstrained = DatatypeLeavesTotal(int(self.dimensionsTotal))
 
  if self.foldGroups is None:
- self.foldGroups = makeDataContainer(max(2, int(self.taskDivisions) + 1), numpyFoldsTotal)
+ self.foldGroups = makeDataContainer(max(2, int(self.taskDivisions) + 1), self.__dataclass_fields__['foldGroups'].metadata['dtype'])
  self.foldGroups[-1] = self.leavesTotal
 
- leavesTotalAsInt = int(self.leavesTotal)
+ if self.gapsWhere is None: self.gapsWhere = makeDataContainer(leavesTotalAsInt * leavesTotalAsInt + 1, self.__dataclass_fields__['gapsWhere'].metadata['dtype'])
 
- if self.countDimensionsGapped is None:
- self.countDimensionsGapped = makeDataContainer(leavesTotalAsInt + 1, numpyLeavesTotal)
- if self.gapRangeStart is None:
- self.gapRangeStart = makeDataContainer(leavesTotalAsInt + 1, numpyElephino)
- if self.gapsWhere is None:
- self.gapsWhere = makeDataContainer(leavesTotalAsInt * leavesTotalAsInt + 1, numpyLeavesTotal)
- if self.leafAbove is None:
- self.leafAbove = makeDataContainer(leavesTotalAsInt + 1, numpyLeavesTotal)
- if self.leafBelow is None:
- self.leafBelow = makeDataContainer(leavesTotalAsInt + 1, numpyLeavesTotal)
+ if self.countDimensionsGapped is None: self.countDimensionsGapped = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['countDimensionsGapped'].metadata['dtype'])
+ if self.gapRangeStart is None: self.gapRangeStart = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['gapRangeStart'].metadata['dtype'])
+ if self.leafAbove is None: self.leafAbove = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['leafAbove'].metadata['dtype'])
+ if self.leafBelow is None: self.leafBelow = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['leafBelow'].metadata['dtype'])
 
  def getFoldsTotal(self) -> None:
  self.foldsTotal = DatatypeFoldsTotal(self.foldGroups[0:-1].sum() * self.leavesTotal)
 
- # =============================================================================
-
- # TODO learn how to see this from the user's perspective
- def getPathJobRootDEFAULT() -> Path:
- if 'google.colab' in sysModules:
- pathJobDEFAULT: Path = Path("/content/drive/MyDrive") / "jobs"
- else:
- pathJobDEFAULT = The.pathPackage / "jobs"
- return pathJobDEFAULT
-
- _datatypePackage: str = ''
- def getDatatypePackage() -> str:
- global _datatypePackage
- if not _datatypePackage:
- _datatypePackage = The.datatypePackage
- return _datatypePackage
-
  # =============================================================================
  # The coping way.
 
  class raiseIfNoneGitHubIssueNumber3(Exception): pass
-
- # =============================================================================
- # THIS IS A STUPID SYSTEM BUT I CAN'T FIGURE OUT AN IMPROVEMENT
- # NOTE This section for _default_ values probably has value
- # https://github.com/hunterhogan/mapFolding/issues/4
- theFormatStrModuleSynthetic = "{packageFlow}Count"
- theFormatStrModuleForCallableSynthetic = theFormatStrModuleSynthetic + "_{callableTarget}"
-
- theLogicalPathModuleDispatcher: str = The.logicalPathModuleSourceAlgorithm
-
- theModuleDispatcherSynthetic: str = theFormatStrModuleForCallableSynthetic.format(packageFlow=packageFlowSynthetic, callableTarget=The.dispatcherCallable)
- theLogicalPathModuleDispatcherSynthetic: str = '.'.join([The.packageName, The.moduleOfSyntheticModules, theModuleDispatcherSynthetic])
-
- if Z0Z_packageFlow == packageFlowSynthetic: # pyright: ignore [reportUnnecessaryComparison]
- # NOTE this as a default value _might_ have value
- theLogicalPathModuleDispatcher = theLogicalPathModuleDispatcherSynthetic
-
- # dynamically set the return type https://github.com/hunterhogan/mapFolding/issues/5
- def getPackageDispatcher() -> Callable[[ComputationState], ComputationState]:
- # NOTE but this part, if the package flow is synthetic, probably needs to be delegated
- # to the authority for creating _that_ synthetic flow.
-
- moduleImported: ModuleType = importlib_import_module(theLogicalPathModuleDispatcher)
- dispatcherCallable = getattr(moduleImported, The.dispatcherCallable)
- return dispatcherCallable
-
- """Technical concepts I am likely using and likely want to use more effectively:
- - Configuration Registry
- - Write-Once, Read-Many (WORM) / Immutable Initialization
- - Lazy Initialization
- - Separate configuration from business logic
-
- ----
- theSSOT and yourSSOT
-
- ----
- delay realization/instantiation until a concrete value is desired
- moment of truth: when the value is needed, not when the value is defined
-
- ----
- 2025 March 11
- Note to self: fundamental concept in Python:
- Identifiers: scope and resolution, LEGB (Local, Enclosing, Global, Builtin)
- - Local: Inside the function
- - Enclosing: Inside enclosing functions
- - Global: At the uppermost level
- - Builtin: Python's built-in names
- """
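Two patterns introduced in theSSOT.py above are worth isolating. First, the new `PackageSettings.dispatcher` property resolves the dispatcher lazily: it prefers the configured `logicalPathModuleDispatcher`/`callableDispatcher`, falls back to the source-algorithm defaults, and only imports the module when the property is read. A minimal sketch of that resolution pattern; the module and attribute names here (`math`, `sqrt`, `floor`) are stand-ins, not anything the package uses:

```python
from dataclasses import dataclass
from importlib import import_module
from typing import Callable

@dataclass(frozen=True)
class MiniSettings:
    """Hypothetical stand-in for PackageSettings."""
    logicalPathModuleDispatcher: str | None = None
    callableDispatcher: str | None = None
    logicalPathModuleSourceAlgorithm: str = 'math'   # stand-in default module
    sourceCallableDispatcher: str = 'sqrt'           # stand-in default callable

    @property
    def dispatcher(self) -> Callable:
        # Prefer the configured override, fall back to the source defaults.
        logicalPath = self.logicalPathModuleDispatcher or self.logicalPathModuleSourceAlgorithm
        identifier = self.callableDispatcher or self.sourceCallableDispatcher
        moduleImported = import_module(logicalPath)  # import happens only when asked
        return getattr(moduleImported, identifier)

The = MiniSettings()
print(The.dispatcher(9.0))                                        # resolves math.sqrt -> 3.0
print(MiniSettings(callableDispatcher='floor').dispatcher(2.7))   # override resolves math.floor -> 2
```

Second, the `__post_init__` rewrite reads each array's NumPy dtype from the field's own `metadata` via `self.__dataclass_fields__`, instead of naming module-level aliases. A compact sketch of that pattern, with a hypothetical two-field state and plain `numpy.zeros` standing in for the package's `makeDataContainer`:

```python
import dataclasses

import numpy

@dataclasses.dataclass
class MiniState:
    """Hypothetical stand-in: each array field records its dtype in field metadata."""
    leavesTotal: int
    leafAbove: numpy.ndarray = dataclasses.field(default=None, metadata={'dtype': numpy.int16})   # type: ignore[arg-type]
    foldGroups: numpy.ndarray = dataclasses.field(default=None, metadata={'dtype': numpy.int64})  # type: ignore[arg-type]

    def __post_init__(self) -> None:
        leavesTotalAsInt = int(self.leavesTotal)
        # Allocate each container with the dtype stored on its own field.
        if self.leafAbove is None:
            self.leafAbove = numpy.zeros(leavesTotalAsInt + 1, dtype=self.__dataclass_fields__['leafAbove'].metadata['dtype'])
        if self.foldGroups is None:
            self.foldGroups = numpy.zeros(2, dtype=self.__dataclass_fields__['foldGroups'].metadata['dtype'])

state = MiniState(leavesTotal=4)
print(state.leafAbove.dtype, state.foldGroups.dtype)  # int16 int64
```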
{mapfolding-0.8.2.dist-info → mapfolding-0.8.4.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mapFolding
- Version: 0.8.2
+ Version: 0.8.4
  Summary: Map folding algorithm with code transformation framework for optimizing numerical computations
  Author-email: Hunter Hogan <HunterHogan@pm.me>
  License: CC-BY-NC-4.0
@@ -45,6 +45,7 @@ Requires-Dist: pytest-cov; extra == "testing"
  Requires-Dist: pytest-env; extra == "testing"
  Requires-Dist: pytest-xdist; extra == "testing"
  Requires-Dist: pyupgrade; extra == "testing"
+ Requires-Dist: ruff; extra == "testing"
  Dynamic: license-file
 
  # mapFolding: Algorithms for enumerating distinct map/stamp folding patterns 🗺️
@@ -92,7 +93,7 @@ Use `getOEISids` to get the most up-to-date list of available OEIS IDs.
  (mapFolding) C:\apps\mapFolding> getOEISids
 
  Available OEIS sequences:
- A001415: Number of ways of folding a 2 X n strip of stamps.
+ A001415: Number of ways of folding a 2 X n strip of stamps. (Now extended to n=20!)
  A001416: Number of ways of folding a 3 X n strip of stamps.
  A001417: Number of ways of folding a 2 X 2 X ... X 2 n-dimensional map.
  A001418: Number of ways of folding an n X n sheet of stamps.
@@ -119,6 +120,7 @@ This package offers a comprehensive collection of map folding algorithm implemen
  - **Performance Optimized**:
  - Numba-JIT accelerated implementations up to 1000× faster than pure Python (see [benchmarks](https://github.com/hunterhogan/mapFolding/blob/mapFolding/notes/Speed%20highlights.md))
  - Algorithmic optimizations showcasing subtle yet powerful performance differences (`total_countPlus1vsPlusN.py`)
+ - **New Computations**: First-ever calculations for 2×19 and 2×20 maps in the `reference/jobsCompleted/` directory
 
  The `reference` directory serves as both a historical archive and an educational resource for understanding algorithm evolution.
 
mapfolding-0.8.4.dist-info/RECORD ADDED
@@ -0,0 +1,49 @@
+ mapFolding/__init__.py,sha256=z3joPk4hgIbSEsIWGgkOukl-nrgI5u4wg0mRJ7aSRHM,1982
+ mapFolding/basecamp.py,sha256=CGJOUE0eQsS4EzHjptzJbxg-Oy4t6TsE7P3vgTRuAww,4763
+ mapFolding/beDRY.py,sha256=UhH52BryHQNRjphf_PirtMkV45rhdemdC9PmnpACq7I,9397
+ mapFolding/filesystem.py,sha256=JyeFLlkeMqhsYGp80ViwDjrrDgFaTTdGTewLtla-m00,7132
+ mapFolding/noHomeYet.py,sha256=UKZeWlyn0SKlF9dhYoud7E6gWXpiSEekZOOoJp88WeI,1362
+ mapFolding/oeis.py,sha256=F1tGo2kT9xR3eRaXyxH0bnAWscbn38VS2fIY8-VlLZs,12616
+ mapFolding/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ mapFolding/theDao.py,sha256=MVopt1LzhdIQYA97SEoq9bdzct6hbK0lEyPxBAAlVTc,9934
+ mapFolding/theSSOT.py,sha256=JCejbNNVP6s9dR2vkdttKXleyZrQroYRWUjGQrXyFaI,11845
+ mapFolding/reference/__init__.py,sha256=UIEU8BJR_YDzjFQcLel3XtHzOCJiOUGlGiWzOzbvhik,2206
+ mapFolding/reference/flattened.py,sha256=QK1xG9SllqCoi68e86Hyl9d9ATUAAFNpTQI-3zmcp5I,16072
+ mapFolding/reference/hunterNumba.py,sha256=espFiX92EPZ1Ub1YQVoBnNYvh2kFg1HR6Qa4djx8Ixg,7253
+ mapFolding/reference/irvineJavaPort.py,sha256=UEfIX4QbPLl5jnyfYIyX5YRR3_rYvPUikK8jLehsFko,4076
+ mapFolding/reference/jaxCount.py,sha256=TuDNKOnyhQfuixKmIxO9Algv7dvy7KMGhgsV3h96FGE,14853
+ mapFolding/reference/lunnanNumpy.py,sha256=mMgrgbrBpe4nmo72ThEI-MGH0OwEHmfMPczSXHp2qKo,4357
+ mapFolding/reference/lunnanWhile.py,sha256=ZL8GAQtPs5nJZSgoDl5USrLSS_zs03y98y1Z9E4jOmQ,3799
+ mapFolding/reference/rotatedEntryPoint.py,sha256=5ughpKUT2JQhoAKgoDUdYNjgWQYPGV8v-7dWEAdDmfE,10274
+ mapFolding/reference/total_countPlus1vsPlusN.py,sha256=yJZAVLVdoXqHag2_N6_6CT-Q6HXBgRro-eny93-Rlpw,9307
+ mapFolding/reference/jobsCompleted/__init__.py,sha256=TU93ZGUW1xEkT6d9mQFn_rp5DvRy0ZslEB2Q6MF5ZDc,2596
+ mapFolding/reference/jobsCompleted/[2x19]/p2x19.py,sha256=_tvYtfzMWVo2VtUbIAieoscb4N8FFflgTdW4-ljBUuA,19626
+ mapFolding/reference/jobsCompleted/p2x19/p2x19.py,sha256=eZEw4Me4ocTt6VXoK2-Sbd5SowZtxRIbN9dZmc7OCVg,6395
+ mapFolding/someAssemblyRequired/__init__.py,sha256=v8aBRcRa4d_34SUNu_mZC5K_NC7-buy849k2nnk9lTE,2704
+ mapFolding/someAssemblyRequired/_theTypes.py,sha256=nHFgpts-hVy2lNbyvIyP5JeG-ikaIlbRaMUPnEDwzhc,1924
+ mapFolding/someAssemblyRequired/_tool_Make.py,sha256=nIAjiq-Q2sBvO_bVEGFh3Z79vXmcqcfJCtuhR4Vyzqw,7081
+ mapFolding/someAssemblyRequired/_tool_Then.py,sha256=8ZCI8A6-EtUo02m71h60iJwnWfdwxWiQT0_OWJDFBho,2566
+ mapFolding/someAssemblyRequired/_toolboxAntecedents.py,sha256=USqFn2HKiu2aUYUrUI-UVRq2tAvVw2hLtH2uLJ61ZbY,19110
+ mapFolding/someAssemblyRequired/_toolboxContainers.py,sha256=jjGyKhblHqDiCzjHRfyqnI2UHNzJTe3aBwNofukcf88,15998
+ mapFolding/someAssemblyRequired/_toolboxPython.py,sha256=OSFMNBiyBofzckF6J8OYibm9Zh3GHXB8bJrMggVT8fw,3801
+ mapFolding/someAssemblyRequired/getLLVMforNoReason.py,sha256=CDbesDJSQE-P8uznXIAttRw9f413UpUt-RowK38hqbY,2735
+ mapFolding/someAssemblyRequired/ingredientsNumba.py,sha256=Qb2WVDv5XszwcQCs3zFrodS7G0GKstSqVrKDXBzdMaw,8128
+ mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py,sha256=A9H6gbTNtK0bihxJ98Xv5ePzGeOyYP6e0Fxmj2NTh-M,11186
+ mapFolding/someAssemblyRequired/synthesizeNumbaJob.py,sha256=_TPANqYkWS4O2IhU4EOatRv-OetfFCfk5b6rrxB5i48,13210
+ mapFolding/someAssemblyRequired/transformDataStructures.py,sha256=I4OtFUF7ZNpEzsAbIbcqTifnY1vIwN0v1f-XFQQp3Wk,13558
+ mapFolding/someAssemblyRequired/transformationTools.py,sha256=fXr3XKHuFntKLTxoau9ZnbZJRPOG3aS2BhjJvkgRXRs,7932
+ mapFolding/syntheticModules/__init__.py,sha256=evVFqhCGa-WZKDiLcnQWjs-Bj34eRnfSLqz_d7dFYZY,83
+ mapFolding/syntheticModules/numbaCount_doTheNeedful.py,sha256=3thXThbv2Xo0t_cRGzMbHPFXTBmLClmKejR_Ibu_jOo,15697
+ mapfolding-0.8.4.dist-info/licenses/LICENSE,sha256=NxH5Y8BdC-gNU-WSMwim3uMbID2iNDXJz7fHtuTdXhk,19346
+ tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ tests/conftest.py,sha256=-5rAnacJAuDcG84jeoQAA35GvUTW6YTcT6E_xU7EXQ0,11587
+ tests/test_computations.py,sha256=R5gea8liIE_rBYvgRDIby6GljBazuGgqCeYcqKRjORg,3449
+ tests/test_filesystem.py,sha256=RplMT0GULGQxomQSbk5wLlvNsj7ehDlZmzayatJopp4,3150
+ tests/test_oeis.py,sha256=uxvwmgbnylSDdsVJfuAT0LuYLbIVFwSgdLxHm-xUGBM,5043
+ tests/test_other.py,sha256=AzsCXiX8x5WJ7i0SocWQY6lT30IJg1lKoybx03X2eqU,4281
+ tests/test_tasks.py,sha256=hkZygihT8bCEO2zc-2VcxReQrZJBwgLNbYx0YP4lTDg,2853
+ mapfolding-0.8.4.dist-info/METADATA,sha256=NMXRV8HTCO8Nab_q1Bo3W6i0eAirJ9K453LRGOv86i8,9326
+ mapfolding-0.8.4.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ mapfolding-0.8.4.dist-info/entry_points.txt,sha256=F3OUeZR1XDTpoH7k3wXuRb3KF_kXTTeYhu5AGK1SiOQ,146
+ mapfolding-0.8.4.dist-info/top_level.txt,sha256=1gP2vFaqPwHujGwb3UjtMlLEGN-943VSYFR7V4gDqW8,17
+ mapfolding-0.8.4.dist-info/RECORD,,
{mapfolding-0.8.2.dist-info → mapfolding-0.8.4.dist-info}/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (77.0.3)
+ Generator: setuptools (78.1.0)
  Root-Is-Purelib: true
  Tag: py3-none-any