mapFolding 0.8.3-py3-none-any.whl → 0.8.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. mapFolding/__init__.py +6 -3
  2. mapFolding/basecamp.py +13 -7
  3. mapFolding/beDRY.py +241 -68
  4. mapFolding/oeis.py +4 -4
  5. mapFolding/reference/hunterNumba.py +1 -1
  6. mapFolding/someAssemblyRequired/__init__.py +40 -20
  7. mapFolding/someAssemblyRequired/_theTypes.py +53 -0
  8. mapFolding/someAssemblyRequired/_tool_Make.py +99 -0
  9. mapFolding/someAssemblyRequired/_tool_Then.py +72 -0
  10. mapFolding/someAssemblyRequired/_toolboxAntecedents.py +358 -0
  11. mapFolding/someAssemblyRequired/_toolboxContainers.py +334 -0
  12. mapFolding/someAssemblyRequired/_toolboxPython.py +62 -0
  13. mapFolding/someAssemblyRequired/getLLVMforNoReason.py +2 -2
  14. mapFolding/someAssemblyRequired/newInliner.py +22 -0
  15. mapFolding/someAssemblyRequired/synthesizeNumbaJob.py +158 -0
  16. mapFolding/someAssemblyRequired/toolboxNumba.py +358 -0
  17. mapFolding/someAssemblyRequired/transformationTools.py +289 -698
  18. mapFolding/syntheticModules/numbaCount_doTheNeedful.py +36 -33
  19. mapFolding/theDao.py +13 -11
  20. mapFolding/theSSOT.py +83 -128
  21. mapFolding/toolboxFilesystem.py +219 -0
  22. {mapfolding-0.8.3.dist-info → mapfolding-0.8.5.dist-info}/METADATA +4 -2
  23. mapfolding-0.8.5.dist-info/RECORD +48 -0
  24. {mapfolding-0.8.3.dist-info → mapfolding-0.8.5.dist-info}/WHEEL +1 -1
  25. tests/conftest.py +56 -52
  26. tests/test_computations.py +42 -32
  27. tests/test_filesystem.py +4 -4
  28. tests/test_other.py +2 -2
  29. tests/test_tasks.py +2 -2
  30. mapFolding/filesystem.py +0 -129
  31. mapFolding/someAssemblyRequired/ingredientsNumba.py +0 -206
  32. mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py +0 -211
  33. mapFolding/someAssemblyRequired/synthesizeNumbaJobVESTIGIAL.py +0 -413
  34. mapFolding/someAssemblyRequired/transformDataStructures.py +0 -168
  35. mapfolding-0.8.3.dist-info/RECORD +0 -43
  36. {mapfolding-0.8.3.dist-info → mapfolding-0.8.5.dist-info}/entry_points.txt +0 -0
  37. {mapfolding-0.8.3.dist-info → mapfolding-0.8.5.dist-info}/licenses/LICENSE +0 -0
  38. {mapfolding-0.8.3.dist-info → mapfolding-0.8.5.dist-info}/top_level.txt +0 -0
mapFolding/__init__.py CHANGED
@@ -10,8 +10,11 @@ transformation tools.
10
10
  Core modules:
11
11
  - basecamp: Public API with simplified interfaces for end users
12
12
  - theDao: Core computational algorithm using a functional state-transformation approach
13
- - beDRY: Utility functions for common operations and parameter management
13
+ - beDRY: Core utility functions implementing consistent data handling, validation, and
14
+ resource management across the package's computational assembly-line
14
15
  - theSSOT: Single Source of Truth for configuration, types, and state management
16
+ - toolboxFilesystem: Cross-platform file management services for storing and retrieving
17
+ computation results with robust error handling and fallback mechanisms
15
18
  - oeis: Interface to the Online Encyclopedia of Integer Sequences for known results
16
19
 
17
20
  Extended functionality:
@@ -24,13 +27,13 @@ Special directories:
24
27
  core algorithm created by the code transformation framework
25
28
  - reference/: Historical implementations and educational resources for algorithm exploration
26
29
  - reference/jobsCompleted/: Contains successful computations for previously unknown values,
27
- including first-ever calculations for 2×19 and 2×20 maps (OEIS A001415)
30
+ including first-ever calculations for 2x19 and 2x20 maps (OEIS A001415)
28
31
 
29
32
  This package strives to balance algorithm readability and understandability with
30
33
  high-performance computation capabilities, allowing users to compute map folding
31
34
  totals for larger dimensions than previously feasible.
32
35
  """
33
- from mapFolding.basecamp import countFolds as countFolds
36
+ from mapFolding.basecamp import countFolds
34
37
  from mapFolding.oeis import clearOEIScache, getOEISids, OEIS_for_n, oeisIDfor_n
35
38
 
36
39
  __all__ = [
mapFolding/basecamp.py CHANGED
@@ -13,14 +13,14 @@ implementation, and optional persistence of results.
13
13
  """
14
14
 
15
15
  from collections.abc import Sequence
16
- from mapFolding.beDRY import outfitCountFolds, setCPUlimit, validateListDimensions
17
- from mapFolding.filesystem import getPathFilenameFoldsTotal, saveFoldsTotal
18
16
  from mapFolding.theSSOT import ComputationState, getPackageDispatcher, The
17
+ from mapFolding.beDRY import outfitCountFolds, setProcessorLimit, validateListDimensions
18
+ from mapFolding.toolboxFilesystem import getPathFilenameFoldsTotal, saveFoldsTotal, saveFoldsTotalFAILearly
19
19
  from os import PathLike
20
- from pathlib import Path
20
+ from pathlib import PurePath
21
21
 
22
22
  def countFolds(listDimensions: Sequence[int]
23
- , pathLikeWriteFoldsTotal: str | PathLike[str] | None = None
23
+ , pathLikeWriteFoldsTotal: PathLike[str] | PurePath | None = None
24
24
  , computationDivisions: int | str | None = None
25
25
  , CPUlimit: int | float | bool | None = None
26
26
  ) -> int:
@@ -54,16 +54,22 @@ def countFolds(listDimensions: Sequence[int]
54
54
  If you want to compute a large `foldsTotal`, dividing the computation into tasks is usually a bad idea. Dividing the algorithm into tasks is inherently inefficient: efficient division into tasks means there would be no overlap in the work performed by each task. When dividing this algorithm, the amount of overlap is between 50% and 90% by all tasks: at least 50% of the work done by every task must be done by _all_ tasks. If you improve the computation time, it will only change by -10 to -50% depending on (at the very least) the ratio of the map dimensions and the number of leaves. If an undivided computation would take 10 hours on your computer, for example, the computation will still take at least 5 hours but you might reduce the time to 9 hours. Most of the time, however, you will increase the computation time. If logicalCores >= leavesTotal, it will probably be faster. If logicalCores <= 2 * leavesTotal, it will almost certainly be slower for all map dimensions.
55
55
  """
56
56
  mapShape: tuple[int, ...] = validateListDimensions(listDimensions)
57
- concurrencyLimit: int = setCPUlimit(CPUlimit, The.concurrencyPackage)
57
+ concurrencyLimit: int = setProcessorLimit(CPUlimit, The.concurrencyPackage)
58
58
  computationStateInitialized: ComputationState = outfitCountFolds(mapShape, computationDivisions, concurrencyLimit)
59
59
 
60
+ if pathLikeWriteFoldsTotal is not None:
61
+ pathFilenameFoldsTotal = getPathFilenameFoldsTotal(computationStateInitialized.mapShape, pathLikeWriteFoldsTotal)
62
+ saveFoldsTotalFAILearly(pathFilenameFoldsTotal)
63
+ else:
64
+ pathFilenameFoldsTotal = None
65
+
60
66
  dispatcherCallableProxy = getPackageDispatcher()
61
67
  computationStateComplete: ComputationState = dispatcherCallableProxy(computationStateInitialized)
68
+ # computationStateComplete: ComputationState = The.dispatcher(computationStateInitialized)
62
69
 
63
70
  computationStateComplete.getFoldsTotal()
64
71
 
65
- if pathLikeWriteFoldsTotal is not None:
66
- pathFilenameFoldsTotal: Path = getPathFilenameFoldsTotal(computationStateComplete.mapShape, pathLikeWriteFoldsTotal)
72
+ if pathFilenameFoldsTotal is not None:
67
73
  saveFoldsTotal(pathFilenameFoldsTotal, computationStateComplete.foldsTotal)
68
74
 
69
75
  return computationStateComplete.foldsTotal
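The revised `countFolds` signature above takes the map dimensions plus optional persistence, task-division, and processor-limit arguments, and the `__init__.py` diff above re-exports it at the package top level. A minimal usage sketch based on those signatures; the dimensions, output directory, and parameter values are illustrative only:

from pathlib import Path
from mapFolding import countFolds

# Count the foldings of a 2x4 map, letting the dispatcher pick the implementation.
foldsTotal = countFolds([2, 4])
print(foldsTotal)

# Optionally persist the result and constrain the computation, per the parameters documented above.
foldsTotal = countFolds(
    [2, 4],
    pathLikeWriteFoldsTotal=Path("results"),  # PathLike/PurePath destination for the total
    computationDivisions=None,                # no division of the computation into tasks
    CPUlimit=0.5,                             # use half of the available processors
)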
mapFolding/beDRY.py CHANGED
@@ -1,34 +1,60 @@
1
1
  """
2
- Utility functions for maintaining DRY (Don't Repeat Yourself) principles in the mapFolding package.
2
+ Core utility functions implementing DRY (Don't Repeat Yourself) principles for the mapFolding package.
3
3
 
4
- This module provides a collection of helper functions that abstract common operations needed
5
- throughout the package, preventing code duplication and ensuring consistency. The functions
6
- manage core aspects of the computation process, including:
4
+ This module serves as the foundation for consistent data management and parameter validation
5
+ across the entire mapFolding computation assembly-line. It provides critical utility functions that:
7
6
 
8
- 1. Resource allocation and system limits management
9
- 2. Data structure initialization and manipulation
10
- 3. Parameter validation and interpretation
11
- 4. Construction of specialized arrays and matrices for the folding algorithm
7
+ 1. Calculate and validate fundamental computational parameters such as leaves total and task divisions.
8
+ 2. Generate specialized connection graphs that define the folding algorithm's constraints.
9
+ 3. Provide centralized resource allocation and system limits management.
10
+ 4. Construct and manage uniform data structures for the computation state.
11
+ 5. Ensure parameter validation and safe type conversion.
12
12
 
13
- The functions in this module serve as a relatively stable API for other modules to use,
14
- particularly for initializing computation state, validating inputs, and creating data
15
- structures needed by the folding algorithms.
13
+ The functions in this module maintain a clear separation between data initialization and algorithm
14
+ implementation, enabling the package to support multiple computational strategies (sequential,
15
+ parallel, and JIT-compiled) while ensuring consistent input handling and state management.
16
+
17
+ These utilities form a stable internal API that other modules depend on, particularly theSSOT
18
+ (Single Source of Truth), theDao (core algorithm), and the synthetic module generators that
19
+ produce optimized implementations.
16
20
  """
17
21
  from collections.abc import Sequence
18
- from mapFolding.theSSOT import ComputationState
19
- from numpy import dtype as numpy_dtype, integer, ndarray
22
+ from mapFolding.theSSOT import ComputationState, numpyIntegerType
23
+ from numpy import dtype as numpy_dtype, int64 as numpy_int64, ndarray
20
24
  from sys import maxsize as sysMaxsize
21
- from typing import Any, TypeVar
25
+ from typing import Any
22
26
  from Z0Z_tools import defineConcurrencyLimit, intInnit, oopsieKwargsie
23
27
  import numpy
24
28
 
25
- numpyIntegerType = TypeVar('numpyIntegerType', bound=integer[Any])
26
-
27
29
  def getLeavesTotal(mapShape: tuple[int, ...]) -> int:
30
+ """
31
+ Calculate the total number of leaves in a map with the given dimensions.
32
+
33
+ The total number of leaves is the product of all dimensions in the map shape.
34
+ This value is foundational for initializing the computation state and determining
35
+ task divisions.
36
+
37
+ Parameters
38
+ ----------
39
+ mapShape
40
+ A tuple of integers representing the dimensions of the map.
41
+
42
+ Returns
43
+ -------
44
+ leavesTotal
45
+ The total number of leaves in the map, calculated as the product of all dimensions.
46
+
47
+ Raises
48
+ ------
49
+ OverflowError
50
+ If the product of dimensions would exceed the system's maximum integer size.
51
+ This check prevents silent numeric overflow issues that could lead to incorrect results.
52
+ """
28
53
  productDimensions = 1
29
54
  for dimension in mapShape:
55
+ # NOTE this check is one-degree short of absurd, but three lines of early absurdity is better than invalid output later. I'd add more checks if I could think of more.
30
56
  if dimension > sysMaxsize // productDimensions:
31
- raise OverflowError(f"I received {dimension=} in {mapShape=}, but the product of the dimensions exceeds the maximum size of an integer on this system.")
57
+ raise OverflowError(f"I received `{dimension = }` in `{mapShape = }`, but the product of the dimensions exceeds the maximum size of an integer on this system.")
32
58
  productDimensions *= dimension
33
59
  return productDimensions
34
60
 
@@ -38,18 +64,14 @@ def getTaskDivisions(computationDivisions: int | str | None, concurrencyLimit: i
38
64
 
39
65
  Parameters
40
66
  ----------
41
- computationDivisions (None)
67
+ computationDivisions: None
42
68
  Specifies how to divide computations:
43
69
  - `None`: no division of the computation into tasks; sets task divisions to 0.
44
- - int: direct set the number of task divisions; cannot exceed the map's total leaves.
70
+ - int: directly set the number of task divisions; cannot exceed the map's total leaves.
45
71
  - `'maximum'`: divides into `leavesTotal`-many `taskDivisions`.
46
- - `'cpu'`: limits the divisions to the number of available CPUs, i.e. `concurrencyLimit`.
72
+ - `'cpu'`: limits the divisions to the number of available CPUs: i.e., `concurrencyLimit`.
47
73
  concurrencyLimit
48
74
  Maximum number of concurrent tasks allowed.
49
- CPUlimit
50
- for error reporting.
51
- listDimensions
52
- for error reporting.
53
75
 
54
76
  Returns
55
77
  -------
@@ -59,41 +81,74 @@ def getTaskDivisions(computationDivisions: int | str | None, concurrencyLimit: i
59
81
  Raises
60
82
  ------
61
83
  ValueError
62
- If computationDivisions is an unsupported type or if resulting task divisions exceed total leaves.
84
+ If `computationDivisions` is an unsupported type or if resulting task divisions exceed total leaves.
63
85
 
64
86
  Notes
65
87
  -----
66
88
  Task divisions should not exceed total leaves or the folds will be over-counted.
67
89
  """
68
90
  taskDivisions = 0
69
- if not computationDivisions:
70
- pass
71
- elif isinstance(computationDivisions, int):
72
- taskDivisions = computationDivisions
73
- elif isinstance(computationDivisions, str): # type: ignore
74
- # 'Unnecessary isinstance call; "str" is always an instance of "str", so sayeth Pylance'. Yeah, well "User is not always an instance of "correct input" so sayeth the programmer.
75
- computationDivisions = computationDivisions.lower()
76
- if computationDivisions == 'maximum':
77
- taskDivisions = leavesTotal
78
- elif computationDivisions == 'cpu':
79
- taskDivisions = min(concurrencyLimit, leavesTotal)
80
- else:
81
- raise ValueError(f"I received {computationDivisions} for the parameter, `computationDivisions`, but the so-called programmer didn't implement code for that.")
91
+ match computationDivisions:
92
+ case None | 0 | False:
93
+ pass
94
+ case int() as intComputationDivisions:
95
+ taskDivisions = intComputationDivisions
96
+ case str() as strComputationDivisions:
97
+ strComputationDivisions = strComputationDivisions.lower()
98
+ match strComputationDivisions:
99
+ case 'maximum':
100
+ taskDivisions = leavesTotal
101
+ case 'cpu':
102
+ taskDivisions = min(concurrencyLimit, leavesTotal)
103
+ case _:
104
+ raise ValueError(f"I received '{strComputationDivisions}' for the parameter, `computationDivisions`, but the string value is not supported.")
105
+ case _:
106
+ raise ValueError(f"I received {computationDivisions} for the parameter, `computationDivisions`, but the type {type(computationDivisions).__name__} is not supported.")
82
107
 
83
108
  if taskDivisions > leavesTotal:
84
- raise ValueError(f"Problem: `taskDivisions`, ({taskDivisions}), is greater than `leavesTotal`, ({leavesTotal}), which will cause duplicate counting of the folds.\n\nChallenge: you cannot directly set `taskDivisions` or `leavesTotal`. They are derived from parameters that may or may not still be named `computationDivisions`, `CPUlimit` , and `listDimensions` and from dubious-quality Python code.")
109
+ raise ValueError(f"Problem: `{taskDivisions = }`, is greater than `{leavesTotal = }`, which will cause duplicate counting of the folds.\n\nChallenge: you cannot directly set `taskDivisions` or `leavesTotal`: they are derived from parameters that may or may not be named `computationDivisions`, `CPUlimit` , and `listDimensions` and from my dubious-quality Python code.")
85
110
  return int(max(0, taskDivisions))
86
111
 
87
- def makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int, datatype: type[numpyIntegerType]) -> ndarray[tuple[int, int, int], numpy_dtype[numpyIntegerType]]:
112
+ def _makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int) -> ndarray[tuple[int, int, int], numpy_dtype[numpy_int64]]:
113
+ """
114
+ Implementation of connection graph generation for map folding.
115
+
116
+ This is the internal implementation that calculates all possible connections between
117
+ leaves in a map folding problem based on Lunnon's algorithm. The function constructs a
118
+ three-dimensional array representing which leaves can be connected to each other for each
119
+ dimension of the map.
120
+
121
+ Parameters
122
+ ----------
123
+ mapShape
124
+ A tuple of integers representing the dimensions of the map.
125
+ leavesTotal
126
+ The total number of leaves in the map.
127
+
128
+ Returns
129
+ -------
130
+ connectionGraph
131
+ A 3D NumPy array with shape (`dimensionsTotal`, `leavesTotal`+1, `leavesTotal`+1)
132
+ where each entry [d,i,j] represents the leaf that would be connected to leaf j
133
+ when inserting leaf i in dimension d.
134
+
135
+ Notes
136
+ -----
137
+ This is an implementation detail and shouldn't be called directly by external code.
138
+ Use `getConnectionGraph` instead, which applies proper typing.
139
+
140
+ The algorithm calculates a coordinate system first, then determines connections
141
+ based on parity rules, boundary conditions, and dimensional constraints.
142
+ """
88
143
  dimensionsTotal = len(mapShape)
89
- cumulativeProduct = numpy.multiply.accumulate([1] + list(mapShape), dtype=datatype)
90
- arrayDimensions = numpy.array(mapShape, dtype=datatype)
91
- coordinateSystem = numpy.zeros((dimensionsTotal, leavesTotal + 1), dtype=datatype)
144
+ cumulativeProduct = numpy.multiply.accumulate([1] + list(mapShape), dtype=numpy_int64)
145
+ arrayDimensions = numpy.array(mapShape, dtype=numpy_int64)
146
+ coordinateSystem = numpy.zeros((dimensionsTotal, leavesTotal + 1), dtype=numpy_int64)
92
147
  for indexDimension in range(dimensionsTotal):
93
148
  for leaf1ndex in range(1, leavesTotal + 1):
94
149
  coordinateSystem[indexDimension, leaf1ndex] = (((leaf1ndex - 1) // cumulativeProduct[indexDimension]) % arrayDimensions[indexDimension] + 1)
95
150
 
96
- connectionGraph = numpy.zeros((dimensionsTotal, leavesTotal + 1, leavesTotal + 1), dtype=datatype)
151
+ connectionGraph = numpy.zeros((dimensionsTotal, leavesTotal + 1, leavesTotal + 1), dtype=numpy_int64)
97
152
  for indexDimension in range(dimensionsTotal):
98
153
  for activeLeaf1ndex in range(1, leavesTotal + 1):
99
154
  for connectee1ndex in range(1, activeLeaf1ndex + 1):
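Based on the `match` logic above, the mapping from `computationDivisions` to `taskDivisions` works out as follows; a short sketch of expected values (the leaf count and concurrency limit are hypothetical):

from mapFolding.beDRY import getTaskDivisions

leavesTotal = 8         # a 2x4 map has 8 leaves
concurrencyLimit = 4    # hypothetical processor limit

getTaskDivisions(None, concurrencyLimit, leavesTotal)       # 0: no division into tasks
getTaskDivisions(3, concurrencyLimit, leavesTotal)          # 3: an integer passes through unchanged
getTaskDivisions('maximum', concurrencyLimit, leavesTotal)  # 8: one task per leaf
getTaskDivisions('cpu', concurrencyLimit, leavesTotal)      # 4: min(concurrencyLimit, leavesTotal)
getTaskDivisions(9, concurrencyLimit, leavesTotal)          # raises ValueError: exceeds leavesTotal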
@@ -110,39 +165,132 @@ def makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int, datatype: t
110
165
  connectionGraph[indexDimension, activeLeaf1ndex, connectee1ndex] = connectee1ndex + cumulativeProduct[indexDimension]
111
166
  return connectionGraph
112
167
 
168
+ def getConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int, datatype: type[numpyIntegerType]) -> ndarray[tuple[int, int, int], numpy_dtype[numpyIntegerType]]:
169
+ """
170
+ Create a properly typed connection graph for the map folding algorithm.
171
+
172
+ This function serves as a typed wrapper around the internal implementation that
173
+ generates connection graphs. It provides the correct type information for the
174
+ returned array, ensuring consistency throughout the computation assembly-line.
175
+
176
+ Parameters
177
+ ----------
178
+ mapShape
179
+ A tuple of integers representing the dimensions of the map.
180
+ leavesTotal
181
+ The total number of leaves in the map.
182
+ datatype
183
+ The NumPy integer type to use for the array elements, ensuring proper
184
+ memory usage and compatibility with the computation state.
185
+
186
+ Returns
187
+ -------
188
+ connectionGraph
189
+ A 3D NumPy array with shape (`dimensionsTotal`, `leavesTotal`+1, `leavesTotal`+1)
190
+ with the specified `datatype`, representing all possible connections between leaves.
191
+ """
192
+ connectionGraph = _makeConnectionGraph(mapShape, leavesTotal)
193
+ connectionGraph = connectionGraph.astype(datatype)
194
+ return connectionGraph
195
+
113
196
  def makeDataContainer(shape: int | tuple[int, ...], datatype: type[numpyIntegerType]) -> ndarray[Any, numpy_dtype[numpyIntegerType]]:
197
+ """
198
+ Create a typed NumPy array container with initialized values.
199
+
200
+ This function centralizes the creation of data containers used throughout the
201
+ computation assembly-line, enabling easy switching between different container types
202
+ or implementation strategies if needed in the future.
203
+
204
+ Parameters
205
+ ----------
206
+ shape
207
+ Either an integer (for 1D arrays) or a tuple of integers (for multi-dimensional arrays)
208
+ specifying the dimensions of the array.
209
+ datatype
210
+ The NumPy integer type to use for the array elements, ensuring proper type
211
+ consistency and memory efficiency.
212
+
213
+ Returns
214
+ -------
215
+ container
216
+ A NumPy array of zeros with the specified shape and `datatype`.
217
+ """
114
218
  return numpy.zeros(shape, dtype=datatype)
115
219
 
116
220
  def outfitCountFolds(mapShape: tuple[int, ...], computationDivisions: int | str | None = None, concurrencyLimit: int = 1) -> ComputationState:
221
+ """
222
+ Initialize a `ComputationState` with validated parameters for map folding calculation.
223
+
224
+ This function serves as the central initialization point for creating a properly
225
+ configured `ComputationState` object, ensuring consistent calculation of the fundamental
226
+ parameters (`leavesTotal` and `taskDivisions`) across the entire package.
227
+
228
+ Parameters
229
+ ----------
230
+ mapShape
231
+ A tuple of integers representing the dimensions of the map.
232
+ computationDivisions: None
233
+ Controls how to divide the computation into parallel tasks. I know it is annoying,
234
+ but please see `getTaskDivisions` for details, so that you and I both know you have the most
235
+ accurate information.
236
+ concurrencyLimit: 1
237
+ Maximum number of concurrent processes to use during computation.
238
+
239
+ Returns
240
+ -------
241
+ computationStateInitialized
242
+ A fully initialized `ComputationState` object that's ready for computation.
243
+
244
+ Notes
245
+ -----
246
+ This function maintains the Single Source of Truth principle for `leavesTotal`
247
+ and `taskDivisions` calculation, ensuring these values are derived consistently
248
+ throughout the package.
249
+ """
117
250
  leavesTotal = getLeavesTotal(mapShape)
118
251
  taskDivisions = getTaskDivisions(computationDivisions, concurrencyLimit, leavesTotal)
119
252
  computationStateInitialized = ComputationState(mapShape, leavesTotal, taskDivisions, concurrencyLimit)
120
253
  return computationStateInitialized
121
254
 
122
- def setCPUlimit(CPUlimit: Any | None, concurrencyPackage: str | None = None) -> int:
123
- """Sets CPU limit for concurrent operations.
255
+ def setProcessorLimit(CPUlimit: Any | None, concurrencyPackage: str | None = None) -> int:
256
+ """
257
+ Sets processor limit for concurrent operations.
124
258
 
125
- If the concurrency is managed by `numba`, the maximum number of CPUs is retrieved from `numba.get_num_threads()` and not by polling the hardware. Therefore, if there are
126
- numba environment variables limiting the number of available CPUs, that will effect this function. That _should_ be a good thing: you control the number of CPUs available
127
- to numba. But if you're not aware of that, you might be surprised by the results.
259
+ Parameters
260
+ ----------
261
+ CPUlimit: None
262
+ Controls processor usage limits:
263
+ - `False`, `None`, or `0`: No limits on processor usage; uses all available processors. All other values will potentially limit processor usage.
264
+ - `True`: Yes, limit the processor usage; limits to 1 processor.
265
+ - Integer `>= 1`: Limits usage to the specified number of processors.
266
+ - Decimal value (`float`) between 0 and 1: Fraction of total processors to use.
267
+ - Decimal value (`float`) between -1 and 0: Fraction of processors to _not_ use.
268
+ - Integer `<= -1`: Subtract the absolute value from total processors.
269
+ concurrencyPackage: None
270
+ Specifies which concurrency package to use:
271
+ - `None` or `'multiprocessing'`: Uses standard `multiprocessing`.
272
+ - `'numba'`: Uses Numba's threading system.
128
273
 
129
- If you are designing custom modules that use numba, note that you must call `numba.set_num_threads()` (i.e., this function) before executing an `import` statement
130
- on a Numba-jitted function. Otherwise, the `numba.set_num_threads()` call will have no effect on the imported function.
274
+ Returns
275
+ -------
276
+ concurrencyLimit
277
+ The actual concurrency limit that was set.
131
278
 
132
- Parameters:
133
- CPUlimit: whether and how to limit the CPU usage. See notes for details.
134
- Returns:
135
- concurrencyLimit: The actual concurrency limit that was set
136
- Raises:
137
- TypeError: If CPUlimit is not of the expected types
279
+ Raises
280
+ ------
281
+ TypeError
282
+ If `CPUlimit` is not of the expected types.
283
+ NotImplementedError
284
+ If `concurrencyPackage` is not supported.
138
285
 
139
- Limits on CPU usage `CPUlimit`:
140
- - `False`, `None`, or `0`: No limits on CPU usage; uses all available CPUs. All other values will potentially limit CPU usage.
141
- - `True`: Yes, limit the CPU usage; limits to 1 CPU.
142
- - Integer `>= 1`: Limits usage to the specified number of CPUs.
143
- - Decimal value (`float`) between 0 and 1: Fraction of total CPUs to use.
144
- - Decimal value (`float`) between -1 and 0: Fraction of CPUs to *not* use.
145
- - Integer `<= -1`: Subtract the absolute value from total CPUs.
286
+ Notes
287
+ -----
288
+ If using `'numba'` as the concurrency package, the maximum number of processors is
289
+ retrieved from `numba.get_num_threads()` rather than by polling the hardware.
290
+ If Numba environment variables limit available processors, that will affect this function.
291
+
292
+ When using Numba, this function must be called before importing any Numba-jitted
293
+ function for this processor limit to affect the Numba-jitted function.
146
294
  """
147
295
  if not (CPUlimit is None or isinstance(CPUlimit, (bool, int, float))):
148
296
  CPUlimit = oopsieKwargsie(CPUlimit)
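The `CPUlimit` rules documented above appear to be delegated to `defineConcurrencyLimit`, imported from `Z0Z_tools` at the top of this module; the sketch below merely restates the documented interpretation for a hypothetical 8-processor machine and is not the package's implementation (the rounding choices are assumptions):

def interpretCPUlimit(CPUlimit, processorsTotal: int = 8) -> int:
    # Illustrative restatement of the documented `CPUlimit` rules.
    if CPUlimit is True:
        return 1                                        # True: limit to one processor
    if CPUlimit is None or CPUlimit is False or CPUlimit == 0:
        return processorsTotal                          # no limit: use every processor
    if isinstance(CPUlimit, float) and 0 < CPUlimit < 1:
        return max(1, int(processorsTotal * CPUlimit))  # fraction of processors to use
    if isinstance(CPUlimit, float) and -1 < CPUlimit < 0:
        return max(1, processorsTotal - int(processorsTotal * abs(CPUlimit)))  # fraction to leave idle
    if isinstance(CPUlimit, int) and CPUlimit >= 1:
        return min(CPUlimit, processorsTotal)           # explicit processor count
    if isinstance(CPUlimit, int) and CPUlimit <= -1:
        return max(1, processorsTotal - abs(CPUlimit))  # hold back this many processors
    raise TypeError(f"Unsupported CPUlimit: {CPUlimit!r}")

assert interpretCPUlimit(None) == 8
assert interpretCPUlimit(True) == 1
assert interpretCPUlimit(0.5) == 4
assert interpretCPUlimit(-2) == 6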
@@ -157,19 +305,44 @@ def setCPUlimit(CPUlimit: Any | None, concurrencyPackage: str | None = None) ->
157
305
  set_num_threads(concurrencyLimit)
158
306
  concurrencyLimit = get_num_threads()
159
307
  case _:
160
- raise NotImplementedError(f"I received {concurrencyPackage=} but I don't know what to do with that.")
308
+ raise NotImplementedError(f"I received `{concurrencyPackage = }` but I don't know what to do with that.")
161
309
  return concurrencyLimit
162
310
 
163
311
  def validateListDimensions(listDimensions: Sequence[int]) -> tuple[int, ...]:
312
+ """
313
+ Validate and normalize dimensions for a map folding problem.
314
+
315
+ This function serves as the gatekeeper for dimension inputs, ensuring that all
316
+ map dimensions provided to the package meet the requirements for valid computation.
317
+ It performs multiple validation steps and normalizes the dimensions into a consistent format.
318
+
319
+ Parameters
320
+ ----------
321
+ listDimensions
322
+ A sequence of integers representing the dimensions of the map.
323
+
324
+ Returns
325
+ -------
326
+ tuple[int, ...]
327
+ A sorted tuple of positive integers representing the validated dimensions.
328
+
329
+ Raises
330
+ ------
331
+ ValueError
332
+ If the input is empty or contains negative values.
333
+ NotImplementedError
334
+ If fewer than two positive dimensions are provided, as this would not
335
+ represent a valid map folding problem.
336
+ """
164
337
  if not listDimensions:
165
- raise ValueError("listDimensions is a required parameter.")
338
+ raise ValueError("`listDimensions` is a required parameter.")
166
339
  listValidated: list[int] = intInnit(listDimensions, 'listDimensions')
167
340
  listNonNegative: list[int] = []
168
341
  for dimension in listValidated:
169
342
  if dimension < 0:
170
- raise ValueError(f"Dimension {dimension} must be non-negative")
343
+ raise ValueError(f"`{dimension = }` in `{listDimensions = }`, must be a non-negative integer.")
171
344
  listNonNegative.append(dimension)
172
345
  dimensionsValid = [dimension for dimension in listNonNegative if dimension > 0]
173
346
  if len(dimensionsValid) < 2:
174
- raise NotImplementedError(f"This function requires listDimensions, {listDimensions}, to have at least two dimensions greater than 0. You may want to look at https://oeis.org/.")
347
+ raise NotImplementedError(f"This function requires `{listDimensions = }` to have at least two dimensions greater than 0. You may want to look at https://oeis.org/.")
175
348
  return tuple(sorted(dimensionsValid))
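Per the validation code above, values are checked as integers, negative dimensions are rejected, zero-sized dimensions are dropped, and the surviving dimensions are returned as a sorted tuple; a brief sketch of the expected behavior:

from mapFolding.beDRY import validateListDimensions

validateListDimensions([5, 3])       # (3, 5): sorted tuple of positive dimensions
validateListDimensions([2, 0, 4])    # (2, 4): zero-sized dimensions are discarded
validateListDimensions([2, -1])      # raises ValueError: negative dimension
validateListDimensions([7])          # raises NotImplementedError: needs at least two positive dimensions
validateListDimensions([])           # raises ValueError: listDimensions is required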
mapFolding/oeis.py CHANGED
@@ -17,6 +17,7 @@ literature and extend sequences beyond their currently known terms.
17
17
  from collections.abc import Callable
18
18
  from datetime import datetime, timedelta
19
19
  from mapFolding.theSSOT import The
20
+ from mapFolding.toolboxFilesystem import writeStringToHere
20
21
  from pathlib import Path
21
22
  from typing import Any, Final, TYPE_CHECKING
22
23
  import argparse
@@ -31,9 +32,9 @@ import warnings
31
32
  if TYPE_CHECKING:
32
33
  from typing import TypedDict
33
34
  else:
34
- TypedDict = dict
35
+ TypedDict = dict[Any, Any]
35
36
 
36
- cacheDays = 7
37
+ cacheDays = 30
37
38
 
38
39
  """
39
40
  Section: make `settingsOEIS`"""
@@ -174,8 +175,7 @@ def getOEISofficial(pathFilenameCache: pathlib.Path, url: str) -> None | str:
174
175
  if not tryCache:
175
176
  httpResponse: urllib.response.addinfourl = urllib.request.urlopen(url)
176
177
  oeisInformation = httpResponse.read().decode('utf-8')
177
- pathFilenameCache.parent.mkdir(parents=True, exist_ok=True)
178
- pathFilenameCache.write_text(oeisInformation)
178
+ writeStringToHere(oeisInformation, pathFilenameCache)
179
179
 
180
180
  if not oeisInformation:
181
181
  warnings.warn(f"Failed to retrieve OEIS sequence information for {pathFilenameCache.stem}.")
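The removed lines show that `getOEISofficial` previously created the cache directory and wrote the file inline; the replacement delegates to `writeStringToHere` from the new `toolboxFilesystem` module, whose body this diff does not show. A hedged sketch of what such a helper presumably does, mirroring the removed lines:

from pathlib import Path, PurePath

def writeStringToHere(this: str, pathFilename: PurePath | str) -> None:
    # Sketch only: ensure the parent directory exists, then write the text.
    pathFilename = Path(pathFilename)
    pathFilename.parent.mkdir(parents=True, exist_ok=True)
    pathFilename.write_text(str(this))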
mapFolding/reference/hunterNumba.py CHANGED
@@ -19,7 +19,7 @@ Performance considerations:
19
19
  - Incorporates lessons from multiple implementation strategies
20
20
 
21
21
  Note: This serves as a reference for manually-optimized code before the development of
22
- the automated transformation pipeline in the main package.
22
+ the automated transformation assembly-line in the main package.
23
23
  """
24
24
 
25
25
  from typing import Any
mapFolding/someAssemblyRequired/__init__.py CHANGED
@@ -14,7 +14,7 @@ Core capabilities:
14
14
  4. Performance Optimization - Apply domain-specific optimizations for numerical computation
15
15
  5. Code Generation - Generate specialized implementations with appropriate imports and syntax
16
16
 
17
- The transformation pipeline supports multiple optimization targets, from general-purpose
17
+ The transformation assembly-line supports multiple optimization targets, from general-purpose
18
18
  acceleration to generating highly-specialized variants optimized for specific input parameters.
19
19
  This multi-level transformation approach allows for both development flexibility and
20
20
  runtime performance, preserving algorithm readability in the source while enabling
@@ -24,23 +24,43 @@ These tools were developed for map folding computation optimization but are desi
24
24
  general-purpose utilities applicable to a wide range of code transformation scenarios,
25
25
  particularly for numerically-intensive algorithms that benefit from just-in-time compilation.
26
26
  """
27
- from mapFolding.someAssemblyRequired.transformationTools import (
28
- ast_Identifier as ast_Identifier,
29
- extractClassDef as extractClassDef,
30
- extractFunctionDef as extractFunctionDef,
31
- ifThis as ifThis,
32
- IngredientsFunction as IngredientsFunction,
33
- IngredientsModule as IngredientsModule,
34
- inlineThisFunctionWithTheseValues as inlineThisFunctionWithTheseValues,
35
- LedgerOfImports as LedgerOfImports,
36
- Make as Make,
37
- makeDictionaryReplacementStatements as makeDictionaryReplacementStatements,
38
- NodeCollector as NodeCollector,
39
- NodeReplacer as NodeReplacer,
40
- RecipeSynthesizeFlow as RecipeSynthesizeFlow,
41
- strDotStrCuzPyStoopid as strDotStrCuzPyStoopid,
42
- Then as Then,
43
- write_astModule as write_astModule,
44
- Z0Z_executeActionUnlessDescendantMatches as Z0Z_executeActionUnlessDescendantMatches,
45
- Z0Z_replaceMatchingASTnodes as Z0Z_replaceMatchingASTnodes,
27
+ from mapFolding.someAssemblyRequired._theTypes import (
28
+ ast_expr_Slice,
29
+ ast_Identifier,
30
+ astClassHasDOTnameNotName,
31
+ astClassHasDOTtarget,
32
+ astClassHasDOTvalue,
33
+ astClassOptionallyHasDOTnameNotName,
34
+ astMosDef,
35
+ Ima_funcTypeUNEDITED,
36
+ Ima_targetTypeUNEDITED,
37
+ ImaAnnotationType,
38
+ ImaAnnotationTypeVar,
39
+ intORlist_ast_type_paramORstr_orNone,
40
+ intORstr_orNone,
41
+ list_ast_type_paramORstr_orNone,
42
+ str_nameDOTname,
43
+ TypeCertified,
44
+ 个,
46
45
  )
46
+
47
+ from mapFolding.someAssemblyRequired._toolboxPython import (
48
+ importLogicalPath2Callable,
49
+ importPathFilename2Callable,
50
+ NodeChanger,
51
+ NodeTourist,
52
+ parseLogicalPath2astModule,
53
+ parsePathFilename2astModule,
54
+ )
55
+
56
+ from mapFolding.someAssemblyRequired._toolboxAntecedents import be, DOT, ifThis, 又
57
+ from mapFolding.someAssemblyRequired._tool_Make import Make
58
+ from mapFolding.someAssemblyRequired._tool_Then import Then
59
+
60
+ from mapFolding.someAssemblyRequired._toolboxContainers import (
61
+ IngredientsFunction,
62
+ IngredientsModule,
63
+ LedgerOfImports,
64
+ RecipeSynthesizeFlow,
65
+ ShatteredDataclass,
66
+ )
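The reorganized `someAssemblyRequired` package re-exports its AST tooling (`NodeChanger`, `NodeTourist`, `Make`, `Then`, the `Ingredients*` containers) from the new internal modules, but their APIs are not shown in this diff. As a generic, standard-library-only illustration of the kind of AST rewriting the transformation assembly-line performs (not the package's API; the function names are hypothetical), here is a transformer that redirects a call target:

import ast

source = "def doTheNeedful(state):\n    return countSequential(state)\n"
tree = ast.parse(source)

class RenameCall(ast.NodeTransformer):
    # Replace calls to one identifier with calls to another.
    def visit_Call(self, node: ast.Call) -> ast.Call:
        self.generic_visit(node)
        if isinstance(node.func, ast.Name) and node.func.id == "countSequential":
            node.func = ast.Name(id="countParallel", ctx=ast.Load())
        return node

print(ast.unparse(ast.fix_missing_locations(RenameCall().visit(tree))))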
mapFolding/someAssemblyRequired/_theTypes.py ADDED
@@ -0,0 +1,53 @@
1
+ """It's still wrong, but typing information is being transmitted between functions, methods, and modules."""
2
+ from typing import Any, TYPE_CHECKING, TypeAlias as typing_TypeAlias, TypeVar as typing_TypeVar
3
+ import ast
4
+
5
+ stuPyd: typing_TypeAlias = str
6
+
7
+ if TYPE_CHECKING:
8
+ """ 3.12 new: ast.ParamSpec, ast.type_param, ast.TypeAlias, ast.TypeVar, ast.TypeVarTuple
9
+ 3.11 new: ast.TryStar"""
10
+ astClassHasDOTnameNotName: typing_TypeAlias = ast.alias | ast.AsyncFunctionDef | ast.ClassDef | ast.FunctionDef | ast.ParamSpec | ast.TypeVar | ast.TypeVarTuple
11
+ astClassHasDOTvalue: typing_TypeAlias = ast.AnnAssign | ast.Assign | ast.Attribute | ast.AugAssign | ast.Await | ast.Constant | ast.DictComp | ast.Expr | ast.FormattedValue | ast.keyword | ast.MatchValue | ast.NamedExpr | ast.Return | ast.Starred | ast.Subscript | ast.TypeAlias | ast.Yield | ast.YieldFrom
12
+ else:
13
+ astClassHasDOTnameNotName = stuPyd
14
+ astClassHasDOTvalue = stuPyd
15
+
16
+ astClassOptionallyHasDOTnameNotName: typing_TypeAlias = ast.ExceptHandler | ast.MatchAs | ast.MatchStar
17
+ astClassHasDOTtarget: typing_TypeAlias = ast.AnnAssign | ast.AsyncFor | ast.AugAssign | ast.comprehension | ast.For | ast.NamedExpr
18
+
19
+ ast_expr_Slice: typing_TypeAlias = ast.expr
20
+ ast_Identifier: typing_TypeAlias = str
21
+ intORlist_ast_type_paramORstr_orNone: typing_TypeAlias = Any
22
+ intORstr_orNone: typing_TypeAlias = Any
23
+ list_ast_type_paramORstr_orNone: typing_TypeAlias = Any
24
+ str_nameDOTname: typing_TypeAlias = stuPyd
25
+ ImaAnnotationType: typing_TypeAlias = ast.Attribute | ast.Constant | ast.Name | ast.Subscript
26
+ ImaAnnotationTypeVar = typing_TypeVar('ImaAnnotationTypeVar', ast.Attribute, ast.Constant, ast.Name, ast.Subscript)
27
+
28
+ Ima_funcTypeUNEDITED: typing_TypeAlias = ast.Attribute | ast.Await | ast.BinOp | ast.BoolOp | ast.Call | ast.Compare | ast.Constant | ast.Dict | ast.DictComp | ast.FormattedValue | ast.GeneratorExp | ast.IfExp | ast.JoinedStr | ast.Lambda | ast.List | ast.ListComp | ast.Name | ast.NamedExpr | ast.Set | ast.SetComp | ast.Slice | ast.Starred | ast.Subscript | ast.Tuple | ast.UnaryOp | ast.Yield | ast.YieldFrom
29
+ Ima_targetTypeUNEDITED: typing_TypeAlias = ast.AST
30
+
31
+ # TODO understand whatever the fuck `typing.TypeVar` is _supposed_ to fucking do.
32
+ TypeCertified = typing_TypeVar('TypeCertified', bound = ast.AST, covariant=True)
33
+ astMosDef = typing_TypeVar('astMosDef', bound=astClassHasDOTnameNotName)
34
+
35
+ 个 = typing_TypeVar('个', bound= ast.AST | ast_Identifier, covariant=True)
36
+
37
+ Ima_ast_boolop: typing_TypeAlias = ast.boolop | ast.And | ast.Or
38
+ Ima_ast_cmpop: typing_TypeAlias = ast.cmpop | ast.Eq | ast.NotEq | ast.Lt | ast.LtE | ast.Gt | ast.GtE | ast.Is | ast.IsNot | ast.In | ast.NotIn
39
+ Ima_ast_excepthandler: typing_TypeAlias = ast.excepthandler | ast.ExceptHandler
40
+ Ima_ast_expr_context: typing_TypeAlias = ast.expr_context | ast.Load | ast.Store | ast.Del
41
+ Ima_ast_expr: typing_TypeAlias = ast.expr | ast.Attribute | ast.Await | ast.BinOp | ast.BoolOp | ast.Call | ast.Compare | ast.Constant | ast.Dict | ast.DictComp | ast.FormattedValue | ast.GeneratorExp | ast.IfExp | ast.JoinedStr | ast.Lambda | ast.List | ast.ListComp | ast.Name | ast.NamedExpr | ast.Set | ast.SetComp | ast.Slice | ast.Starred | ast.Subscript | ast.Tuple | ast.UnaryOp | ast.Yield | ast.YieldFrom
42
+ Ima_ast_mod: typing_TypeAlias = ast.mod | ast.Expression | ast.FunctionType | ast.Interactive | ast.Module
43
+ Ima_ast_operator: typing_TypeAlias = ast.operator | ast.Add | ast.Sub | ast.Mult | ast.MatMult | ast.Div | ast.Mod | ast.Pow | ast.LShift | ast.RShift | ast.BitOr | ast.BitXor | ast.BitAnd | ast.FloorDiv
44
+ Ima_ast_orphan = ast.alias | ast.arg | ast.arguments | ast.comprehension | ast.keyword | ast.match_case | ast.withitem
45
+ iMa_ast_pattern: typing_TypeAlias = ast.pattern | ast.MatchAs | ast.MatchClass | ast.MatchMapping | ast.MatchOr | ast.MatchSequence | ast.MatchSingleton | ast.MatchStar | ast.MatchValue
46
+ Ima_ast_type_ignore: typing_TypeAlias = ast.type_ignore | ast.TypeIgnore
47
+ Ima_ast_unaryop: typing_TypeAlias = ast.unaryop | ast.Invert | ast.Not | ast.UAdd | ast.USub
48
+ if TYPE_CHECKING:
49
+ Ima_ast_stmt: typing_TypeAlias = ast.stmt | ast.AnnAssign | ast.Assert | ast.Assign | ast.AsyncFor | ast.AsyncFunctionDef | ast.AsyncWith | ast.AugAssign | ast.Break | ast.ClassDef | ast.Continue | ast.Delete | ast.Expr | ast.For | ast.FunctionDef | ast.Global | ast.If | ast.Import | ast.ImportFrom | ast.Match | ast.Nonlocal | ast.Pass | ast.Raise | ast.Return | ast.Try | ast.TryStar | ast.TypeAlias | ast.While | ast.With
50
+ Ima_ast_type_param: typing_TypeAlias = ast.type_param | ast.ParamSpec | ast.TypeVar | ast.TypeVarTuple
51
+ else:
52
+ Ima_ast_stmt = stuPyd
53
+ Ima_ast_type_param = stuPyd
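The new `_theTypes.py` guards aliases that reference `ast` classes introduced in Python 3.12 (`ast.ParamSpec`, `ast.TypeVar`, `ast.TypeVarTuple`; note the module's own comment listing what is new in 3.12 and 3.11) behind `TYPE_CHECKING`, falling back to a plain `str` alias at runtime. A minimal sketch of that pattern in isolation, with a hypothetical alias name:

from typing import TYPE_CHECKING, TypeAlias
import ast

if TYPE_CHECKING:
    # The type checker sees the precise union, including 3.12-only node classes.
    NodeWithName: TypeAlias = ast.ClassDef | ast.FunctionDef | ast.TypeVar
else:
    # At runtime the alias degrades to `str`, so importing the module never
    # evaluates attributes that may not exist on older interpreters.
    NodeWithName = str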