mapFolding 0.12.1-py3-none-any.whl → 0.12.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. mapFolding/__init__.py +46 -20
  2. mapFolding/_theSSOT.py +81 -0
  3. mapFolding/_theTypes.py +148 -0
  4. mapFolding/basecamp.py +62 -47
  5. mapFolding/beDRY.py +100 -73
  6. mapFolding/dataBaskets.py +226 -31
  7. mapFolding/filesystemToolkit.py +161 -107
  8. mapFolding/oeis.py +388 -174
  9. mapFolding/reference/flattened.py +1 -1
  10. mapFolding/someAssemblyRequired/RecipeJob.py +146 -20
  11. mapFolding/someAssemblyRequired/__init__.py +60 -38
  12. mapFolding/someAssemblyRequired/_toolIfThis.py +125 -35
  13. mapFolding/someAssemblyRequired/_toolkitContainers.py +125 -44
  14. mapFolding/someAssemblyRequired/getLLVMforNoReason.py +35 -26
  15. mapFolding/someAssemblyRequired/infoBooth.py +37 -2
  16. mapFolding/someAssemblyRequired/makeAllModules.py +785 -0
  17. mapFolding/someAssemblyRequired/makeJobTheorem2Numba.py +161 -74
  18. mapFolding/someAssemblyRequired/toolkitNumba.py +218 -36
  19. mapFolding/someAssemblyRequired/transformationTools.py +125 -58
  20. mapfolding-0.12.3.dist-info/METADATA +163 -0
  21. mapfolding-0.12.3.dist-info/RECORD +53 -0
  22. {mapfolding-0.12.1.dist-info → mapfolding-0.12.3.dist-info}/WHEEL +1 -1
  23. tests/__init__.py +28 -44
  24. tests/conftest.py +66 -61
  25. tests/test_computations.py +64 -89
  26. tests/test_filesystem.py +25 -1
  27. tests/test_oeis.py +37 -7
  28. tests/test_other.py +29 -2
  29. tests/test_tasks.py +30 -2
  30. mapFolding/datatypes.py +0 -18
  31. mapFolding/someAssemblyRequired/Z0Z_makeAllModules.py +0 -433
  32. mapFolding/theSSOT.py +0 -34
  33. mapfolding-0.12.1.dist-info/METADATA +0 -184
  34. mapfolding-0.12.1.dist-info/RECORD +0 -53
  35. {mapfolding-0.12.1.dist-info → mapfolding-0.12.3.dist-info}/entry_points.txt +0 -0
  36. {mapfolding-0.12.1.dist-info → mapfolding-0.12.3.dist-info}/licenses/LICENSE +0 -0
  37. {mapfolding-0.12.1.dist-info → mapfolding-0.12.3.dist-info}/top_level.txt +0 -0
mapFolding/beDRY.py CHANGED
@@ -1,45 +1,50 @@
1
1
  """
2
- Core utility functions implementing DRY (Don't Repeat Yourself) principles for the mapFolding package.
3
-
4
- This module serves as the foundation for consistent data management and parameter validation across the entire
5
- mapFolding computation assembly-line. It provides critical utility functions that:
6
-
7
- 1. Calculate and validate fundamental computational parameters such as leaves total and task divisions.
8
- 2. Generate specialized connection graphs that define the folding algorithm's constraints.
9
- 3. Provide centralized resource allocation and system limits management.
10
- 4. Construct and manage uniform data structures for the computation state.
11
- 5. Ensure parameter validation and safe type conversion.
12
-
13
- The functions in this module maintain a clear separation between data initialization and algorithm implementation,
14
- enabling the package to support multiple computational strategies (sequential, parallel, and JIT-compiled) while
15
- ensuring consistent input handling and state management.
16
-
17
- These utilities form a stable internal API that other modules depend on, particularly theSSOT (Single Source of Truth),
18
- theDao (core algorithm), and the synthetic module generators that produce optimized implementations.
2
+ Core computational utilities implementing Lunnon's map folding algorithm.
3
+
4
+ (AI generated docstring)
5
+
6
+ With the configuration foundation established and the type system defined, this
7
+ module provides the essential building blocks that transform mathematical theory
8
+ into executable computation. These utilities implement the fundamental operations
9
+ required by Lunnon's 1971 algorithm, handling dimension validation, connection
10
+ graph generation, and computational resource management.
11
+
12
+ The connection graph generation represents the mathematical heart of the algorithm,
13
+ calculating how leaves connect across dimensions using coordinate systems, parity
14
+ rules, and boundary conditions. This graph becomes the foundation upon which the
15
+ recursive folding analysis operates. Resource management supports efficient handling
16
+ of large-scale problems. Validation functions ensure computational
17
+ integrity, while task division management enables experimental task division strategies.
18
+
19
+ These utilities follow DRY and SSOT principles, providing reusable functions that
20
+ serve as the computational assembly-line components. They prepare the essential
21
+ data structures and computational parameters that the state management system
22
+ requires to orchestrate the complex recursive algorithms.
19
23
  """
24
+
20
25
  from collections.abc import Sequence
21
- from mapFolding import Array1DElephino, Array1DFoldsTotal, Array1DLeavesTotal, Array3D, DatatypeElephino, DatatypeFoldsTotal, DatatypeLeavesTotal, NumPyIntegerType
26
+ from hunterMakesPy import defineConcurrencyLimit, intInnit, oopsieKwargsie
27
+ from mapFolding import NumPyIntegerType
22
28
  from numpy import dtype as numpy_dtype, int64 as numpy_int64, ndarray
23
29
  from sys import maxsize as sysMaxsize
24
30
  from typing import Any
25
- from Z0Z_tools import defineConcurrencyLimit, intInnit, oopsieKwargsie
26
31
  import numpy
27
- import dataclasses
28
32
 
29
33
  def getLeavesTotal(mapShape: tuple[int, ...]) -> int:
30
- """
31
- Calculate the total number of leaves in a map with the given dimensions.
34
+ """Calculate the total number of leaves in a map with the given dimensions.
35
+
36
+ (AI generated docstring)
32
37
 
33
38
  The total number of leaves is the product of all dimensions in the map shape.
34
39
 
35
40
  Parameters
36
41
  ----------
37
- mapShape
42
+ mapShape : tuple[int, ...]
38
43
  A tuple of integers representing the dimensions of the map.
39
44
 
40
45
  Returns
41
46
  -------
42
- leavesTotal
47
+ leavesTotal : int
43
48
  The total number of leaves in the map, calculated as the product of all dimensions.
44
49
 
45
50
  Raises
@@ -47,31 +52,36 @@ def getLeavesTotal(mapShape: tuple[int, ...]) -> int:
47
52
  OverflowError
48
53
  If the product of dimensions would exceed the system's maximum integer size. This check prevents silent numeric
49
54
  overflow issues that could lead to incorrect results.
55
+
50
56
  """
51
57
  productDimensions = 1
52
58
  for dimension in mapShape:
53
59
  # NOTE this check is one-degree short of absurd, but three lines of early absurdity is better than invalid output later. I'd add more checks if I could think of more.
54
60
  if dimension > sysMaxsize // productDimensions:
55
- raise OverflowError(f"I received `{dimension = }` in `{mapShape = }`, but the product of the dimensions exceeds the maximum size of an integer on this system.")
61
+ message = f"I received `{dimension = }` in `{mapShape = }`, but the product of the dimensions exceeds the maximum size of an integer on this system."
62
+ raise OverflowError(message)
56
63
  productDimensions *= dimension
57
64
  return productDimensions
58
65
 
59
66
  def getTaskDivisions(computationDivisions: int | str | None, concurrencyLimit: int, leavesTotal: int) -> int:
60
- """
61
- Determines whether to divide the computation into tasks and how many divisions.
67
+ """Determine whether to divide the computation into tasks and how many divisions.
68
+
69
+ (AI generated docstring)
62
70
 
63
71
  Parameters
64
72
  ----------
65
- computationDivisions: None
66
- Specifies how to divide computations: Please see the documentation in `countFolds` for details. I know it is
73
+ computationDivisions : int | str | None
74
+ Specifies how to divide computations. Please see the documentation in `countFolds` for details. I know it is
67
75
  annoying, but I want to be sure you have the most accurate information.
68
- concurrencyLimit
76
+ concurrencyLimit : int
69
77
  Maximum number of concurrent tasks allowed.
78
+ leavesTotal : int
79
+ Total number of leaves in the map.
70
80
 
71
81
  Returns
72
82
  -------
73
- taskDivisions
74
- How many tasks must finish before the job can compute the total number of folds; `0` means no tasks, only job.
83
+ taskDivisions : int
84
+ How many tasks must finish before the job can compute the total number of folds. `0` means no tasks, only job.
75
85
 
76
86
  Raises
77
87
  ------
@@ -81,6 +91,7 @@ def getTaskDivisions(computationDivisions: int | str | None, concurrencyLimit: i
81
91
  Notes
82
92
  -----
83
93
  Task divisions should not exceed total leaves or the folds will be over-counted.
94
+
84
95
  """
85
96
  taskDivisions = 0
86
97
  match computationDivisions:
@@ -96,17 +107,21 @@ def getTaskDivisions(computationDivisions: int | str | None, concurrencyLimit: i
96
107
  case 'cpu':
97
108
  taskDivisions = min(concurrencyLimit, leavesTotal)
98
109
  case _:
99
- raise ValueError(f"I received '{strComputationDivisions}' for the parameter, `computationDivisions`, but the string value is not supported.")
110
+ message = f"I received '{strComputationDivisions}' for the parameter, `computationDivisions`, but the string value is not supported."
111
+ raise ValueError(message)
100
112
  case _:
101
- raise ValueError(f"I received {computationDivisions} for the parameter, `computationDivisions`, but the type {type(computationDivisions).__name__} is not supported.")
113
+ message = f"I received {computationDivisions} for the parameter, `computationDivisions`, but the type {type(computationDivisions).__name__} is not supported."
114
+ raise ValueError(message)
102
115
 
103
116
  if taskDivisions > leavesTotal:
104
- raise ValueError(f"Problem: `{taskDivisions = }`, is greater than `{leavesTotal = }`, which will cause duplicate counting of the folds.\n\nChallenge: you cannot directly set `taskDivisions` or `leavesTotal`: they are derived from parameters that may or may not be named `computationDivisions`, `CPUlimit` , and `listDimensions` and from my dubious-quality Python code.")
117
+ message = f"Problem: `{taskDivisions = }`, is greater than `{leavesTotal = }`, which will cause duplicate counting of the folds.\n\nChallenge: you cannot directly set `taskDivisions` or `leavesTotal`: they are derived from parameters that may or may not be named `computationDivisions`, `CPUlimit` , and `listDimensions` and from my dubious-quality Python code." # noqa: E501
118
+ raise ValueError(message)
105
119
  return int(max(0, taskDivisions))
106
120
 
107
121
  def _makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int) -> ndarray[tuple[int, int, int], numpy_dtype[numpy_int64]]:
108
- """
109
- Implementation of connection graph generation for map folding.
122
+ """Implement connection graph generation for map folding.
123
+
124
+ (AI generated docstring)
110
125
 
111
126
  This is the internal implementation that calculates all possible connections between leaves in a map folding problem
112
127
  based on Lunnon's algorithm. The function constructs a three-dimensional array representing which leaves can be
@@ -114,14 +129,14 @@ def _makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int) -> ndarray
114
129
 
115
130
  Parameters
116
131
  ----------
117
- mapShape
132
+ mapShape : tuple[int, ...]
118
133
  A tuple of integers representing the dimensions of the map.
119
- leavesTotal
134
+ leavesTotal : int
120
135
  The total number of leaves in the map.
121
136
 
122
137
  Returns
123
138
  -------
124
- connectionGraph
139
+ connectionGraph : ndarray[tuple[int, int, int], numpy_dtype[numpy_int64]]
125
140
  A 3D NumPy array with shape (`dimensionsTotal`, `leavesTotal`+1, `leavesTotal`+1) where each entry [d,i,j]
126
141
  represents the leaf that would be connected to leaf j when inserting leaf i in dimension d.
127
142
 
@@ -132,9 +147,10 @@ def _makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int) -> ndarray
132
147
 
133
148
  The algorithm calculates a coordinate system first, then determines connections based on parity rules, boundary
134
149
  conditions, and dimensional constraints.
150
+
135
151
  """
136
152
  dimensionsTotal = len(mapShape)
137
- cumulativeProduct = numpy.multiply.accumulate([1] + list(mapShape), dtype=numpy_int64)
153
+ cumulativeProduct = numpy.multiply.accumulate([1, *list(mapShape)], dtype=numpy_int64)
138
154
  arrayDimensions = numpy.array(mapShape, dtype=numpy_int64)
139
155
  coordinateSystem = numpy.zeros((dimensionsTotal, leavesTotal + 1), dtype=numpy_int64)
140
156
  for indexDimension in range(dimensionsTotal):
@@ -159,8 +175,9 @@ def _makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int) -> ndarray
159
175
  return connectionGraph
160
176
 
161
177
  def getConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int, datatype: type[NumPyIntegerType]) -> ndarray[tuple[int, int, int], numpy_dtype[NumPyIntegerType]]:
162
- """
163
- Create a properly typed connection graph for the map folding algorithm.
178
+ """Create a properly typed connection graph for the map folding algorithm.
179
+
180
+ (AI generated docstring)
164
181
 
165
182
  This function serves as a typed wrapper around the internal implementation that generates connection graphs. It
166
183
  provides the correct type information for the returned array, ensuring consistency throughout the computation
@@ -168,63 +185,66 @@ def getConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int, datatype: ty
168
185
 
169
186
  Parameters
170
187
  ----------
171
- mapShape
188
+ mapShape : tuple[int, ...]
172
189
  A tuple of integers representing the dimensions of the map.
173
- leavesTotal
190
+ leavesTotal : int
174
191
  The total number of leaves in the map.
175
- datatype
192
+ datatype : type[NumPyIntegerType]
176
193
  The NumPy integer type to use for the array elements, ensuring proper memory usage and compatibility with the
177
194
  computation state.
178
195
 
179
196
  Returns
180
197
  -------
181
- connectionGraph
198
+ connectionGraph : ndarray[tuple[int, int, int], numpy_dtype[NumPyIntegerType]]
182
199
  A 3D NumPy array with shape (`dimensionsTotal`, `leavesTotal`+1, `leavesTotal`+1) with the specified `datatype`,
183
200
  representing all possible connections between leaves.
201
+
184
202
  """
185
203
  connectionGraph = _makeConnectionGraph(mapShape, leavesTotal)
186
- connectionGraph = connectionGraph.astype(datatype)
187
- return connectionGraph
204
+ return connectionGraph.astype(datatype)
188
205
 
189
206
  def makeDataContainer(shape: int | tuple[int, ...], datatype: type[NumPyIntegerType]) -> ndarray[Any, numpy_dtype[NumPyIntegerType]]:
190
- """
191
- Create a typed NumPy array container with initialized values.
207
+ """Create a typed NumPy array container with initialized values.
208
+
209
+ (AI generated docstring)
192
210
 
193
211
  This function centralizes the creation of data containers used throughout the computation assembly-line, enabling
194
212
  easy switching between different container types or implementation strategies if needed in the future.
195
213
 
196
214
  Parameters
197
215
  ----------
198
- shape
216
+ shape : int | tuple[int, ...]
199
217
  Either an integer (for 1D arrays) or a tuple of integers (for multi-dimensional arrays) specifying the
200
218
  dimensions of the array.
201
- datatype
219
+ datatype : type[NumPyIntegerType]
202
220
  The NumPy integer type to use for the array elements, ensuring proper type consistency and memory efficiency.
203
221
 
204
222
  Returns
205
223
  -------
206
- container
224
+ container : ndarray[Any, numpy_dtype[NumPyIntegerType]]
207
225
  A NumPy array of zeros with the specified shape and `datatype`.
226
+
208
227
  """
209
228
  return numpy.zeros(shape, dtype=datatype)
210
229
 
211
230
  def setProcessorLimit(CPUlimit: Any | None, concurrencyPackage: str | None = None) -> int:
212
- """
213
- Whether and how to limit the CPU usage.
231
+ """Set the CPU usage limit for concurrent operations.
232
+
233
+ (AI generated docstring)
214
234
 
215
235
  Parameters
216
236
  ----------
217
- CPUlimit: None
237
+ CPUlimit : Any | None
218
238
  Please see the documentation in `countFolds` for details. I know it is annoying, but I want to be sure you
219
239
  have the most accurate information.
220
- concurrencyPackage: None
221
- Specifies which concurrency package to use:
240
+ concurrencyPackage : str | None = None
241
+ Specifies which concurrency package to use.
222
242
  - `None` or `'multiprocessing'`: Uses standard `multiprocessing`.
223
243
  - `'numba'`: Uses Numba's threading system.
224
244
 
225
245
  Returns
226
246
  -------
227
- concurrencyLimit
247
+ concurrencyLimit : int
228
248
  The actual concurrency limit that was set.
229
249
 
230
250
  Raises
@@ -242,6 +262,7 @@ def setProcessorLimit(CPUlimit: Any | None, concurrencyPackage: str | None = Non
242
262
 
243
263
  When using Numba, this function must be called before importing any Numba-jitted function for this processor limit
244
264
  to affect the Numba-jitted function.
265
+
245
266
  """
246
267
  if not (CPUlimit is None or isinstance(CPUlimit, (bool, int, float))):
247
268
  CPUlimit = oopsieKwargsie(CPUlimit)
@@ -249,20 +270,22 @@ def setProcessorLimit(CPUlimit: Any | None, concurrencyPackage: str | None = Non
249
270
  match concurrencyPackage:
250
271
  case 'multiprocessing' | None:
251
272
  # When to use multiprocessing.set_start_method
252
- # https://github.com/hunterhogan/mapFolding/issues/6
253
- concurrencyLimit: int = defineConcurrencyLimit(CPUlimit)
273
+ # https://github.com/hunterhogan/mapFolding/issues/6 # noqa: ERA001
274
+ concurrencyLimit: int = defineConcurrencyLimit(limit=CPUlimit)
254
275
  case 'numba':
255
- from numba import get_num_threads, set_num_threads
256
- concurrencyLimit = defineConcurrencyLimit(CPUlimit, get_num_threads())
276
+ from numba import get_num_threads, set_num_threads # noqa: PLC0415
277
+ concurrencyLimit = defineConcurrencyLimit(limit=CPUlimit, cpuTotal=get_num_threads())
257
278
  set_num_threads(concurrencyLimit)
258
279
  concurrencyLimit = get_num_threads()
259
280
  case _:
260
- raise NotImplementedError(f"I received `{concurrencyPackage = }` but I don't know what to do with that.")
281
+ message = f"I received `{concurrencyPackage = }` but I don't know what to do with that."
282
+ raise NotImplementedError(message)
261
283
  return concurrencyLimit
262
284
 
263
285
  def validateListDimensions(listDimensions: Sequence[int]) -> tuple[int, ...]:
264
- """
265
- Validate and normalize dimensions for a map folding problem.
286
+ """Validate and normalize dimensions for a map folding problem.
287
+
288
+ (AI generated docstring)
266
289
 
267
290
  This function serves as the gatekeeper for dimension inputs, ensuring that all map dimensions provided to the
268
291
  package meet the requirements for valid computation. It performs multiple validation steps and normalizes the
@@ -270,12 +293,12 @@ def validateListDimensions(listDimensions: Sequence[int]) -> tuple[int, ...]:
270
293
 
271
294
  Parameters
272
295
  ----------
273
- listDimensions
296
+ listDimensions : Sequence[int]
274
297
  A sequence of integers representing the dimensions of the map.
275
298
 
276
299
  Returns
277
300
  -------
278
- mapShape
301
+ mapShape : tuple[int, ...]
279
302
  An _unsorted_ tuple of positive integers representing the validated dimensions.
280
303
 
281
304
  Raises
@@ -284,17 +307,21 @@ def validateListDimensions(listDimensions: Sequence[int]) -> tuple[int, ...]:
284
307
  If the input is empty or contains negative values.
285
308
  NotImplementedError
286
309
  If fewer than two positive dimensions are provided.
310
+
287
311
  """
288
312
  if not listDimensions:
289
- raise ValueError("`listDimensions` is a required parameter.")
313
+ message = "`listDimensions` is a required parameter."
314
+ raise ValueError(message)
290
315
  listOFint: list[int] = intInnit(listDimensions, 'listDimensions')
291
316
  mapDimensions: list[int] = []
292
317
  for dimension in listOFint:
293
318
  if dimension <= 0:
294
- raise ValueError(f"I received `{dimension = }` in `{listDimensions = }`, but all dimensions must be a non-negative integer.")
319
+ message = f"I received `{dimension = }` in `{listDimensions = }`, but all dimensions must be a positive integer."
320
+ raise ValueError(message)
295
321
  mapDimensions.append(dimension)
296
- if len(mapDimensions) < 2:
297
- raise NotImplementedError(f"This function requires `{listDimensions = }` to have at least two dimensions greater than 0. You may want to look at https://oeis.org/.")
322
+ if len(mapDimensions) < 2: # noqa: PLR2004
323
+ message = f"This function requires `{listDimensions = }` to have at least two dimensions greater than 0. You may want to look at https://oeis.org/."
324
+ raise NotImplementedError(message)
298
325
 
299
326
  """
300
327
  I previously sorted the dimensions for a few reasons that may or may not be valid:
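Taken together, the helpers in this file form a small parameter-preparation pipeline: validate the dimensions, derive the leaf count, settle the concurrency limit, and only then decide on task divisions and build the connection graph. The sketch below strings them together for a hypothetical 2x4 map; it assumes the import path mapFolding.beDRY and the signatures shown in the new version of this file, so treat it as an illustration rather than code shipped in the package.

# Hedged sketch, not from the package: exercises the beDRY helpers with the
# signatures shown in this diff; the 2x4 map and the 'cpu' argument are illustrative.
import numpy
from mapFolding.beDRY import (
    getConnectionGraph, getLeavesTotal, getTaskDivisions, setProcessorLimit, validateListDimensions)

mapShape = validateListDimensions([2, 4])       # ValueError / NotImplementedError guards shown above
leavesTotal = getLeavesTotal(mapShape)          # 2 * 4 = 8, checked against integer overflow
concurrencyLimit = setProcessorLimit(None)      # defers to hunterMakesPy.defineConcurrencyLimit
taskDivisions = getTaskDivisions('cpu', concurrencyLimit, leavesTotal)  # min(concurrencyLimit, leavesTotal)

connectionGraph = getConnectionGraph(mapShape, leavesTotal, numpy.int64)
# Shape follows the docstring: (dimensionsTotal, leavesTotal + 1, leavesTotal + 1) -> (2, 9, 9).
print(mapShape, leavesTotal, taskDivisions, connectionGraph.shape)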
mapFolding/dataBaskets.py CHANGED
@@ -1,29 +1,103 @@
1
+ """
2
+ Computational state orchestration for map folding analysis.
3
+
4
+ (AI generated docstring)
5
+
6
+ Building upon the core utilities and their generated data structures, this module
7
+ orchestrates the complex computational state required for Lunnon's recursive
8
+ algorithm execution. The state classes serve as both data containers and computational
9
+ interfaces, managing the intricate arrays, indices, and control structures that
10
+ guide the folding pattern discovery process.
11
+
12
+ Each state class encapsulates a specific computational scenario: sequential processing
13
+ for standard analysis, experimental task division for research applications, and specialized
14
+ leaf sequence tracking for mathematical exploration. The automatic initialization
15
+ integrates seamlessly with the type system and core utilities, ensuring proper
16
+ array allocation and connection graph integration.
17
+
18
+ These state management classes bridge the gap between the foundational computational
19
+ building blocks and the persistent storage system. They maintain computational
20
+ integrity throughout the recursive analysis while providing the structured data
21
+ access patterns that enable efficient result persistence and retrieval.
22
+ """
1
23
  from mapFolding import (
2
- Array1DElephino,
3
- Array1DLeavesTotal,
4
- Array3D,
5
- DatatypeElephino,
6
- DatatypeFoldsTotal,
7
- DatatypeLeavesTotal,
8
- getConnectionGraph,
9
- getLeavesTotal,
10
- makeDataContainer,
11
- )
24
+ Array1DElephino, Array1DLeavesTotal, Array3D, DatatypeElephino, DatatypeFoldsTotal, DatatypeLeavesTotal,
25
+ getConnectionGraph, getLeavesTotal, makeDataContainer)
12
26
  import dataclasses
13
27
 
14
28
  @dataclasses.dataclass
15
29
  class MapFoldingState:
30
+ """Core computational state for map folding algorithms.
31
+
32
+ (AI generated docstring)
33
+
34
+ This class encapsulates all data needed to perform map folding computations,
35
+ from the basic map dimensions through the complex internal arrays needed
36
+ for efficient algorithmic processing. It serves as both a data container
37
+ and a computational interface, providing properties and methods that
38
+ abstract the underlying complexity.
39
+
40
+ The class handles automatic initialization of all computational arrays
41
+ based on the map dimensions, ensuring consistent sizing and type usage
42
+ throughout the computation. It also manages the relationship between
43
+ different data domains (leaves, elephino, folds) defined in the type system.
44
+
45
+ Key Design Features include automatic array sizing based on map dimensions,
46
+ type-safe access to computational data, lazy initialization of expensive arrays,
47
+ integration with NumPy for performance, and metadata preservation for code generation.
48
+
49
+ Attributes
50
+ ----------
51
+ mapShape : tuple[DatatypeLeavesTotal, ...]
52
+ Dimensions of the map being analyzed for folding patterns.
53
+ groupsOfFolds : DatatypeFoldsTotal = DatatypeFoldsTotal(0)
54
+ Current count of distinct folding pattern groups discovered.
55
+ gap1ndex : DatatypeElephino = DatatypeElephino(0)
56
+ Current position in gap enumeration algorithms.
57
+ gap1ndexCeiling : DatatypeElephino = DatatypeElephino(0)
58
+ Upper bound for gap enumeration operations.
59
+ indexDimension : DatatypeLeavesTotal = DatatypeLeavesTotal(0)
60
+ Current dimension being processed in multi-dimensional algorithms.
61
+ indexLeaf : DatatypeLeavesTotal = DatatypeLeavesTotal(0)
62
+ Current leaf being processed in sequential algorithms.
63
+ indexMiniGap : DatatypeElephino = DatatypeElephino(0)
64
+ Current position within a gap subdivision.
65
+ leaf1ndex : DatatypeLeavesTotal = DatatypeLeavesTotal(1)
66
+ One-based leaf index for algorithmic compatibility.
67
+ leafConnectee : DatatypeLeavesTotal = DatatypeLeavesTotal(0)
68
+ Target leaf for connection operations.
69
+ dimensionsUnconstrained : DatatypeLeavesTotal = None
70
+ Count of dimensions not subject to folding constraints.
71
+ countDimensionsGapped : Array1DLeavesTotal = None
72
+ Array tracking gap counts across dimensions.
73
+ gapRangeStart : Array1DElephino = None
74
+ Array of starting positions for gap ranges.
75
+ gapsWhere : Array1DLeavesTotal = None
76
+ Array indicating locations of gaps in the folding pattern.
77
+ leafAbove : Array1DLeavesTotal = None
78
+ Array mapping each leaf to the leaf above it in the folding.
79
+ leafBelow : Array1DLeavesTotal = None
80
+ Array mapping each leaf to the leaf below it in the folding.
81
+ connectionGraph : Array3D
82
+ Three-dimensional representation of leaf connectivity.
83
+ dimensionsTotal : DatatypeLeavesTotal
84
+ Total number of dimensions in the map.
85
+ leavesTotal : DatatypeLeavesTotal
86
+ Total number of individual leaves in the map.
87
+
88
+ """
89
+
16
90
  mapShape: tuple[DatatypeLeavesTotal, ...] = dataclasses.field(init=True, metadata={'elementConstructor': 'DatatypeLeavesTotal'})
17
91
 
18
92
  groupsOfFolds: DatatypeFoldsTotal = dataclasses.field(default=DatatypeFoldsTotal(0), metadata={'theCountingIdentifier': True})
19
93
 
20
- gap1ndex: DatatypeElephino = DatatypeElephino(0)
21
- gap1ndexCeiling: DatatypeElephino = DatatypeElephino(0)
22
- indexDimension: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
23
- indexLeaf: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
24
- indexMiniGap: DatatypeElephino = DatatypeElephino(0)
25
- leaf1ndex: DatatypeLeavesTotal = DatatypeLeavesTotal(1)
26
- leafConnectee: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
94
+ gap1ndex: DatatypeElephino = DatatypeElephino(0) # noqa: RUF009
95
+ gap1ndexCeiling: DatatypeElephino = DatatypeElephino(0) # noqa: RUF009
96
+ indexDimension: DatatypeLeavesTotal = DatatypeLeavesTotal(0) # noqa: RUF009
97
+ indexLeaf: DatatypeLeavesTotal = DatatypeLeavesTotal(0) # noqa: RUF009
98
+ indexMiniGap: DatatypeElephino = DatatypeElephino(0) # noqa: RUF009
99
+ leaf1ndex: DatatypeLeavesTotal = DatatypeLeavesTotal(1) # noqa: RUF009
100
+ leafConnectee: DatatypeLeavesTotal = DatatypeLeavesTotal(0) # noqa: RUF009
27
101
 
28
102
  dimensionsUnconstrained: DatatypeLeavesTotal = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
29
103
 
@@ -36,13 +110,43 @@ class MapFoldingState:
36
110
  connectionGraph: Array3D = dataclasses.field(init=False, metadata={'dtype': Array3D.__args__[1].__args__[0]}) # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
37
111
  dimensionsTotal: DatatypeLeavesTotal = dataclasses.field(init=False)
38
112
  leavesTotal: DatatypeLeavesTotal = dataclasses.field(init=False)
39
-
40
113
  @property
41
114
  def foldsTotal(self) -> DatatypeFoldsTotal:
42
- _foldsTotal = DatatypeFoldsTotal(self.leavesTotal) * self.groupsOfFolds
43
- return _foldsTotal
115
+ """Calculate the total number of possible folding patterns for this map.
116
+
117
+ (AI generated docstring)
118
+
119
+ Returns
120
+ -------
121
+ totalFoldingPatterns : DatatypeFoldsTotal
122
+ The complete count of distinct folding patterns achievable with the current map configuration.
123
+
124
+ Notes
125
+ -----
126
+ This represents the fundamental result of map folding analysis - the total
127
+ number of unique ways a map can be folded given its dimensional constraints.
128
+
129
+ """
130
+ return DatatypeFoldsTotal(self.leavesTotal) * self.groupsOfFolds
44
131
 
45
132
  def __post_init__(self) -> None:
133
+ """Initialize all computational arrays and derived values after dataclass construction.
134
+
135
+ (AI generated docstring)
136
+
137
+ This method performs the expensive operations needed to prepare the state
138
+ for computation, including array allocation, dimension calculation, and
139
+ connection graph generation. It runs automatically after the dataclass
140
+ constructor completes.
141
+
142
+ Notes
143
+ -----
144
+ Arrays that are not explicitly provided (None) are automatically
145
+ allocated with appropriate sizes based on the map dimensions.
146
+ The connection graph is always regenerated to ensure consistency
147
+ with the provided map shape.
148
+
149
+ """
46
150
  self.dimensionsTotal = DatatypeLeavesTotal(len(self.mapShape))
47
151
  self.leavesTotal = DatatypeLeavesTotal(getLeavesTotal(self.mapShape))
48
152
 
@@ -50,33 +154,124 @@ class MapFoldingState:
50
154
 
51
155
  self.connectionGraph = getConnectionGraph(self.mapShape, leavesTotalAsInt, self.__dataclass_fields__['connectionGraph'].metadata['dtype'])
52
156
 
53
- if self.dimensionsUnconstrained is None: self.dimensionsUnconstrained = DatatypeLeavesTotal(int(self.dimensionsTotal)) # pyright: ignore[reportUnnecessaryComparison]
54
- if self.gapsWhere is None: self.gapsWhere = makeDataContainer(leavesTotalAsInt * leavesTotalAsInt + 1, self.__dataclass_fields__['gapsWhere'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
55
- if self.countDimensionsGapped is None: self.countDimensionsGapped = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['countDimensionsGapped'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
56
- if self.gapRangeStart is None: self.gapRangeStart = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['gapRangeStart'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
57
- if self.leafAbove is None: self.leafAbove = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['leafAbove'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
58
- if self.leafBelow is None: self.leafBelow = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['leafBelow'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
157
+ if self.dimensionsUnconstrained is None: self.dimensionsUnconstrained = DatatypeLeavesTotal(int(self.dimensionsTotal)) # pyright: ignore[reportUnnecessaryComparison] # noqa: E701
158
+ if self.gapsWhere is None: self.gapsWhere = makeDataContainer(leavesTotalAsInt * leavesTotalAsInt + 1, self.__dataclass_fields__['gapsWhere'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison] # noqa: E701
159
+ if self.countDimensionsGapped is None: self.countDimensionsGapped = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['countDimensionsGapped'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison] # noqa: E701
160
+ if self.gapRangeStart is None: self.gapRangeStart = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['gapRangeStart'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison] # noqa: E701
161
+ if self.leafAbove is None: self.leafAbove = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['leafAbove'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison] # noqa: E701
162
+ if self.leafBelow is None: self.leafBelow = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['leafBelow'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison] # noqa: E701
59
163
 
60
164
  @dataclasses.dataclass
61
165
  class ParallelMapFoldingState(MapFoldingState):
62
- taskDivisions: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
63
- """Number of tasks into which to divide the computation. If the value is greater than `leavesTotal`, the computation will be wrong. Default is `leavesTotal`."""
166
+ """Experimental computational state for task division operations.
167
+
168
+ (AI generated docstring)
64
169
 
65
- taskIndex: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
66
- """Index of the current task when using task divisions."""
170
+ This class extends the base MapFoldingState with additional attributes
171
+ needed for experimental task division of map folding computations. It manages
172
+ task division state while inheriting all the core computational arrays and
173
+ properties from the base class.
174
+
175
+ The task division model attempts to divide the total computation space into
176
+ discrete tasks that can be processed independently, then combined to
177
+ produce the final result. However, the map folding problem is inherently
178
+ sequential and task division typically results in significant computational
179
+ overhead due to work overlap between tasks.
180
+
181
+ Attributes
182
+ ----------
183
+ taskDivisions : DatatypeLeavesTotal = DatatypeLeavesTotal(0)
184
+ Number of tasks into which the computation is divided.
185
+ taskIndex : DatatypeLeavesTotal = DatatypeLeavesTotal(0)
186
+ Current task identifier when processing in task division mode.
187
+
188
+ """
189
+
190
+ taskDivisions: DatatypeLeavesTotal = DatatypeLeavesTotal(0) # noqa: RUF009
191
+ """
192
+ Number of tasks into which to divide the computation.
193
+
194
+ If this value exceeds `leavesTotal`, the computation will produce incorrect
195
+ results. When set to 0 (default), the value is automatically set to
196
+ `leavesTotal` during initialization, providing optimal task granularity.
197
+ """
198
+
199
+ taskIndex: DatatypeLeavesTotal = DatatypeLeavesTotal(0) # noqa: RUF009
200
+ """
201
+ Index of the current task when using task divisions.
202
+
203
+ This value identifies which specific task is being processed in the
204
+ parallel computation. It ranges from 0 to `taskDivisions - 1` and
205
+ determines which portion of the total computation space this instance
206
+ is responsible for analyzing.
207
+ """
67
208
 
68
209
  def __post_init__(self) -> None:
210
+ """Initialize parallel-specific state after base initialization.
211
+
212
+ (AI generated docstring)
213
+
214
+ This method calls the parent initialization to set up all base
215
+ computational arrays, then configures the task division
216
+ parameters. If `taskDivisions` is 0, it automatically sets the
217
+ value to `leavesTotal` for optimal parallelization.
218
+
219
+ """
69
220
  super().__post_init__()
70
221
  if self.taskDivisions == 0:
71
222
  self.taskDivisions = DatatypeLeavesTotal(int(self.leavesTotal))
72
223
 
73
224
  @dataclasses.dataclass
74
225
  class LeafSequenceState(MapFoldingState):
226
+ """Specialized computational state for tracking leaf sequences during analysis.
227
+
228
+ (AI generated docstring)
229
+
230
+ This class extends the base MapFoldingState with additional capability
231
+ for recording and analyzing the sequence of leaf connections discovered
232
+ during map folding computations. It integrates with the OEIS (Online
233
+ Encyclopedia of Integer Sequences) system to leverage known sequence
234
+ data for optimization and validation.
235
+
236
+ The leaf sequence tracking is particularly valuable for research and
237
+ verification purposes, allowing detailed analysis of how folding patterns
238
+ emerge and enabling comparison with established mathematical sequences.
239
+
240
+ Attributes
241
+ ----------
242
+ leafSequence : Array1DLeavesTotal = None
243
+ Array storing the sequence of leaf connections discovered.
244
+
245
+ """
246
+
75
247
  leafSequence: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
248
+ """
249
+ Array storing the sequence of leaf connections discovered during computation.
250
+
251
+ This array records the order in which leaf connections are established
252
+ during the folding analysis. The sequence provides insights into the
253
+ algorithmic progression and can be compared against known mathematical
254
+ sequences for validation and optimization purposes.
255
+ """
76
256
 
77
257
  def __post_init__(self) -> None:
258
+ """Initialize sequence tracking arrays with OEIS integration.
259
+
260
+ (AI generated docstring)
261
+
262
+ This method performs base initialization then sets up the leaf sequence
263
+ tracking array. It queries the OEIS system for known fold totals
264
+ corresponding to the current map shape, using this information to
265
+ optimally size the sequence tracking array.
266
+
267
+ Notes
268
+ -----
269
+ The sequence array is automatically initialized to record the starting
270
+ leaf connection, providing a foundation for subsequent sequence tracking.
271
+
272
+ """
78
273
  super().__post_init__()
79
- from mapFolding.oeis import getFoldsTotalKnown
274
+ from mapFolding.oeis import getFoldsTotalKnown # noqa: PLC0415
80
275
  groupsOfFoldsKnown = getFoldsTotalKnown(self.mapShape) // self.leavesTotal
81
276
  if self.leafSequence is None: # pyright: ignore[reportUnnecessaryComparison]
82
277
  self.leafSequence = makeDataContainer(groupsOfFoldsKnown, self.__dataclass_fields__['leafSequence'].metadata['dtype'])
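For orientation at the end of this file's diff, here is how the state classes above might be instantiated. It assumes MapFoldingState and ParallelMapFoldingState accept a plain tuple of ints for mapShape, as the __post_init__ logic shown here suggests; it is a hedged illustration, not an excerpt from the package, and it skips LeafSequenceState because that class also consults the OEIS helpers in mapFolding.oeis.

# Hedged sketch, not from the package: instantiate the dataclasses shown in this diff
# and read the fields derived in __post_init__.
from mapFolding.dataBaskets import MapFoldingState, ParallelMapFoldingState

state = MapFoldingState(mapShape=(2, 3))
print(state.dimensionsTotal, state.leavesTotal)   # 2 and 6 for a 2x3 map
print(state.connectionGraph.shape)                # (2, 7, 7): (dimensionsTotal, leavesTotal + 1, leavesTotal + 1)
print(state.foldsTotal)                           # leavesTotal * groupsOfFolds; 0 before any counting runs

parallel = ParallelMapFoldingState(mapShape=(2, 3))
print(parallel.taskDivisions)                     # the default 0 is replaced by leavesTotal (6) in __post_init__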