mapFolding 0.12.2 → 0.12.3 (py3-none-any.whl)

This diff shows the changes between publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
mapFolding/beDRY.py CHANGED
@@ -1,6 +1,8 @@
  """
  Core computational utilities implementing Lunnon's map folding algorithm.

+ (AI generated docstring)
+
  With the configuration foundation established and the type system defined, this
  module provides the essential building blocks that transform mathematical theory
  into executable computation. These utilities implement the fundamental operations
@@ -21,27 +23,28 @@ requires to orchestrate the complex recursive algorithms.
  """

  from collections.abc import Sequence
+ from hunterMakesPy import defineConcurrencyLimit, intInnit, oopsieKwargsie
  from mapFolding import NumPyIntegerType
  from numpy import dtype as numpy_dtype, int64 as numpy_int64, ndarray
  from sys import maxsize as sysMaxsize
  from typing import Any
- from Z0Z_tools import defineConcurrencyLimit, intInnit, oopsieKwargsie
  import numpy

  def getLeavesTotal(mapShape: tuple[int, ...]) -> int:
- """
- Calculate the total number of leaves in a map with the given dimensions.
+ """Calculate the total number of leaves in a map with the given dimensions.
+
+ (AI generated docstring)

  The total number of leaves is the product of all dimensions in the map shape.

  Parameters
  ----------
- mapShape
+ mapShape : tuple[int, ...]
  A tuple of integers representing the dimensions of the map.

  Returns
  -------
- leavesTotal
+ leavesTotal : int
  The total number of leaves in the map, calculated as the product of all dimensions.

  Raises
@@ -49,31 +52,36 @@ def getLeavesTotal(mapShape: tuple[int, ...]) -> int:
  OverflowError
  If the product of dimensions would exceed the system's maximum integer size. This check prevents silent numeric
  overflow issues that could lead to incorrect results.
+
  """
  productDimensions = 1
  for dimension in mapShape:
  # NOTE this check is one-degree short of absurd, but three lines of early absurdity is better than invalid output later. I'd add more checks if I could think of more.
  if dimension > sysMaxsize // productDimensions:
- raise OverflowError(f"I received `{dimension = }` in `{mapShape = }`, but the product of the dimensions exceeds the maximum size of an integer on this system.")
+ message = f"I received `{dimension = }` in `{mapShape = }`, but the product of the dimensions exceeds the maximum size of an integer on this system."
+ raise OverflowError(message)
  productDimensions *= dimension
  return productDimensions

  def getTaskDivisions(computationDivisions: int | str | None, concurrencyLimit: int, leavesTotal: int) -> int:
- """
- Determines whether to divide the computation into tasks and how many divisions.
+ """Determine whether to divide the computation into tasks and how many divisions.
+
+ (AI generated docstring)

  Parameters
  ----------
- computationDivisions: None
- Specifies how to divide computations: Please see the documentation in `countFolds` for details. I know it is
+ computationDivisions : int | str | None
+ Specifies how to divide computations. Please see the documentation in `countFolds` for details. I know it is
  annoying, but I want to be sure you have the most accurate information.
- concurrencyLimit
+ concurrencyLimit : int
  Maximum number of concurrent tasks allowed.
+ leavesTotal : int
+ Total number of leaves in the map.

  Returns
  -------
- taskDivisions
- How many tasks must finish before the job can compute the total number of folds; `0` means no tasks, only job.
+ taskDivisions : int
+ How many tasks must finish before the job can compute the total number of folds. `0` means no tasks, only job.

  Raises
  ------
@@ -83,6 +91,7 @@ def getTaskDivisions(computationDivisions: int | str | None, concurrencyLimit: i
  Notes
  -----
  Task divisions should not exceed total leaves or the folds will be over-counted.
+
  """
  taskDivisions = 0
  match computationDivisions:
@@ -98,17 +107,21 @@ def getTaskDivisions(computationDivisions: int | str | None, concurrencyLimit: i
  case 'cpu':
  taskDivisions = min(concurrencyLimit, leavesTotal)
  case _:
- raise ValueError(f"I received '{strComputationDivisions}' for the parameter, `computationDivisions`, but the string value is not supported.")
+ message = f"I received '{strComputationDivisions}' for the parameter, `computationDivisions`, but the string value is not supported."
+ raise ValueError(message)
  case _:
- raise ValueError(f"I received {computationDivisions} for the parameter, `computationDivisions`, but the type {type(computationDivisions).__name__} is not supported.")
+ message = f"I received {computationDivisions} for the parameter, `computationDivisions`, but the type {type(computationDivisions).__name__} is not supported."
+ raise ValueError(message)

  if taskDivisions > leavesTotal:
- raise ValueError(f"Problem: `{taskDivisions = }`, is greater than `{leavesTotal = }`, which will cause duplicate counting of the folds.\n\nChallenge: you cannot directly set `taskDivisions` or `leavesTotal`: they are derived from parameters that may or may not be named `computationDivisions`, `CPUlimit` , and `listDimensions` and from my dubious-quality Python code.")
+ message = f"Problem: `{taskDivisions = }`, is greater than `{leavesTotal = }`, which will cause duplicate counting of the folds.\n\nChallenge: you cannot directly set `taskDivisions` or `leavesTotal`: they are derived from parameters that may or may not be named `computationDivisions`, `CPUlimit` , and `listDimensions` and from my dubious-quality Python code." # noqa: E501
+ raise ValueError(message)
  return int(max(0, taskDivisions))

  def _makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int) -> ndarray[tuple[int, int, int], numpy_dtype[numpy_int64]]:
- """
- Implementation of connection graph generation for map folding.
+ """Implement connection graph generation for map folding.
+
+ (AI generated docstring)

  This is the internal implementation that calculates all possible connections between leaves in a map folding problem
  based on Lunnon's algorithm. The function constructs a three-dimensional array representing which leaves can be
@@ -116,14 +129,14 @@ def _makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int) -> ndarray

  Parameters
  ----------
- mapShape
+ mapShape : tuple[int, ...]
  A tuple of integers representing the dimensions of the map.
- leavesTotal
+ leavesTotal : int
  The total number of leaves in the map.

  Returns
  -------
- connectionGraph
+ connectionGraph : ndarray[tuple[int, int, int], numpy_dtype[numpy_int64]]
  A 3D NumPy array with shape (`dimensionsTotal`, `leavesTotal`+1, `leavesTotal`+1) where each entry [d,i,j]
  represents the leaf that would be connected to leaf j when inserting leaf i in dimension d.

@@ -134,9 +147,10 @@ def _makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int) -> ndarray

  The algorithm calculates a coordinate system first, then determines connections based on parity rules, boundary
  conditions, and dimensional constraints.
+
  """
  dimensionsTotal = len(mapShape)
- cumulativeProduct = numpy.multiply.accumulate([1] + list(mapShape), dtype=numpy_int64)
+ cumulativeProduct = numpy.multiply.accumulate([1, *list(mapShape)], dtype=numpy_int64)
  arrayDimensions = numpy.array(mapShape, dtype=numpy_int64)
  coordinateSystem = numpy.zeros((dimensionsTotal, leavesTotal + 1), dtype=numpy_int64)
  for indexDimension in range(dimensionsTotal):
@@ -161,8 +175,9 @@ def _makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int) -> ndarray
  return connectionGraph

  def getConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int, datatype: type[NumPyIntegerType]) -> ndarray[tuple[int, int, int], numpy_dtype[NumPyIntegerType]]:
- """
- Create a properly typed connection graph for the map folding algorithm.
+ """Create a properly typed connection graph for the map folding algorithm.
+
+ (AI generated docstring)

  This function serves as a typed wrapper around the internal implementation that generates connection graphs. It
  provides the correct type information for the returned array, ensuring consistency throughout the computation
@@ -170,63 +185,66 @@ def getConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int, datatype: ty

  Parameters
  ----------
- mapShape
+ mapShape : tuple[int, ...]
  A tuple of integers representing the dimensions of the map.
- leavesTotal
+ leavesTotal : int
  The total number of leaves in the map.
- datatype
+ datatype : type[NumPyIntegerType]
  The NumPy integer type to use for the array elements, ensuring proper memory usage and compatibility with the
  computation state.

  Returns
  -------
- connectionGraph
+ connectionGraph : ndarray[tuple[int, int, int], numpy_dtype[NumPyIntegerType]]
  A 3D NumPy array with shape (`dimensionsTotal`, `leavesTotal`+1, `leavesTotal`+1) with the specified `datatype`,
  representing all possible connections between leaves.
+
  """
  connectionGraph = _makeConnectionGraph(mapShape, leavesTotal)
- connectionGraph = connectionGraph.astype(datatype)
- return connectionGraph
+ return connectionGraph.astype(datatype)

  def makeDataContainer(shape: int | tuple[int, ...], datatype: type[NumPyIntegerType]) -> ndarray[Any, numpy_dtype[NumPyIntegerType]]:
- """
- Create a typed NumPy array container with initialized values.
+ """Create a typed NumPy array container with initialized values.
+
+ (AI generated docstring)

  This function centralizes the creation of data containers used throughout the computation assembly-line, enabling
  easy switching between different container types or implementation strategies if needed in the future.

  Parameters
  ----------
- shape
+ shape : int | tuple[int, ...]
  Either an integer (for 1D arrays) or a tuple of integers (for multi-dimensional arrays) specifying the
  dimensions of the array.
- datatype
+ datatype : type[NumPyIntegerType]
  The NumPy integer type to use for the array elements, ensuring proper type consistency and memory efficiency.

  Returns
  -------
- container
+ container : ndarray[Any, numpy_dtype[NumPyIntegerType]]
  A NumPy array of zeros with the specified shape and `datatype`.
+
  """
  return numpy.zeros(shape, dtype=datatype)

  def setProcessorLimit(CPUlimit: Any | None, concurrencyPackage: str | None = None) -> int:
- """
- Whether and how to limit the CPU usage.
+ """Set the CPU usage limit for concurrent operations.
+
+ (AI generated docstring)

  Parameters
  ----------
- CPUlimit: None
+ CPUlimit : Any | None
  Please see the documentation for in `countFolds` for details. I know it is annoying, but I want to be sure you
  have the most accurate information.
- concurrencyPackage: None
- Specifies which concurrency package to use:
+ concurrencyPackage : str | None = None
+ Specifies which concurrency package to use.
  - `None` or `'multiprocessing'`: Uses standard `multiprocessing`.
  - `'numba'`: Uses Numba's threading system.

  Returns
  -------
- concurrencyLimit
+ concurrencyLimit : int
  The actual concurrency limit that was set.

  Raises
@@ -244,6 +262,7 @@ def setProcessorLimit(CPUlimit: Any | None, concurrencyPackage: str | None = Non

  When using Numba, this function must be called before importing any Numba-jitted function for this processor limit
  to affect the Numba-jitted function.
+
  """
  if not (CPUlimit is None or isinstance(CPUlimit, (bool, int, float))):
  CPUlimit = oopsieKwargsie(CPUlimit)
@@ -251,20 +270,22 @@ def setProcessorLimit(CPUlimit: Any | None, concurrencyPackage: str | None = Non
  match concurrencyPackage:
  case 'multiprocessing' | None:
  # When to use multiprocessing.set_start_method
- # https://github.com/hunterhogan/mapFolding/issues/6
- concurrencyLimit: int = defineConcurrencyLimit(CPUlimit)
+ # https://github.com/hunterhogan/mapFolding/issues/6 # noqa: ERA001
+ concurrencyLimit: int = defineConcurrencyLimit(limit=CPUlimit)
  case 'numba':
- from numba import get_num_threads, set_num_threads
- concurrencyLimit = defineConcurrencyLimit(CPUlimit, get_num_threads())
+ from numba import get_num_threads, set_num_threads # noqa: PLC0415
+ concurrencyLimit = defineConcurrencyLimit(limit=CPUlimit, cpuTotal=get_num_threads())
  set_num_threads(concurrencyLimit)
  concurrencyLimit = get_num_threads()
  case _:
- raise NotImplementedError(f"I received `{concurrencyPackage = }` but I don't know what to do with that.")
+ message = f"I received `{concurrencyPackage = }` but I don't know what to do with that."
+ raise NotImplementedError(message)
  return concurrencyLimit

  def validateListDimensions(listDimensions: Sequence[int]) -> tuple[int, ...]:
- """
- Validate and normalize dimensions for a map folding problem.
+ """Validate and normalize dimensions for a map folding problem.
+
+ (AI generated docstring)

  This function serves as the gatekeeper for dimension inputs, ensuring that all map dimensions provided to the
  package meet the requirements for valid computation. It performs multiple validation steps and normalizes the
@@ -272,12 +293,12 @@ def validateListDimensions(listDimensions: Sequence[int]) -> tuple[int, ...]:

  Parameters
  ----------
- listDimensions
+ listDimensions : Sequence[int]
  A sequence of integers representing the dimensions of the map.

  Returns
  -------
- mapShape
+ mapShape : tuple[int, ...]
  An _unsorted_ tuple of positive integers representing the validated dimensions.

  Raises
@@ -286,17 +307,21 @@ def validateListDimensions(listDimensions: Sequence[int]) -> tuple[int, ...]:
  If the input is empty or contains negative values.
  NotImplementedError
  If fewer than two positive dimensions are provided.
+
  """
  if not listDimensions:
- raise ValueError("`listDimensions` is a required parameter.")
+ message = "`listDimensions` is a required parameter."
+ raise ValueError(message)
  listOFint: list[int] = intInnit(listDimensions, 'listDimensions')
  mapDimensions: list[int] = []
  for dimension in listOFint:
  if dimension <= 0:
- raise ValueError(f"I received `{dimension = }` in `{listDimensions = }`, but all dimensions must be a non-negative integer.")
+ message = f"I received `{dimension = }` in `{listDimensions = }`, but all dimensions must be a non-negative integer."
+ raise ValueError(message)
  mapDimensions.append(dimension)
- if len(mapDimensions) < 2:
- raise NotImplementedError(f"This function requires `{listDimensions = }` to have at least two dimensions greater than 0. You may want to look at https://oeis.org/.")
+ if len(mapDimensions) < 2: # noqa: PLR2004
+ message = f"This function requires `{listDimensions = }` to have at least two dimensions greater than 0. You may want to look at https://oeis.org/."
+ raise NotImplementedError(message)

  """
  I previously sorted the dimensions for a few reasons that may or may not be valid:
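
The functional changes in beDRY.py are the move of the validation helpers from Z0Z_tools to hunterMakesPy, the switch to keyword arguments when calling defineConcurrencyLimit, and the refactor of inline f-string raises into a message variable. The following is a minimal sketch of how the updated helpers fit together, based only on the signatures and docstrings shown in the diff above and not independently verified against 0.12.3:

    # Sketch only: exercises the 0.12.3 helpers as documented in the diff above.
    from mapFolding.beDRY import (
        getLeavesTotal, getTaskDivisions, setProcessorLimit, validateListDimensions)

    mapShape = validateListDimensions([2, 3])    # unsorted tuple of validated dimensions: (2, 3)
    leavesTotal = getLeavesTotal(mapShape)       # product of the dimensions: 2 * 3 = 6
    concurrencyLimit = setProcessorLimit(None)   # calls defineConcurrencyLimit(limit=None) internally
    taskDivisions = getTaskDivisions('cpu', concurrencyLimit, leavesTotal)
    # 'cpu' yields min(concurrencyLimit, leavesTotal); 0 would mean a single job with no task division.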
mapFolding/dataBaskets.py CHANGED
@@ -1,6 +1,8 @@
  """
  Computational state orchestration for map folding analysis.

+ (AI generated docstring)
+
  Building upon the core utilities and their generated data structures, this module
  orchestrates the complex computational state required for Lunnon's recursive
  algorithm execution. The state classes serve as both data containers and computational
@@ -19,15 +21,15 @@ integrity throughout the recursive analysis while providing the structured data
  access patterns that enable efficient result persistence and retrieval.
  """
  from mapFolding import (
- Array1DElephino, Array1DLeavesTotal, Array3D, DatatypeElephino, DatatypeFoldsTotal,
- DatatypeLeavesTotal, getConnectionGraph, getLeavesTotal, makeDataContainer,
- )
+ Array1DElephino, Array1DLeavesTotal, Array3D, DatatypeElephino, DatatypeFoldsTotal, DatatypeLeavesTotal,
+ getConnectionGraph, getLeavesTotal, makeDataContainer)
  import dataclasses

  @dataclasses.dataclass
  class MapFoldingState:
- """
- Core computational state for map folding algorithms.
+ """Core computational state for map folding algorithms.
+
+ (AI generated docstring)
  This class encapsulates all data needed to perform map folding computations,
  from the basic map dimensions through the complex internal arrays needed
  during the folding pattern discovery process.
@@ -40,44 +42,62 @@ class MapFoldingState:
  throughout the computation. It also manages the relationship between
  different data domains (leaves, elephino, folds) defined in the type system.

- Key Design Features:
- - Automatic array sizing based on map dimensions
- - Type-safe access to computational data
- - Lazy initialization of expensive arrays
- - Integration with NumPy for performance
- - Metadata preservation for code generation
-
- Attributes:
- mapShape: Dimensions of the map being analyzed for folding patterns.
- groupsOfFolds: Current count of distinct folding pattern groups discovered.
- gap1ndex: Current position in gap enumeration algorithms.
- gap1ndexCeiling: Upper bound for gap enumeration operations.
- indexDimension: Current dimension being processed in multi-dimensional algorithms.
- indexLeaf: Current leaf being processed in sequential algorithms.
- indexMiniGap: Current position within a gap subdivision.
- leaf1ndex: One-based leaf index for algorithmic compatibility.
- leafConnectee: Target leaf for connection operations.
- dimensionsUnconstrained: Count of dimensions not subject to folding constraints.
- countDimensionsGapped: Array tracking gap counts across dimensions.
- gapRangeStart: Array of starting positions for gap ranges.
- gapsWhere: Array indicating locations of gaps in the folding pattern.
- leafAbove: Array mapping each leaf to the leaf above it in the folding.
- leafBelow: Array mapping each leaf to the leaf below it in the folding.
- connectionGraph: Three-dimensional representation of leaf connectivity.
- dimensionsTotal: Total number of dimensions in the map.
- leavesTotal: Total number of individual leaves in the map.
+ Key Design Features include automatic array sizing based on map dimensions,
+ type-safe access to computational data, lazy initialization of expensive arrays,
+ integration with NumPy for performance, and metadata preservation for code generation.
+
+ Attributes
+ ----------
+ mapShape : tuple[DatatypeLeavesTotal, ...]
+ Dimensions of the map being analyzed for folding patterns.
+ groupsOfFolds : DatatypeFoldsTotal = DatatypeFoldsTotal(0)
+ Current count of distinct folding pattern groups discovered.
+ gap1ndex : DatatypeElephino = DatatypeElephino(0)
+ Current position in gap enumeration algorithms.
+ gap1ndexCeiling : DatatypeElephino = DatatypeElephino(0)
+ Upper bound for gap enumeration operations.
+ indexDimension : DatatypeLeavesTotal = DatatypeLeavesTotal(0)
+ Current dimension being processed in multi-dimensional algorithms.
+ indexLeaf : DatatypeLeavesTotal = DatatypeLeavesTotal(0)
+ Current leaf being processed in sequential algorithms.
+ indexMiniGap : DatatypeElephino = DatatypeElephino(0)
+ Current position within a gap subdivision.
+ leaf1ndex : DatatypeLeavesTotal = DatatypeLeavesTotal(1)
+ One-based leaf index for algorithmic compatibility.
+ leafConnectee : DatatypeLeavesTotal = DatatypeLeavesTotal(0)
+ Target leaf for connection operations.
+ dimensionsUnconstrained : DatatypeLeavesTotal = None
+ Count of dimensions not subject to folding constraints.
+ countDimensionsGapped : Array1DLeavesTotal = None
+ Array tracking gap counts across dimensions.
+ gapRangeStart : Array1DElephino = None
+ Array of starting positions for gap ranges.
+ gapsWhere : Array1DLeavesTotal = None
+ Array indicating locations of gaps in the folding pattern.
+ leafAbove : Array1DLeavesTotal = None
+ Array mapping each leaf to the leaf above it in the folding.
+ leafBelow : Array1DLeavesTotal = None
+ Array mapping each leaf to the leaf below it in the folding.
+ connectionGraph : Array3D
+ Three-dimensional representation of leaf connectivity.
+ dimensionsTotal : DatatypeLeavesTotal
+ Total number of dimensions in the map.
+ leavesTotal : DatatypeLeavesTotal
+ Total number of individual leaves in the map.
+
  """
+
  mapShape: tuple[DatatypeLeavesTotal, ...] = dataclasses.field(init=True, metadata={'elementConstructor': 'DatatypeLeavesTotal'})

  groupsOfFolds: DatatypeFoldsTotal = dataclasses.field(default=DatatypeFoldsTotal(0), metadata={'theCountingIdentifier': True})

- gap1ndex: DatatypeElephino = DatatypeElephino(0)
- gap1ndexCeiling: DatatypeElephino = DatatypeElephino(0)
- indexDimension: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
- indexLeaf: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
- indexMiniGap: DatatypeElephino = DatatypeElephino(0)
- leaf1ndex: DatatypeLeavesTotal = DatatypeLeavesTotal(1)
- leafConnectee: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
+ gap1ndex: DatatypeElephino = DatatypeElephino(0) # noqa: RUF009
+ gap1ndexCeiling: DatatypeElephino = DatatypeElephino(0) # noqa: RUF009
+ indexDimension: DatatypeLeavesTotal = DatatypeLeavesTotal(0) # noqa: RUF009
+ indexLeaf: DatatypeLeavesTotal = DatatypeLeavesTotal(0) # noqa: RUF009
+ indexMiniGap: DatatypeElephino = DatatypeElephino(0) # noqa: RUF009
+ leaf1ndex: DatatypeLeavesTotal = DatatypeLeavesTotal(1) # noqa: RUF009
+ leafConnectee: DatatypeLeavesTotal = DatatypeLeavesTotal(0) # noqa: RUF009

  dimensionsUnconstrained: DatatypeLeavesTotal = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]

@@ -92,34 +112,40 @@ class MapFoldingState:
  leavesTotal: DatatypeLeavesTotal = dataclasses.field(init=False)
  @property
  def foldsTotal(self) -> DatatypeFoldsTotal:
- """
- Calculate the total number of possible folding patterns for this map.
+ """Calculate the total number of possible folding patterns for this map.
+
+ (AI generated docstring)

- Returns:
- totalFoldingPatterns: The complete count of distinct folding patterns
- achievable with the current map configuration.
+ Returns
+ -------
+ totalFoldingPatterns : DatatypeFoldsTotal
+ The complete count of distinct folding patterns achievable with the current map configuration.
+
+ Notes
+ -----
+ This represents the fundamental result of map folding analysis - the total
+ number of unique ways a map can be folded given its dimensional constraints.

- Notes:
- This represents the fundamental result of map folding analysis - the total
- number of unique ways a map can be folded given its dimensional constraints.
  """
- _foldsTotal = DatatypeFoldsTotal(self.leavesTotal) * self.groupsOfFolds
- return _foldsTotal
+ return DatatypeFoldsTotal(self.leavesTotal) * self.groupsOfFolds

  def __post_init__(self) -> None:
- """
- Initialize all computational arrays and derived values after dataclass construction.
+ """Initialize all computational arrays and derived values after dataclass construction.
+
+ (AI generated docstring)

  This method performs the expensive operations needed to prepare the state
  for computation, including array allocation, dimension calculation, and
  connection graph generation. It runs automatically after the dataclass
  constructor completes.

- Notes:
- Arrays that are not explicitly provided (None) are automatically
- allocated with appropriate sizes based on the map dimensions.
- The connection graph is always regenerated to ensure consistency
- with the provided map shape.
+ Notes
+ -----
+ Arrays that are not explicitly provided (None) are automatically
+ allocated with appropriate sizes based on the map dimensions.
+ The connection graph is always regenerated to ensure consistency
+ with the provided map shape.
+
  """
  self.dimensionsTotal = DatatypeLeavesTotal(len(self.mapShape))
  self.leavesTotal = DatatypeLeavesTotal(getLeavesTotal(self.mapShape))
@@ -137,8 +163,9 @@ class MapFoldingState:

  @dataclasses.dataclass
  class ParallelMapFoldingState(MapFoldingState):
- """
- Experimental computational state for task division operations.
+ """Experimental computational state for task division operations.
+
+ (AI generated docstring)

  This class extends the base MapFoldingState with additional attributes
  needed for experimental task division of map folding computations. It manages
@@ -151,11 +178,16 @@ class ParallelMapFoldingState(MapFoldingState):
  sequential and task division typically results in significant computational
  overhead due to work overlap between tasks.

- Attributes:
- taskDivisions: Number of tasks into which the computation is divided.
- taskIndex: Current task identifier when processing in task division mode.
+ Attributes
+ ----------
+ taskDivisions : DatatypeLeavesTotal = DatatypeLeavesTotal(0)
+ Number of tasks into which the computation is divided.
+ taskIndex : DatatypeLeavesTotal = DatatypeLeavesTotal(0)
+ Current task identifier when processing in task division mode.
+
  """
- taskDivisions: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
+
+ taskDivisions: DatatypeLeavesTotal = DatatypeLeavesTotal(0) # noqa: RUF009
  """
  Number of tasks into which to divide the computation.

@@ -164,7 +196,7 @@ class ParallelMapFoldingState(MapFoldingState):
  `leavesTotal` during initialization, providing optimal task granularity.
  """

- taskIndex: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
+ taskIndex: DatatypeLeavesTotal = DatatypeLeavesTotal(0) # noqa: RUF009
  """
  Index of the current task when using task divisions.

@@ -175,12 +207,15 @@ class ParallelMapFoldingState(MapFoldingState):
  """

  def __post_init__(self) -> None:
- """
- Initialize parallel-specific state after base initialization.
+ """Initialize parallel-specific state after base initialization.
+
+ (AI generated docstring)
+
  This method calls the parent initialization to set up all base
  computational arrays, then configures the task division
  parameters. If `taskDivisions` is 0, it automatically sets the
  value to `leavesTotal` for optimal parallelization.
+
  """
  super().__post_init__()
  if self.taskDivisions == 0:
@@ -188,8 +223,9 @@ class ParallelMapFoldingState(MapFoldingState):

  @dataclasses.dataclass
  class LeafSequenceState(MapFoldingState):
- """
- Specialized computational state for tracking leaf sequences during analysis.
+ """Specialized computational state for tracking leaf sequences during analysis.
+
+ (AI generated docstring)

  This class extends the base MapFoldingState with additional capability
  for recording and analyzing the sequence of leaf connections discovered
@@ -201,9 +237,13 @@ class LeafSequenceState(MapFoldingState):
  verification purposes, allowing detailed analysis of how folding patterns
  emerge and enabling comparison with established mathematical sequences.

- Attributes:
- leafSequence: Array storing the sequence of leaf connections discovered.
+ Attributes
+ ----------
+ leafSequence : Array1DLeavesTotal = None
+ Array storing the sequence of leaf connections discovered.
+
  """
+
  leafSequence: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
  """
  Array storing the sequence of leaf connections discovered during computation.
@@ -215,20 +255,23 @@ class LeafSequenceState(MapFoldingState):
  """

  def __post_init__(self) -> None:
- """
- Initialize sequence tracking arrays with OEIS integration.
+ """Initialize sequence tracking arrays with OEIS integration.
+
+ (AI generated docstring)

  This method performs base initialization then sets up the leaf sequence
  tracking array. It queries the OEIS system for known fold totals
  corresponding to the current map shape, using this information to
  optimally size the sequence tracking array.

- Notes:
- The sequence array is automatically initialized to record the starting
- leaf connection, providing a foundation for subsequent sequence tracking.
+ Notes
+ -----
+ The sequence array is automatically initialized to record the starting
+ leaf connection, providing a foundation for subsequent sequence tracking.
+
  """
  super().__post_init__()
- from mapFolding.oeis import getFoldsTotalKnown
+ from mapFolding.oeis import getFoldsTotalKnown # noqa: PLC0415
  groupsOfFoldsKnown = getFoldsTotalKnown(self.mapShape) // self.leavesTotal
  if self.leafSequence is None: # pyright: ignore[reportUnnecessaryComparison]
  self.leafSequence = makeDataContainer(groupsOfFoldsKnown, self.__dataclass_fields__['leafSequence'].metadata['dtype'])
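
For orientation, the state classes above are plain dataclasses whose __post_init__ derives the totals and allocates the working arrays, and foldsTotal is the property leavesTotal * groupsOfFolds. The following is a minimal sketch, assuming MapFoldingState can be constructed directly with a small map shape as the diff suggests:

    # Sketch only: constructs the dataclass documented in the diff above.
    from mapFolding.dataBaskets import MapFoldingState

    state = MapFoldingState(mapShape=(2, 2))
    # __post_init__ fills in the derived fields and allocates the arrays.
    print(state.leavesTotal)             # 4 for a 2 x 2 map
    print(state.connectionGraph.shape)   # (dimensionsTotal, leavesTotal + 1, leavesTotal + 1) -> (2, 5, 5)
    print(state.foldsTotal)              # leavesTotal * groupsOfFolds; still 0 before any counting runs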