mapFolding 0.11.2-py3-none-any.whl → 0.11.4-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
mapFolding/__init__.py CHANGED
@@ -33,29 +33,16 @@ from mapFolding.datatypes import (
 from mapFolding.theSSOT import PackageSettings as PackageSettings, packageSettings as packageSettings
 
 from mapFolding.beDRY import (
-    ComputationState as ComputationState,
     getConnectionGraph as getConnectionGraph,
     getLeavesTotal as getLeavesTotal,
     getTaskDivisions as getTaskDivisions,
     makeDataContainer as makeDataContainer,
-    outfitCountFolds as outfitCountFolds,
     setProcessorLimit as setProcessorLimit,
     validateListDimensions as validateListDimensions,
 )
 
 from mapFolding.dataBaskets import MapFoldingState as MapFoldingState
 
-from mapFolding.infoBooth import (
-    PackageInformation as PackageInformation,
-    raiseIfNoneGitHubIssueNumber3 as raiseIfNoneGitHubIssueNumber3,
-    The as The,
-)
-
-from mapFolding.theDao import (
-    countInitialize as countInitialize,
-    doTheNeedful as doTheNeedful,
-)
-
 from mapFolding.filesystemToolkit import (
     getFilenameFoldsTotal as getFilenameFoldsTotal,
     getPathFilenameFoldsTotal as getPathFilenameFoldsTotal,
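For orientation, a minimal import sketch of names the hunk above still re-exports from the package top level; it is illustrative only and drawn directly from the surviving import list.

    from mapFolding import (
        MapFoldingState,
        getConnectionGraph,
        getLeavesTotal,
        packageSettings,
        setProcessorLimit,
        validateListDimensions,
    )
    # All of these appear in the re-export list above; ComputationState,
    # outfitCountFolds, The, countInitialize, and doTheNeedful no longer do.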
mapFolding/basecamp.py CHANGED
@@ -12,10 +12,10 @@ appropriate algorithm implementation, and optional persistence of results.
 from collections.abc import Sequence
 from mapFolding import (
     getPathFilenameFoldsTotal,
+    packageSettings,
     saveFoldsTotal,
     saveFoldsTotalFAILearly,
     setProcessorLimit,
-    The,
     validateListDimensions,
 )
 from os import PathLike
@@ -102,8 +102,7 @@ def countFolds(listDimensions: Sequence[int] | None = None
     # task division instructions ===============================================
 
     if computationDivisions:
-        # NOTE `The.concurrencyPackage`
-        concurrencyLimit: int = setProcessorLimit(CPUlimit, The.concurrencyPackage)
+        concurrencyLimit: int = setProcessorLimit(CPUlimit, packageSettings.concurrencyPackage)
         from mapFolding.beDRY import getLeavesTotal, getTaskDivisions
         leavesTotal: int = getLeavesTotal(mapShape)
         taskDivisions = getTaskDivisions(computationDivisions, concurrencyLimit, leavesTotal)
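A minimal usage sketch of the replacement call shown above, assuming only what is visible in this diff: `packageSettings.concurrencyPackage` and the `setProcessorLimit` signature from beDRY.py.

    from mapFolding import packageSettings, setProcessorLimit

    # The concurrency backend now comes from the package-wide settings object
    # rather than the removed `The` namespace; CPUlimit is passed through
    # unchanged (None here, purely illustrative).
    concurrencyLimit: int = setProcessorLimit(None, packageSettings.concurrencyPackage)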
mapFolding/beDRY.py CHANGED
@@ -208,150 +208,6 @@ def makeDataContainer(shape: int | tuple[int, ...], datatype: type[NumPyIntegerT
     """
     return numpy.zeros(shape, dtype=datatype)
 
-
-@dataclasses.dataclass
-class ComputationState:
-    """
-    Represents the complete state of a map folding computation.
-
-    This dataclass encapsulates all the information required to compute the number of possible ways to fold a map,
-    including the map dimensions, leaf connections, computation progress, and fold counting. It serves as the central
-    data structure that flows through the entire computational algorithm.
-
-    Fields are categorized into:
-    1. Input parameters (`mapShape`, `leavesTotal`, etc.).
-    2. Core computational structures (`connectionGraph`, etc.).
-    3. Tracking variables for the folding algorithm state.
-    4. Result accumulation fields (`foldsTotal`, `groupsOfFolds`).
-    """
-    # NOTE Python is anti-DRY, again, `DatatypeLeavesTotal` metadata needs to match the type
-    mapShape: tuple[DatatypeLeavesTotal, ...] = dataclasses.field(init=True, metadata={'elementConstructor': 'DatatypeLeavesTotal'})
-    """Dimensions of the map to be folded, as a tuple of integers."""
-
-    leavesTotal: DatatypeLeavesTotal
-    """Total number of leaves (unit squares) in the map, equal to the product of all dimensions."""
-
-    taskDivisions: DatatypeLeavesTotal
-    """Number of parallel tasks to divide the computation into. Zero means sequential computation."""
-
-    concurrencyLimit: DatatypeElephino
-    """Maximum number of concurrent processes to use during computation."""
-
-    connectionGraph: Array3D = dataclasses.field(init=False, metadata={'dtype': Array3D.__args__[1].__args__[0]}) # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
-    """3D array encoding the connections between leaves in all dimensions."""
-
-    dimensionsTotal: DatatypeLeavesTotal = dataclasses.field(init=False)
-    """Total number of dimensions in the map shape."""
-
-    # I am using `dataclasses.field` metadata and `typeAlias.__args__[1].__args__[0]` to make the code more DRY. https://github.com/hunterhogan/mapFolding/issues/9
-    countDimensionsGapped: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
-    """Tracks how many dimensions are gapped for each leaf."""
-
-    dimensionsUnconstrained: DatatypeLeavesTotal = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
-    """Number of dimensions that are not constrained in the current folding state."""
-
-    gapRangeStart: Array1DElephino = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DElephino.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
-    """Starting index for the gap range for each leaf."""
-
-    gapsWhere: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
-    """Tracks where gaps occur in the folding pattern."""
-
-    leafAbove: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
-    """For each leaf, stores the index of the leaf above it in the folding pattern."""
-
-    leafBelow: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
-    """For each leaf, stores the index of the leaf below it in the folding pattern."""
-
-    foldGroups: Array1DFoldsTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DFoldsTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
-    """Accumulator for fold groups across parallel tasks."""
-
-    foldsTotal: DatatypeFoldsTotal = DatatypeFoldsTotal(0)
-    """The final computed total number of distinct folding patterns."""
-
-    gap1ndex: DatatypeElephino = DatatypeElephino(0)
-    """Current index into gaps array during algorithm execution."""
-
-    gap1ndexCeiling: DatatypeElephino = DatatypeElephino(0)
-    """Upper limit for gap index during the current algorithm phase."""
-
-    groupsOfFolds: DatatypeFoldsTotal = dataclasses.field(default=DatatypeFoldsTotal(0), metadata={'theCountingIdentifier': True})
-    """Accumulator for the number of fold groups found during computation."""
-
-    indexDimension: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
-    """Current dimension being processed during algorithm execution."""
-
-    indexLeaf: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
-    """Current leaf index during iteration."""
-
-    indexMiniGap: DatatypeElephino = DatatypeElephino(0)
-    """Index used when filtering common gaps."""
-
-    leaf1ndex: DatatypeLeavesTotal = DatatypeLeavesTotal(1)
-    """Active leaf being processed in the folding algorithm. Starts at 1, not 0."""
-
-    leafConnectee: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
-    """Leaf that is being connected to the active leaf."""
-
-    taskIndex: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
-    """Index of the current parallel task when using task divisions."""
-
-    def __post_init__(self) -> None:
-        from mapFolding.beDRY import getConnectionGraph, makeDataContainer
-        self.dimensionsTotal = DatatypeLeavesTotal(len(self.mapShape))
-        leavesTotalAsInt = int(self.leavesTotal)
-        self.connectionGraph = getConnectionGraph(self.mapShape, leavesTotalAsInt, self.__dataclass_fields__['connectionGraph'].metadata['dtype'])
-
-        if self.dimensionsUnconstrained is None: self.dimensionsUnconstrained = DatatypeLeavesTotal(int(self.dimensionsTotal)) # pyright: ignore[reportUnnecessaryComparison]
-
-        if self.foldGroups is None: # pyright: ignore[reportUnnecessaryComparison]
-            self.foldGroups = makeDataContainer(max(2, int(self.taskDivisions) + 1), self.__dataclass_fields__['foldGroups'].metadata['dtype'])
-            self.foldGroups[-1] = self.leavesTotal
-
-        # Dataclasses, Default factories, and arguments in `ComputationState` https://github.com/hunterhogan/mapFolding/issues/12
-        if self.gapsWhere is None: self.gapsWhere = makeDataContainer(leavesTotalAsInt * leavesTotalAsInt + 1, self.__dataclass_fields__['gapsWhere'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
-
-        if self.countDimensionsGapped is None: self.countDimensionsGapped = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['countDimensionsGapped'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
-        if self.gapRangeStart is None: self.gapRangeStart = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['gapRangeStart'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
-        if self.leafAbove is None: self.leafAbove = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['leafAbove'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
-        if self.leafBelow is None: self.leafBelow = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['leafBelow'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
-
-    # Automatic, or not, calculation in dataclass `ComputationState` https://github.com/hunterhogan/mapFolding/issues/14
-    def getFoldsTotal(self) -> None:
-        self.foldsTotal = DatatypeFoldsTotal(self.foldGroups[0:-1].sum() * self.leavesTotal)
-
-def outfitCountFolds(mapShape: tuple[int, ...], computationDivisions: int | str | None = None, concurrencyLimit: int = 1) -> ComputationState:
-    """
-    Initialize a `ComputationState` with validated parameters for map folding calculation.
-
-    This function serves as the central initialization point for creating a properly configured `ComputationState`
-    object, ensuring consistent calculation of the fundamental parameters (`leavesTotal` and `taskDivisions`) across the
-    entire package.
-
-    Parameters
-    ----------
-    mapShape
-        A tuple of integers representing the dimensions of the map.
-    computationDivisions: None
-        Controls how to divide the computation into parallel tasks. I know it is annoying, but please see
-        `countFolds` for details, so that you and I both know you have the most accurate information.
-    concurrencyLimit: 1
-        Maximum number of concurrent processes to use during computation.
-
-    Returns
-    -------
-    computationStateInitialized
-        A fully initialized `ComputationState` object that's ready for computation.
-
-    Notes
-    -----
-    This function maintains the Single Source of Truth principle for `leavesTotal` and `taskDivisions` calculation,
-    ensuring these values are derived consistently throughout the package.
-    """
-    leavesTotal = getLeavesTotal(mapShape)
-    taskDivisions = getTaskDivisions(computationDivisions, concurrencyLimit, leavesTotal)
-    computationStateInitialized = ComputationState(mapShape, leavesTotal, taskDivisions, concurrencyLimit)
-    return computationStateInitialized
-
 def setProcessorLimit(CPUlimit: Any | None, concurrencyPackage: str | None = None) -> int:
     """
     Whether and how to limit the CPU usage.
@@ -392,7 +248,8 @@ def setProcessorLimit(CPUlimit: Any | None, concurrencyPackage: str | None = Non
 
     match concurrencyPackage:
         case 'multiprocessing' | None:
-            # When to use multiprocessing.set_start_method https://github.com/hunterhogan/mapFolding/issues/6
+            # When to use multiprocessing.set_start_method
+            # https://github.com/hunterhogan/mapFolding/issues/6
             concurrencyLimit: int = defineConcurrencyLimit(CPUlimit)
         case 'numba':
             from numba import get_num_threads, set_num_threads
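To make the removed bookkeeping concrete, here is a self-contained sketch of the array sizing and fold-total arithmetic from the deleted `ComputationState.__post_init__` and `getFoldsTotal`, with plain numpy dtypes standing in for the package's datatype aliases (an assumption, not the real aliases).

    import numpy

    def makeDataContainer(shape, datatype=numpy.uint64):
        # As shown above in beDRY.makeDataContainer: a zero-filled numpy array.
        return numpy.zeros(shape, dtype=datatype)

    leavesTotal = 2 * 5                    # e.g. a 2 x 5 map has 10 leaves
    taskDivisions = 0                      # 0 means sequential computation

    foldGroups = makeDataContainer(max(2, taskDivisions + 1))
    foldGroups[-1] = leavesTotal           # the last slot stored leavesTotal
    gapsWhere = makeDataContainer(leavesTotal * leavesTotal + 1, numpy.uint8)
    leafAbove = makeDataContainer(leavesTotal + 1, numpy.uint8)
    leafBelow = makeDataContainer(leavesTotal + 1, numpy.uint8)

    # getFoldsTotal summed the per-task groups (all but the last slot) and
    # multiplied by leavesTotal.
    foldsTotal = int(foldGroups[0:-1].sum() * leavesTotal)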
mapFolding/datatypes.py CHANGED
@@ -1,9 +1,6 @@
 from numpy import dtype, uint8 as numpy_uint8, uint16 as numpy_uint16, uint64 as numpy_uint64, integer, ndarray
 from typing import Any, TypeAlias, TypeVar
 
-# =============================================================================
-# Flexible Data Structure System Needs Enhanced Paradigm https://github.com/hunterhogan/mapFolding/issues/9
-
 NumPyIntegerType = TypeVar('NumPyIntegerType', bound=integer[Any], covariant=True)
 
 DatatypeLeavesTotal: TypeAlias = int
mapFolding/oeis.py CHANGED
@@ -144,9 +144,6 @@ def _parseBFileOEIS(OEISbFile: str, oeisID: str) -> dict[int, int]:
         invalid.
     """
     bFileLines: list[str] = OEISbFile.strip().splitlines()
-    # if not bFileLines.pop(0).startswith(f"# {oeisID}"):
-    #     warnings.warn(f"Content does not match sequence {oeisID}")
-    #     return {-1: -1}
 
     OEISsequence: dict[int, int] = {}
     for line in bFileLines:
@@ -1,106 +1,12 @@
-from mapFolding import ComputationState, getPathFilenameFoldsTotal, getPathRootJobDEFAULT, MapFoldingState
+from astToolkit import parseLogicalPath2astModule, str_nameDOTname
+from mapFolding import getPathFilenameFoldsTotal, getPathRootJobDEFAULT, MapFoldingState, packageSettings
 from mapFolding import DatatypeElephino as TheDatatypeElephino, DatatypeFoldsTotal as TheDatatypeFoldsTotal, DatatypeLeavesTotal as TheDatatypeLeavesTotal
-from mapFolding.someAssemblyRequired import ShatteredDataclass, ast_Identifier, parseLogicalPath2astModule, parsePathFilename2astModule, str_nameDOTname
-from mapFolding.someAssemblyRequired.toolkitNumba import theNumbaFlow
+from mapFolding.someAssemblyRequired import dataclassInstanceIdentifierHardcoded, ShatteredDataclass
 from mapFolding.someAssemblyRequired.transformationTools import shatter_dataclassesDOTdataclass
 from pathlib import Path, PurePosixPath
 from typing import TypeAlias
 import dataclasses
 
-@dataclasses.dataclass
-class RecipeJob:
-    state: ComputationState
-    # TODO create function to calculate `foldsTotalEstimated`
-    foldsTotalEstimated: int = 0
-    shatteredDataclass: ShatteredDataclass = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType]
-
-    # ========================================
-    # Source
-    source_astModule = parsePathFilename2astModule(theNumbaFlow.pathFilenameSequential)
-    sourceCountCallable: ast_Identifier = theNumbaFlow.callableSequential
-
-    sourceLogicalPathModuleDataclass: str_nameDOTname = theNumbaFlow.logicalPathModuleDataclass
-    sourceDataclassIdentifier: ast_Identifier = theNumbaFlow.dataclassIdentifier
-    sourceDataclassInstance: ast_Identifier = theNumbaFlow.dataclassInstance
-
-    sourcePathPackage: PurePosixPath | None = theNumbaFlow.pathPackage
-    sourcePackageIdentifier: ast_Identifier | None = theNumbaFlow.packageIdentifier
-
-    # ========================================
-    # Filesystem (names of physical objects)
-    pathPackage: PurePosixPath | None = None
-    pathModule: PurePosixPath | None = PurePosixPath(getPathRootJobDEFAULT())
-    """ `pathModule` will override `pathPackage` and `logicalPathRoot`."""
-    fileExtension: str = theNumbaFlow.fileExtension
-    pathFilenameFoldsTotal: PurePosixPath = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType]
-
-    # ========================================
-    # Logical identifiers (as opposed to physical identifiers)
-    packageIdentifier: ast_Identifier | None = None
-    logicalPathRoot: str_nameDOTname | None = None
-    """ `logicalPathRoot` likely corresponds to a physical filesystem directory."""
-    moduleIdentifier: ast_Identifier = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType]
-    countCallable: ast_Identifier = sourceCountCallable
-    dataclassIdentifier: ast_Identifier | None = sourceDataclassIdentifier
-    dataclassInstance: ast_Identifier | None = sourceDataclassInstance
-    logicalPathModuleDataclass: str_nameDOTname | None = sourceLogicalPathModuleDataclass
-
-    # ========================================
-    # Datatypes
-    DatatypeFoldsTotal: TypeAlias = TheDatatypeFoldsTotal
-    DatatypeElephino: TypeAlias = TheDatatypeElephino
-    DatatypeLeavesTotal: TypeAlias = TheDatatypeLeavesTotal
-
-    def _makePathFilename(self,
-            pathRoot: PurePosixPath | None = None,
-            logicalPathINFIX: str_nameDOTname | None = None,
-            filenameStem: str | None = None,
-            fileExtension: str | None = None,
-            ) -> PurePosixPath:
-        if pathRoot is None:
-            pathRoot = self.pathPackage or PurePosixPath(Path.cwd())
-        if logicalPathINFIX:
-            whyIsThisStillAThing: list[str] = logicalPathINFIX.split('.')
-            pathRoot = pathRoot.joinpath(*whyIsThisStillAThing)
-        if filenameStem is None:
-            filenameStem = self.moduleIdentifier
-        if fileExtension is None:
-            fileExtension = self.fileExtension
-        filename: str = filenameStem + fileExtension
-        return pathRoot.joinpath(filename)
-
-    @property
-    def pathFilenameModule(self) -> PurePosixPath:
-        if self.pathModule is None:
-            return self._makePathFilename()
-        else:
-            return self._makePathFilename(pathRoot=self.pathModule, logicalPathINFIX=None)
-
-    def __post_init__(self):
-        pathFilenameFoldsTotal = PurePosixPath(getPathFilenameFoldsTotal(self.state.mapShape))
-
-        if self.moduleIdentifier is None: # pyright: ignore[reportUnnecessaryComparison]
-            self.moduleIdentifier = pathFilenameFoldsTotal.stem
-
-        if self.pathFilenameFoldsTotal is None: # pyright: ignore[reportUnnecessaryComparison]
-            self.pathFilenameFoldsTotal = pathFilenameFoldsTotal
-
-        if self.shatteredDataclass is None and self.logicalPathModuleDataclass and self.dataclassIdentifier and self.dataclassInstance: # pyright: ignore[reportUnnecessaryComparison]
-            self.shatteredDataclass = shatter_dataclassesDOTdataclass(self.logicalPathModuleDataclass, self.dataclassIdentifier, self.dataclassInstance)
-
-    # ========================================
-    # Fields you probably don't need =================================
-    # Dispatcher =================================
-    sourceDispatcherCallable: ast_Identifier = theNumbaFlow.callableDispatcher
-    dispatcherCallable: ast_Identifier = sourceDispatcherCallable
-    # Parallel counting =================================
-    sourceDataclassInstanceTaskDistribution: ast_Identifier = theNumbaFlow.dataclassInstanceTaskDistribution
-    sourceConcurrencyManagerNamespace: ast_Identifier = theNumbaFlow.concurrencyManagerNamespace
-    sourceConcurrencyManagerIdentifier: ast_Identifier = theNumbaFlow.concurrencyManagerIdentifier
-    dataclassInstanceTaskDistribution: ast_Identifier = sourceDataclassInstanceTaskDistribution
-    concurrencyManagerNamespace: ast_Identifier = sourceConcurrencyManagerNamespace
-    concurrencyManagerIdentifier: ast_Identifier = sourceConcurrencyManagerIdentifier
-
 @dataclasses.dataclass
 class RecipeJobTheorem2Numba:
     state: MapFoldingState
@@ -111,32 +17,32 @@ class RecipeJobTheorem2Numba:
     # ========================================
     # Source
     source_astModule = parseLogicalPath2astModule('mapFolding.syntheticModules.theorem2Numba')
-    sourceCountCallable: ast_Identifier = 'count'
+    sourceCountCallable: str = 'count'
 
     sourceLogicalPathModuleDataclass: str_nameDOTname = 'mapFolding.dataBaskets'
-    sourceDataclassIdentifier: ast_Identifier = 'MapFoldingState'
-    sourceDataclassInstance: ast_Identifier = theNumbaFlow.dataclassInstance
+    sourceDataclassIdentifier: str = 'MapFoldingState'
+    sourceDataclassInstance: str = dataclassInstanceIdentifierHardcoded
 
-    sourcePathPackage: PurePosixPath | None = theNumbaFlow.pathPackage
-    sourcePackageIdentifier: ast_Identifier | None = theNumbaFlow.packageIdentifier
+    sourcePathPackage: PurePosixPath | None = PurePosixPath(packageSettings.pathPackage)
+    sourcePackageIdentifier: str | None = packageSettings.packageName
 
     # ========================================
     # Filesystem (names of physical objects)
     pathPackage: PurePosixPath | None = None
     pathModule: PurePosixPath | None = PurePosixPath(getPathRootJobDEFAULT())
     """ `pathModule` will override `pathPackage` and `logicalPathRoot`."""
-    fileExtension: str = theNumbaFlow.fileExtension
+    fileExtension: str = packageSettings.fileExtension
     pathFilenameFoldsTotal: PurePosixPath = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType]
 
     # ========================================
     # Logical identifiers (as opposed to physical identifiers)
-    packageIdentifier: ast_Identifier | None = None
+    packageIdentifier: str | None = None
     logicalPathRoot: str_nameDOTname | None = None
     """ `logicalPathRoot` likely corresponds to a physical filesystem directory."""
-    moduleIdentifier: ast_Identifier = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType]
-    countCallable: ast_Identifier = sourceCountCallable
-    dataclassIdentifier: ast_Identifier | None = sourceDataclassIdentifier
-    dataclassInstance: ast_Identifier | None = sourceDataclassInstance
+    moduleIdentifier: str = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType]
+    countCallable: str = sourceCountCallable
+    dataclassIdentifier: str | None = sourceDataclassIdentifier
+    dataclassInstance: str | None = sourceDataclassInstance
     logicalPathModuleDataclass: str_nameDOTname | None = sourceLogicalPathModuleDataclass
 
     # ========================================
@@ -181,16 +87,3 @@ class RecipeJobTheorem2Numba:
 
         if self.shatteredDataclass is None and self.logicalPathModuleDataclass and self.dataclassIdentifier and self.dataclassInstance: # pyright: ignore[reportUnnecessaryComparison]
             self.shatteredDataclass = shatter_dataclassesDOTdataclass(self.logicalPathModuleDataclass, self.dataclassIdentifier, self.dataclassInstance)
-
-    # ========================================
-    # Fields you probably don't need =================================
-    # Dispatcher =================================
-    sourceDispatcherCallable: ast_Identifier = theNumbaFlow.callableDispatcher
-    dispatcherCallable: ast_Identifier = sourceDispatcherCallable
-    # Parallel counting =================================
-    sourceDataclassInstanceTaskDistribution: ast_Identifier = theNumbaFlow.dataclassInstanceTaskDistribution
-    sourceConcurrencyManagerNamespace: ast_Identifier = theNumbaFlow.concurrencyManagerNamespace
-    sourceConcurrencyManagerIdentifier: ast_Identifier = theNumbaFlow.concurrencyManagerIdentifier
-    dataclassInstanceTaskDistribution: ast_Identifier = sourceDataclassInstanceTaskDistribution
-    concurrencyManagerNamespace: ast_Identifier = sourceConcurrencyManagerNamespace
-    concurrencyManagerIdentifier: ast_Identifier = sourceConcurrencyManagerIdentifier
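The removed `RecipeJob._makePathFilename` (shown above) turns a dotted logical path into nested directories under a root and appends the stem plus extension. A standalone sketch of that composition, with hypothetical argument values:

    from pathlib import PurePosixPath

    def makePathFilename(pathRoot: PurePosixPath, logicalPathINFIX: str | None, filenameStem: str, fileExtension: str) -> PurePosixPath:
        # Mirrors the removed method's logic: split the dotted path into
        # directories, then join stem + extension.
        if logicalPathINFIX:
            pathRoot = pathRoot.joinpath(*logicalPathINFIX.split('.'))
        return pathRoot.joinpath(filenameStem + fileExtension)

    # Hypothetical values, for illustration only:
    print(makePathFilename(PurePosixPath('/jobs'), 'mapFolding.syntheticModules', 'p2x5', '.py'))
    # /jobs/mapFolding/syntheticModules/p2x5.py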