mapFolding 0.8.0-py3-none-any.whl → 0.8.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. mapFolding/__init__.py +33 -4
  2. mapFolding/basecamp.py +16 -2
  3. mapFolding/beDRY.py +40 -32
  4. mapFolding/filesystem.py +124 -90
  5. mapFolding/noHomeYet.py +12 -0
  6. mapFolding/oeis.py +18 -3
  7. mapFolding/reference/__init__.py +38 -0
  8. mapFolding/reference/flattened.py +66 -47
  9. mapFolding/reference/hunterNumba.py +28 -4
  10. mapFolding/reference/irvineJavaPort.py +13 -1
  11. mapFolding/reference/{jax.py → jaxCount.py} +46 -27
  12. mapFolding/reference/lunnanNumpy.py +19 -5
  13. mapFolding/reference/lunnanWhile.py +19 -7
  14. mapFolding/reference/rotatedEntryPoint.py +20 -3
  15. mapFolding/reference/total_countPlus1vsPlusN.py +226 -203
  16. mapFolding/someAssemblyRequired/__init__.py +29 -0
  17. mapFolding/someAssemblyRequired/getLLVMforNoReason.py +32 -14
  18. mapFolding/someAssemblyRequired/ingredientsNumba.py +22 -1
  19. mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py +193 -0
  20. mapFolding/someAssemblyRequired/synthesizeNumbaJobVESTIGIAL.py +3 -4
  21. mapFolding/someAssemblyRequired/transformDataStructures.py +168 -0
  22. mapFolding/someAssemblyRequired/transformationTools.py +233 -225
  23. mapFolding/theDao.py +19 -5
  24. mapFolding/theSSOT.py +89 -122
  25. mapfolding-0.8.2.dist-info/METADATA +187 -0
  26. mapfolding-0.8.2.dist-info/RECORD +39 -0
  27. {mapfolding-0.8.0.dist-info → mapfolding-0.8.2.dist-info}/WHEEL +1 -1
  28. tests/conftest.py +43 -33
  29. tests/test_computations.py +7 -7
  30. tests/test_other.py +2 -2
  31. mapFolding/reference/lunnan.py +0 -153
  32. mapFolding/someAssemblyRequired/Z0Z_workbench.py +0 -350
  33. mapFolding/someAssemblyRequired/synthesizeDataConverters.py +0 -117
  34. mapFolding/syntheticModules/numbaCountHistoricalExample.py +0 -158
  35. mapFolding/syntheticModules/numba_doTheNeedfulHistoricalExample.py +0 -13
  36. mapfolding-0.8.0.dist-info/METADATA +0 -157
  37. mapfolding-0.8.0.dist-info/RECORD +0 -41
  38. {mapfolding-0.8.0.dist-info → mapfolding-0.8.2.dist-info}/entry_points.txt +0 -0
  39. {mapfolding-0.8.0.dist-info → mapfolding-0.8.2.dist-info/licenses}/LICENSE +0 -0
  40. {mapfolding-0.8.0.dist-info → mapfolding-0.8.2.dist-info}/top_level.txt +0 -0
mapFolding/someAssemblyRequired/synthesizeNumbaFlow.py
@@ -0,0 +1,193 @@
+ """
+ Orchestrator for generating Numba-optimized versions of the map folding algorithm.
+
+ This module transforms the pure Python implementation of the map folding algorithm
+ into a highly-optimized Numba implementation. It serves as the high-level coordinator
+ for the code transformation process, orchestrating the following steps:
+
+ 1. Extracting the core algorithm functions from the source implementation
+ 2. Transforming function signatures and state handling for Numba compatibility
+ 3. Converting state-based operations to direct primitive operations
+ 4. Applying Numba decorators with appropriate optimization parameters
+ 5. Managing imports and dependencies for the generated code
+ 6. Assembling and writing the transformed implementation
+
+ The transformation process preserves the algorithm's logic while dramatically improving
+ performance by leveraging Numba's just-in-time compilation capabilities. This module
+ depends on the abstract transformation tools, dataclass handling utilities, and
+ Numba-specific optimization configurations from other modules in the package.
+
+ The primary entry point is the makeNumbaFlow function, which can be executed directly
+ to generate a fresh optimized implementation.
+ """
+
+ from mapFolding.someAssemblyRequired import (
+     extractFunctionDef,
+     ifThis,
+     IngredientsFunction,
+     IngredientsModule,
+     LedgerOfImports,
+     Make,
+     makeDictionaryReplacementStatements,
+     NodeCollector,
+     NodeReplacer,
+     RecipeSynthesizeFlow,
+     Then,
+     write_astModule,
+     Z0Z_replaceMatchingASTnodes,
+     inlineThisFunctionWithTheseValues,
+ )
+ from mapFolding.someAssemblyRequired.ingredientsNumba import decorateCallableWithNumba
+ from mapFolding.someAssemblyRequired.transformDataStructures import shatter_dataclassesDOTdataclass
+ from mapFolding.theSSOT import raiseIfNoneGitHubIssueNumber3
+ import ast
+
+ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> None:
+     """
+     Think about a better organization of this function.
+
+     Currently, transform `Callable` in order:
+         sourceDispatcherCallable
+         sourceInitializeCallable
+         sourceParallelCallable
+         sourceSequentialCallable
+
+     But, it should be organized around each transformation. So, when the parameters of `sourceSequentialCallable`
+     are transformed, for example, the statement in `sourceDispatcherCallable` that calls `sourceSequentialCallable` should be
+     transformed at the same time: literally in the same function-or-NodeReplacer-or-subroutine. That would help
+     avoid bugs.
+
+     Furthermore, if the above example transformation requires unpacking the dataclass, for example, then the unpacking
+     would be automatically triggered. I have no idea how that would happen, but the transformations are highly predictable,
+     so using a programming language to construct if-this-then-that cascades shouldn't be a problem, you know?
+
+     # TODO a tool to automatically remove unused variables from the ArgumentsSpecification (return, and returns) _might_ be nice.
+     """
+     dictionaryReplacementStatements = makeDictionaryReplacementStatements(numbaFlow.source_astModule)
+     # TODO remember that `sequentialCallable` and `sourceSequentialCallable` are two different values.
+     # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
+
+     # ===========================================================
+     sourcePython = numbaFlow.sourceDispatcherCallable
+     astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
+     if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
+     ingredientsDispatcher = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
+
+     # sourceParallelCallable
+     shatteredDataclass = shatter_dataclassesDOTdataclass(numbaFlow.logicalPathModuleDataclass, numbaFlow.sourceDataclassIdentifier, numbaFlow.sourceDataclassInstanceTaskDistribution)
+     ingredientsDispatcher.imports.update(shatteredDataclass.ledgerDataclassANDFragments)
+
+     NodeReplacer(
+         findThis = ifThis.isAssignAndValueIsCallNamespace_Identifier(numbaFlow.sourceConcurrencyManagerNamespace, numbaFlow.sourceConcurrencyManagerIdentifier)
+         , doThat = Then.insertThisAbove(shatteredDataclass.listAnnAssign4DataclassUnpack)
+     ).visit(ingredientsDispatcher.astFunctionDef)
+     NodeReplacer(
+         findThis = ifThis.isCallNamespace_Identifier(numbaFlow.sourceConcurrencyManagerNamespace, numbaFlow.sourceConcurrencyManagerIdentifier)
+         , doThat = Then.replaceWith(Make.astCall(Make.astAttribute(Make.astName(numbaFlow.sourceConcurrencyManagerNamespace), numbaFlow.sourceConcurrencyManagerIdentifier)
+             , listArguments=[Make.astName(numbaFlow.parallelCallable)] + shatteredDataclass.listNameDataclassFragments4Parameters))
+     ).visit(ingredientsDispatcher.astFunctionDef)
+
+     CapturedAssign: list[ast.AST] = []
+     CapturedCall: list[ast.Call] = []
+     findThis = ifThis.isCall
+     doThat = [Then.appendTo(CapturedCall)]
+     capture = NodeCollector(findThis, doThat)
+
+     NodeCollector(
+         findThis = ifThis.isAssignAndTargets0Is(ifThis.isSubscript_Identifier(numbaFlow.sourceDataclassInstance))
+         , doThat = [Then.appendTo(CapturedAssign)
+             , lambda node: capture.visit(node)]
+     ).visit(ingredientsDispatcher.astFunctionDef)
+
+     newAssign = CapturedAssign[0]
+     NodeReplacer(
+         findThis = lambda node: ifThis.isSubscript(node) and ifThis.isAttribute(node.value) and ifThis.isCall(node.value.value)
+         , doThat = Then.replaceWith(CapturedCall[0])
+     ).visit(newAssign)
+
+     NodeReplacer(
+         findThis = ifThis.isAssignAndTargets0Is(ifThis.isSubscript_Identifier(numbaFlow.sourceDataclassInstance))
+         , doThat = Then.replaceWith(newAssign)
+     ).visit(ingredientsDispatcher.astFunctionDef)
+
+     # sourceSequentialCallable
+     shatteredDataclass = shatter_dataclassesDOTdataclass(numbaFlow.logicalPathModuleDataclass, numbaFlow.sourceDataclassIdentifier, numbaFlow.sourceDataclassInstance)
+
+     ingredientsDispatcher.imports.update(shatteredDataclass.ledgerDataclassANDFragments)
+
+     NodeReplacer(
+         findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
+         , doThat = Then.insertThisAbove(shatteredDataclass.listAnnAssign4DataclassUnpack)
+     ).visit(ingredientsDispatcher.astFunctionDef)
+     NodeReplacer(
+         findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
+         , doThat = Then.insertThisBelow([shatteredDataclass.astAssignDataclassRepack])
+     ).visit(ingredientsDispatcher.astFunctionDef)
+     NodeReplacer(
+         findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
+         , doThat = Then.replaceWith(Make.astAssign(listTargets=[shatteredDataclass.astTuple4AssignTargetsToFragments], value=Make.astCall(Make.astName(numbaFlow.sequentialCallable), shatteredDataclass.listNameDataclassFragments4Parameters)))
+     ).visit(ingredientsDispatcher.astFunctionDef)
+
+     ingredientsDispatcher.astFunctionDef.name = numbaFlow.dispatcherCallable
+
+     # ===========================================================
+     sourcePython = numbaFlow.sourceInitializeCallable
+     astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
+     if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
+     astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
+     ingredientsInitialize = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
+
+     # ===========================================================
+     sourcePython = numbaFlow.sourceParallelCallable
+     astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
+     if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
+     astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
+     ingredientsParallel = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
+     ingredientsParallel.astFunctionDef.name = numbaFlow.parallelCallable
+     ingredientsParallel.astFunctionDef.args = Make.astArgumentsSpecification(args=shatteredDataclass.list_ast_argAnnotated4ArgumentsSpecification)
+     NodeReplacer(
+         findThis = ifThis.isReturn
+         , doThat = Then.replaceWith(Make.astReturn(shatteredDataclass.astTuple4AssignTargetsToFragments))
+     ).visit(ingredientsParallel.astFunctionDef)
+
+     NodeReplacer(
+         findThis = ifThis.isReturn
+         , doThat = Then.replaceWith(Make.astReturn(shatteredDataclass.countingVariableName))
+     ).visit(ingredientsParallel.astFunctionDef)
+     ingredientsParallel.astFunctionDef.returns = shatteredDataclass.countingVariableAnnotation
+     replacementMap = {statement.value: statement.target for statement in shatteredDataclass.listAnnAssign4DataclassUnpack}
+     ingredientsParallel.astFunctionDef = Z0Z_replaceMatchingASTnodes(ingredientsParallel.astFunctionDef, replacementMap) # type: ignore
+     ingredientsParallel = decorateCallableWithNumba(ingredientsParallel)
+
+     # ===========================================================
+     sourcePython = numbaFlow.sourceSequentialCallable
+     astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
+     if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
+     astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
+     ingredientsSequential = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
+     ingredientsSequential.astFunctionDef.name = numbaFlow.sequentialCallable
+     ingredientsSequential.astFunctionDef.args = Make.astArgumentsSpecification(args=shatteredDataclass.list_ast_argAnnotated4ArgumentsSpecification)
+     NodeReplacer(
+         findThis = ifThis.isReturn
+         , doThat = Then.replaceWith(Make.astReturn(shatteredDataclass.astTuple4AssignTargetsToFragments))
+     ).visit(ingredientsSequential.astFunctionDef)
+     NodeReplacer(
+         findThis = ifThis.isReturn
+         , doThat = Then.replaceWith(Make.astReturn(shatteredDataclass.astTuple4AssignTargetsToFragments))
+     ).visit(ingredientsSequential.astFunctionDef)
+     ingredientsSequential.astFunctionDef.returns = shatteredDataclass.astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns
+     replacementMap = {statement.value: statement.target for statement in shatteredDataclass.listAnnAssign4DataclassUnpack}
+     ingredientsSequential.astFunctionDef = Z0Z_replaceMatchingASTnodes(ingredientsSequential.astFunctionDef, replacementMap) # type: ignore
+     ingredientsSequential = decorateCallableWithNumba(ingredientsSequential)
+
+     # ===========================================================
+     ingredientsModuleNumbaUnified = IngredientsModule(
+         ingredientsFunction=[ingredientsInitialize,
+             ingredientsParallel,
+             ingredientsSequential,
+             ingredientsDispatcher], imports=LedgerOfImports(numbaFlow.source_astModule))
+
+     write_astModule(ingredientsModuleNumbaUnified, numbaFlow.pathFilenameDispatcher, numbaFlow.packageName)
+
+ if __name__ == '__main__':
+     makeNumbaFlow()
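
For readers unfamiliar with this style of code synthesis, the following is a minimal sketch of the find-and-replace pass that makeNumbaFlow applies, written with only the standard library ast module rather than the package's NodeReplacer, ifThis, and Then helpers. The function and field names (countSequential, groupsOfFolds) are illustrative stand-ins, not taken from the package.

import ast

sourceToy = """
def countSequential(state):
    state.groupsOfFolds = state.groupsOfFolds + 1
    return state
"""

class ReplaceReturnWithFragment(ast.NodeTransformer):
    # Swap every `return ...` for `return groupsOfFolds`, mirroring how the
    # synthesized parallel callable returns a primitive instead of the dataclass.
    def visit_Return(self, node: ast.Return) -> ast.Return:
        return ast.Return(value=ast.Name(id='groupsOfFolds', ctx=ast.Load()))

astModuleToy = ast.parse(sourceToy)
astModuleToy = ast.fix_missing_locations(ReplaceReturnWithFragment().visit(astModuleToy))
print(ast.unparse(astModuleToy))

The NodeReplacer(findThis=..., doThat=...) calls above appear to package this same NodeTransformer-style traversal behind predicate and action callables, so each transformation is expressed as an if-this-then-that pair.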
mapFolding/someAssemblyRequired/synthesizeNumbaJobVESTIGIAL.py
@@ -2,8 +2,7 @@
  from collections.abc import Sequence
  from typing import Any, cast, TYPE_CHECKING
  from mapFolding.filesystem import getFilenameFoldsTotal, getPathFilenameFoldsTotal
- from mapFolding.someAssemblyRequired import ( ifThis, Make, NodeReplacer, Then, )
- from mapFolding.someAssemblyRequired.transformationTools import LedgerOfImports
+ from mapFolding.someAssemblyRequired import ( ifThis, LedgerOfImports, Make, NodeReplacer, Then, )
  from mapFolding.theSSOT import ( ComputationState, raiseIfNoneGitHubIssueNumber3, getPathJobRootDEFAULT, )
  from os import PathLike
  from pathlib import Path
@@ -16,7 +15,7 @@ import copy
  import inspect
  import numpy
  if TYPE_CHECKING:
-     from mapFolding.someAssemblyRequired.synthesizeDataConverters import makeStateJob
+     from mapFolding.someAssemblyRequired.transformDataStructures import makeStateJobOUTDATED
  from mapFolding.someAssemblyRequired.ingredientsNumba import thisIsNumbaDotJit, decorateCallableWithNumba
  from mapFolding.someAssemblyRequired.ingredientsNumba import ParametersNumba, parametersNumbaDEFAULT
 
@@ -291,7 +290,7 @@ def writeJobNumba(mapShape: Sequence[int], algorithmSource: ModuleType, callable
      """
 
      # NOTE get the raw ingredients: data and the algorithm
-     stateJob = makeStateJob(mapShape, writeJob=False, **keywordArguments)
+     stateJob = makeStateJobOUTDATED(mapShape, writeJob=False, **keywordArguments)
      pythonSource: str = inspect.getsource(algorithmSource)
      astModule: ast.Module = ast.parse(pythonSource)
      setFunctionDef: set[ast.FunctionDef] = {statement for statement in astModule.body if isinstance(statement, ast.FunctionDef)}
mapFolding/someAssemblyRequired/transformDataStructures.py
@@ -0,0 +1,168 @@
+ """
+ Utilities for transforming complex data structures in Python code generation.
+
+ This module provides specialized tools for working with structured data types during
+ the code transformation process, with a particular focus on handling dataclasses. It
+ implements functionality that enables:
+
+ 1. Decomposing dataclasses into individual fields for efficient processing
+ 2. Creating optimized parameter passing for transformed functions
+ 3. Converting between different representations of data structures
+ 4. Serializing and deserializing computation state objects
+
+ The core functionality revolves around the "shattering" process that breaks down
+ a dataclass into its constituent components, making each field individually accessible
+ for code generation and optimization purposes. This dataclass handling is critical for
+ transforming algorithms that operate on unified state objects into optimized implementations
+ that work with primitive types directly.
+
+ While developed for transforming map folding computation state objects, the utilities are
+ designed to be applicable to various data structure transformation scenarios.
+ """
+
+ from collections.abc import Sequence
+ from importlib import import_module as importlib_import_module
+ from inspect import getsource as inspect_getsource
+ from mapFolding.beDRY import outfitCountFolds, validateListDimensions
+ from mapFolding.filesystem import getPathFilenameFoldsTotal
+ from mapFolding.someAssemblyRequired import (
+     ast_Identifier,
+     extractClassDef,
+     ifThis,
+     LedgerOfImports,
+     Make,
+     NodeCollector,
+     strDotStrCuzPyStoopid,
+     Then,
+     Z0Z_executeActionUnlessDescendantMatches,
+ )
+ from mapFolding.theSSOT import ComputationState, The
+ from pathlib import Path
+ from types import ModuleType
+ from typing import Any, Literal, overload
+ import ast
+ import dataclasses
+ import pickle
+
+ # Would `LibCST` be better than `ast` in some cases? https://github.com/hunterhogan/mapFolding/issues/7
+
+ countingIdentifierHARDCODED = 'groupsOfFolds'
+
+ @dataclasses.dataclass
+ class ShatteredDataclass:
+     astAssignDataclassRepack: ast.Assign
+     astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns: ast.Subscript
+     astTuple4AssignTargetsToFragments: ast.Tuple
+     countingVariableAnnotation: ast.expr
+     countingVariableName: ast.Name
+     ledgerDataclassANDFragments: LedgerOfImports = dataclasses.field(default_factory=LedgerOfImports)
+     list_ast_argAnnotated4ArgumentsSpecification: list[ast.arg] = dataclasses.field(default_factory=list)
+     list_keyword4DataclassInitialization: list[ast.keyword] = dataclasses.field(default_factory=list)
+     listAnnAssign4DataclassUnpack: list[ast.AnnAssign] = dataclasses.field(default_factory=list)
+     listAnnotations: list[ast.expr] = dataclasses.field(default_factory=list)
+     listNameDataclassFragments4Parameters: list[ast.Name] = dataclasses.field(default_factory=list)
+
+ def shatter_dataclassesDOTdataclass(logicalPathModule: strDotStrCuzPyStoopid, dataclass_Identifier: ast_Identifier, instance_Identifier: ast_Identifier) -> ShatteredDataclass:
+     """
+     Parameters:
+         logicalPathModule: gimme string cuz python is stoopid
+         dataclass_Identifier: The identifier of the dataclass to be dismantled.
+         instance_Identifier: In the synthesized module/function/scope, the identifier that will be used for the instance.
+     """
+     # TODO learn whether dataclasses.make_dataclass would be useful to transform the target dataclass into the `ShatteredDataclass`
+
+     module: ast.Module = ast.parse(inspect_getsource(importlib_import_module(logicalPathModule)))
+     astName_dataclassesDOTdataclass = Make.astName(dataclass_Identifier)
+
+     dataclass = extractClassDef(dataclass_Identifier, module)
+     if not isinstance(dataclass, ast.ClassDef):
+         raise ValueError(f"I could not find {dataclass_Identifier=} in {logicalPathModule=}.")
+
+     ledgerDataclassANDFragments = LedgerOfImports()
+     list_ast_argAnnotated4ArgumentsSpecification: list[ast.arg] = []
+     list_keyword4DataclassInitialization: list[ast.keyword] = []
+     listAnnAssign4DataclassUnpack: list[ast.AnnAssign] = []
+     listAnnotations: list[ast.expr] = []
+     listNameDataclassFragments4Parameters: list[ast.Name] = []
+
+     # TODO get the value from `groupsOfFolds: DatatypeFoldsTotal = dataclasses.field(default=DatatypeFoldsTotal(0), metadata={'theCountingIdentifier': True})`
+     countingVariable = countingIdentifierHARDCODED
+
+     addToLedgerPredicate = ifThis.isAnnAssignAndAnnotationIsName
+     addToLedgerAction = Then.Z0Z_ledger(logicalPathModule, ledgerDataclassANDFragments)
+     addToLedger = NodeCollector(addToLedgerPredicate, [addToLedgerAction])
+
+     exclusionPredicate = ifThis.is_keyword_IdentifierEqualsConstantValue('init', False)
+     appendKeywordAction = Then.Z0Z_appendKeywordMirroredTo(list_keyword4DataclassInitialization)
+     filteredAppendKeywordAction = Z0Z_executeActionUnlessDescendantMatches(exclusionPredicate, appendKeywordAction) # type: ignore
+
+     NodeCollector(
+         ifThis.isAnnAssignAndTargetIsName,
+         [Then.Z0Z_appendAnnAssignOf_nameDOTnameTo(instance_Identifier, listAnnAssign4DataclassUnpack)
+             , Then.append_targetTo(listNameDataclassFragments4Parameters) # type: ignore
+             , lambda node: addToLedger.visit(node)
+             , filteredAppendKeywordAction
+             , lambda node: list_ast_argAnnotated4ArgumentsSpecification.append(Make.ast_arg(node.target.id, node.annotation)) # type: ignore
+             , lambda node: listAnnotations.append(node.annotation) # type: ignore
+         ]
+     ).visit(dataclass)
+
+     shatteredDataclass = ShatteredDataclass(
+         astAssignDataclassRepack = Make.astAssign(listTargets=[Make.astName(instance_Identifier)], value=Make.astCall(astName_dataclassesDOTdataclass, list_astKeywords=list_keyword4DataclassInitialization))
+         , astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns = Make.astSubscript(Make.astName('tuple'), Make.astTuple(listAnnotations))
+         , astTuple4AssignTargetsToFragments = Make.astTuple(listNameDataclassFragments4Parameters, ast.Store())
+         , countingVariableAnnotation = next(ast_arg.annotation for ast_arg in list_ast_argAnnotated4ArgumentsSpecification if ast_arg.arg == countingVariable) or Make.astName('Any')
+         , countingVariableName = Make.astName(countingVariable)
+         , ledgerDataclassANDFragments = ledgerDataclassANDFragments
+         , list_ast_argAnnotated4ArgumentsSpecification = list_ast_argAnnotated4ArgumentsSpecification
+         , list_keyword4DataclassInitialization = list_keyword4DataclassInitialization
+         , listAnnAssign4DataclassUnpack = listAnnAssign4DataclassUnpack
+         , listAnnotations = listAnnotations
+         , listNameDataclassFragments4Parameters = listNameDataclassFragments4Parameters
+     )
+
+     shatteredDataclass.ledgerDataclassANDFragments.addImportFromStr(logicalPathModule, dataclass_Identifier)
+     return shatteredDataclass
+
+ def getSourceAlgorithmVESTIGIAL() -> ModuleType:
+     moduleImported: ModuleType = importlib_import_module(The.logicalPathModuleSourceAlgorithm)
+     return moduleImported
+
+ @overload
+ def makeStateJobOUTDATED(listDimensions: Sequence[int], *, writeJob: Literal[True], **keywordArguments: Any) -> Path: ...
+ @overload
+ def makeStateJobOUTDATED(listDimensions: Sequence[int], *, writeJob: Literal[False], **keywordArguments: Any) -> ComputationState: ...
+ def makeStateJobOUTDATED(listDimensions: Sequence[int], *, writeJob: bool = True, **keywordArguments: Any) -> ComputationState | Path:
+     """
+     Creates a computation state job for map folding calculations and optionally saves it to disk.
+
+     This function initializes a computation state for map folding calculations based on the given dimensions,
+     sets up the initial counting configuration, and can optionally save the state to a pickle file.
+
+     Parameters:
+         listDimensions: List of integers representing the dimensions of the map to be folded.
+         writeJob (True): Whether to save the state to disk.
+         **keywordArguments: Additional keyword arguments to pass to the computation state initialization.
+
+     Returns:
+         stateUniversal|pathFilenameJob: The computation state for the map folding calculations, or
+         the path to the saved state file if writeJob is True.
+     """
+     mapShape = validateListDimensions(listDimensions)
+     stateUniversal: ComputationState = outfitCountFolds(mapShape, **keywordArguments)
+
+     moduleSource: ModuleType = getSourceAlgorithmVESTIGIAL()
+     # TODO `countInitialize` is hardcoded
+     stateUniversal = moduleSource.countInitialize(stateUniversal)
+
+     if not writeJob:
+         return stateUniversal
+
+     pathFilenameChopChop = getPathFilenameFoldsTotal(stateUniversal.mapShape, None)
+     suffix = pathFilenameChopChop.suffix
+     pathJob = Path(str(pathFilenameChopChop)[0:-len(suffix)])
+     pathJob.mkdir(parents=True, exist_ok=True)
+     pathFilenameJob = pathJob / 'stateJob.pkl'
+
+     pathFilenameJob.write_bytes(pickle.dumps(stateUniversal))
+     return pathFilenameJob
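
As a companion to the "shattering" description in the module docstring above, here is a minimal, self-contained sketch of the same idea using only the standard dataclasses and ast modules: decompose a dataclass into per-field unpack assignments plus a repack call. ComputationStateToy, its fields, and the `state` identifier are hypothetical stand-ins, not the package's ComputationState.

import ast
import dataclasses

@dataclasses.dataclass
class ComputationStateToy:
    # Hypothetical stand-in for a unified state dataclass.
    groupsOfFolds: int
    leaf1ndex: int

# Unpack statements such as `groupsOfFolds = state.groupsOfFolds`.
listUnpack = [
    ast.Assign(
        targets=[ast.Name(id=field.name, ctx=ast.Store())],
        value=ast.Attribute(value=ast.Name(id='state', ctx=ast.Load()), attr=field.name, ctx=ast.Load()),
    )
    for field in dataclasses.fields(ComputationStateToy)
]

# Repack statement: `state = ComputationStateToy(groupsOfFolds=groupsOfFolds, leaf1ndex=leaf1ndex)`.
astRepack = ast.Assign(
    targets=[ast.Name(id='state', ctx=ast.Store())],
    value=ast.Call(
        func=ast.Name(id='ComputationStateToy', ctx=ast.Load()),
        args=[],
        keywords=[ast.keyword(arg=field.name, value=ast.Name(id=field.name, ctx=ast.Load()))
                  for field in dataclasses.fields(ComputationStateToy)],
    ),
)

astModuleToy = ast.Module(body=[*listUnpack, astRepack], type_ignores=[])
print(ast.unparse(ast.fix_missing_locations(astModuleToy)))

Compare with shatter_dataclassesDOTdataclass above, which additionally collects the field annotations, ast.arg parameter specifications, and the imports the generated fragments need.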