mapFolding-0.9.4-py3-none-any.whl → mapFolding-0.9.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. mapFolding/__init__.py +41 -7
  2. mapFolding/basecamp.py +100 -9
  3. mapFolding/beDRY.py +7 -15
  4. mapFolding/dataBaskets.py +12 -0
  5. mapFolding/datatypes.py +4 -4
  6. mapFolding/oeis.py +2 -7
  7. mapFolding/someAssemblyRequired/RecipeJob.py +97 -3
  8. mapFolding/someAssemblyRequired/Z0Z_makeSomeModules.py +112 -12
  9. mapFolding/someAssemblyRequired/__init__.py +26 -28
  10. mapFolding/someAssemblyRequired/_theTypes.py +13 -19
  11. mapFolding/someAssemblyRequired/_tool_Make.py +4 -6
  12. mapFolding/someAssemblyRequired/_tool_Then.py +17 -22
  13. mapFolding/someAssemblyRequired/_toolboxAntecedents.py +32 -15
  14. mapFolding/someAssemblyRequired/_toolboxContainers.py +124 -29
  15. mapFolding/someAssemblyRequired/makeJobTheorem2Numba.py +274 -0
  16. mapFolding/someAssemblyRequired/synthesizeNumbaJob.py +3 -2
  17. mapFolding/someAssemblyRequired/toolboxNumba.py +3 -27
  18. mapFolding/someAssemblyRequired/transformationTools.py +8 -120
  19. mapFolding/syntheticModules/daoOfMapFolding.py +74 -0
  20. mapFolding/syntheticModules/dataPacking.py +1 -1
  21. mapFolding/syntheticModules/theorem2Numba.py +2 -8
  22. mapFolding/syntheticModules/theorem2Trimmed.py +43 -0
  23. {mapfolding-0.9.4.dist-info → mapfolding-0.9.5.dist-info}/METADATA +1 -1
  24. {mapfolding-0.9.4.dist-info → mapfolding-0.9.5.dist-info}/RECORD +29 -27
  25. tests/test_computations.py +1 -1
  26. mapFolding/Z0Z_flowControl.py +0 -117
  27. {mapfolding-0.9.4.dist-info → mapfolding-0.9.5.dist-info}/WHEEL +0 -0
  28. {mapfolding-0.9.4.dist-info → mapfolding-0.9.5.dist-info}/entry_points.txt +0 -0
  29. {mapfolding-0.9.4.dist-info → mapfolding-0.9.5.dist-info}/licenses/LICENSE +0 -0
  30. {mapfolding-0.9.4.dist-info → mapfolding-0.9.5.dist-info}/top_level.txt +0 -0
mapFolding/someAssemblyRequired/makeJobTheorem2Numba.py
@@ -0,0 +1,274 @@
+ from mapFolding import getPathFilenameFoldsTotal, raiseIfNoneGitHubIssueNumber3, The
+ from mapFolding.someAssemblyRequired import (
+     ast_Identifier,
+     be,
+     extractFunctionDef,
+     ifThis,
+     IngredientsFunction,
+     IngredientsModule,
+     LedgerOfImports,
+     Make,
+     NodeChanger,
+     NodeTourist,
+     str_nameDOTname,
+     Then,
+ )
+ from mapFolding.someAssemblyRequired.RecipeJob import RecipeJobTheorem2Numba
+ from mapFolding.someAssemblyRequired.toolboxNumba import parametersNumbaLight, SpicesJobNumba, decorateCallableWithNumba
+ from mapFolding.someAssemblyRequired.transformationTools import dictionaryEstimates, write_astModule, makeInitializedComputationState
+ from mapFolding.syntheticModules.initializeCount import initializeGroupsOfFolds
+ from mapFolding.dataBaskets import MapFoldingState
+ from pathlib import PurePosixPath
+ from typing import cast, NamedTuple
+ from Z0Z_tools import autoDecodingRLE
+ import ast
+ """Synthesize one file to compute `foldsTotal` of `mapShape`."""
+
+ list_IdentifiersNotUsedAllHARDCODED = ['concurrencyLimit', 'foldsTotal', 'mapShape',]
+ list_IdentifiersNotUsedParallelSequentialHARDCODED = ['indexLeaf']
+ list_IdentifiersNotUsedSequentialHARDCODED = ['foldGroups', 'taskDivisions', 'taskIndex',]
+
+ list_IdentifiersReplacedHARDCODED = ['groupsOfFolds',]
+
+ list_IdentifiersStaticValuesHARDCODED = ['dimensionsTotal', 'leavesTotal',]
+
+ list_IdentifiersNotUsedHARDCODED = list_IdentifiersStaticValuesHARDCODED + list_IdentifiersReplacedHARDCODED + list_IdentifiersNotUsedAllHARDCODED + list_IdentifiersNotUsedParallelSequentialHARDCODED + list_IdentifiersNotUsedSequentialHARDCODED
+
+ def addLauncherNumbaProgress(ingredientsModule: IngredientsModule, ingredientsFunction: IngredientsFunction, job: RecipeJobTheorem2Numba, spices: SpicesJobNumba) -> tuple[IngredientsModule, IngredientsFunction]:
+     """
+     Add progress tracking capabilities to a Numba-optimized function.
+
+     This function modifies both the module and the function to integrate Numba-compatible
+     progress tracking for long-running calculations. It performs several key transformations:
+
+     1. Adds a progress bar parameter to the function signature
+     2. Replaces counting increments with progress bar updates
+     3. Creates a launcher section that displays and updates progress
+     4. Configures file output to save results upon completion
+
+     The progress tracking is particularly important for map folding calculations,
+     which can take hours or days to complete, providing visual feedback and
+     estimated completion times.
+
+     Parameters:
+         ingredientsModule: The module where the function is defined.
+         ingredientsFunction: The function to modify with progress tracking.
+         job: Configuration specifying shape details and output paths.
+         spices: Configuration specifying progress bar details.
+
+     Returns:
+         A tuple containing the modified module and function with progress tracking.
+     """
+     linesLaunch: str = f"""
+ if __name__ == '__main__':
+     with ProgressBar(total={job.foldsTotalEstimated}, update_interval=2) as statusUpdate:
+         {job.countCallable}(statusUpdate)
+         foldsTotal = statusUpdate.n * {job.state.leavesTotal}
+         print('\\nmap {job.state.mapShape} =', foldsTotal)
+         writeStream = open('{job.pathFilenameFoldsTotal.as_posix()}', 'w')
+         writeStream.write(str(foldsTotal))
+         writeStream.close()
+ """
+     numba_progressPythonClass: ast_Identifier = 'ProgressBar'
+     numba_progressNumbaType: ast_Identifier = 'ProgressBarType'
+     ingredientsModule.imports.addImportFrom_asStr('numba_progress', numba_progressPythonClass)
+     ingredientsModule.imports.addImportFrom_asStr('numba_progress', numba_progressNumbaType)
+
+     ast_argNumbaProgress = ast.arg(arg=spices.numbaProgressBarIdentifier, annotation=ast.Name(id=numba_progressPythonClass, ctx=ast.Load()))
+     ingredientsFunction.astFunctionDef.args.args.append(ast_argNumbaProgress)
+
+     findThis = ifThis.isAugAssign_targetIs(ifThis.isName_Identifier(job.shatteredDataclass.countingVariableName.id))
+     doThat = Then.replaceWith(Make.Expr(Make.Call(Make.Attribute(Make.Name(spices.numbaProgressBarIdentifier), 'update'), [Make.Constant(1)])))
+     countWithProgressBar = NodeChanger(findThis, doThat)
+     countWithProgressBar.visit(ingredientsFunction.astFunctionDef)
+
+     removeReturnStatement = NodeChanger(be.Return, Then.removeIt)
+     removeReturnStatement.visit(ingredientsFunction.astFunctionDef)
+     ingredientsFunction.astFunctionDef.returns = Make.Constant(value=None)
+
+     ingredientsModule.appendLauncher(ast.parse(linesLaunch))
+
+     return ingredientsModule, ingredientsFunction
+
+ def move_arg2FunctionDefDOTbodyAndAssignInitialValues(ingredientsFunction: IngredientsFunction, job: RecipeJobTheorem2Numba) -> IngredientsFunction:
+     """
+     Convert function parameters into initialized variables with concrete values.
+
+     This function implements a critical transformation that converts function parameters
+     into statically initialized variables in the function body. This enables several
+     optimizations:
+
+     1. Eliminating parameter passing overhead.
+     2. Embedding concrete values directly in the code.
+     3. Allowing Numba to optimize based on known value characteristics.
+     4. Simplifying function signatures for specialized use cases.
+
+     The function handles different data types (scalars, arrays, custom types) appropriately,
+     replacing abstract parameter references with concrete values from the computation state.
+     It also removes unused parameters and variables to eliminate dead code.
+
+     Parameters:
+         ingredientsFunction: The function to transform.
+         job: Recipe containing concrete values for parameters and field metadata.
+
+     Returns:
+         The modified function with parameters converted to initialized variables.
+     """
+     ingredientsFunction.imports.update(job.shatteredDataclass.imports)
+
+     list_argCuzMyBrainRefusesToThink = ingredientsFunction.astFunctionDef.args.args + ingredientsFunction.astFunctionDef.args.posonlyargs + ingredientsFunction.astFunctionDef.args.kwonlyargs
+     list_arg_arg: list[ast_Identifier] = [ast_arg.arg for ast_arg in list_argCuzMyBrainRefusesToThink]
+     listName: list[ast.Name] = []
+     NodeTourist(be.Name, Then.appendTo(listName)).visit(ingredientsFunction.astFunctionDef)
+     list_Identifiers: list[ast_Identifier] = [astName.id for astName in listName]
+     list_IdentifiersNotUsed: list[ast_Identifier] = list(set(list_arg_arg) - set(list_Identifiers))
+
+     for ast_arg in list_argCuzMyBrainRefusesToThink:
+         if ast_arg.arg in job.shatteredDataclass.field2AnnAssign:
+             if ast_arg.arg in list_IdentifiersNotUsed:
+                 pass
+             else:
+                 ImaAnnAssign, elementConstructor = job.shatteredDataclass.Z0Z_field2AnnAssign[ast_arg.arg]
+                 match elementConstructor:
+                     case 'scalar':
+                         ImaAnnAssign.value.args[0].value = int(job.state.__dict__[ast_arg.arg]) # type: ignore
+                     case 'array':
+                         dataAsStrRLE: str = autoDecodingRLE(job.state.__dict__[ast_arg.arg], True)
+                         dataAs_astExpr: ast.expr = cast(ast.Expr, ast.parse(dataAsStrRLE).body[0]).value
+                         ImaAnnAssign.value.args = [dataAs_astExpr] # type: ignore
+                     case _:
+                         list_exprDOTannotation: list[ast.expr] = []
+                         list_exprDOTvalue: list[ast.expr] = []
+                         for dimension in job.state.mapShape:
+                             list_exprDOTannotation.append(Make.Name(elementConstructor))
+                             list_exprDOTvalue.append(Make.Call(Make.Name(elementConstructor), [Make.Constant(dimension)]))
+                         ImaAnnAssign.annotation.slice.elts = list_exprDOTannotation # type: ignore
+                         ImaAnnAssign.value.elts = list_exprDOTvalue # type: ignore
+
+                 ingredientsFunction.astFunctionDef.body.insert(0, ImaAnnAssign)
+
+         findThis = ifThis.is_arg_Identifier(ast_arg.arg)
+         remove_arg = NodeChanger(findThis, Then.removeIt)
+         remove_arg.visit(ingredientsFunction.astFunctionDef)
+
+     ast.fix_missing_locations(ingredientsFunction.astFunctionDef)
+     return ingredientsFunction
+
+ def makeJobNumba(job: RecipeJobTheorem2Numba, spices: SpicesJobNumba) -> None:
+
+     astFunctionDef = extractFunctionDef(job.source_astModule, job.countCallable)
+     if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
+     ingredientsCount: IngredientsFunction = IngredientsFunction(astFunctionDef, LedgerOfImports())
+
+     # Remove `foldGroups` and any other unused statements, so you can dynamically determine which variables are not used
+     findThis = ifThis.isAssignAndTargets0Is(ifThis.isSubscript_Identifier('foldGroups'))
+     doThat = Then.removeIt
+     remove_foldGroups = NodeChanger(findThis, doThat)
+     remove_foldGroups.visit(ingredientsCount.astFunctionDef)
+
+     # Replace identifiers that have static values with those values, so you can dynamically determine which variables are not used
+     list_IdentifiersStaticValues = list_IdentifiersStaticValuesHARDCODED
+     for identifier in list_IdentifiersStaticValues:
+         findThis = ifThis.isName_Identifier(identifier)
+         doThat = Then.replaceWith(Make.Constant(int(job.state.__dict__[identifier])))
+         NodeChanger(findThis, doThat).visit(ingredientsCount.astFunctionDef)
+
+     ingredientsModule = IngredientsModule()
+     # This launcher eliminates the use of one identifier, so run it now and you can dynamically determine which variables are not used
+     if spices.useNumbaProgressBar:
+         ingredientsModule, ingredientsCount = addLauncherNumbaProgress(ingredientsModule, ingredientsCount, job, spices)
+         spices.parametersNumba['nogil'] = True
+     else:
+         linesLaunch: str = f"""
+ if __name__ == '__main__':
+     import time
+     timeStart = time.perf_counter()
+     foldsTotal = {job.countCallable}() * {job.state.leavesTotal}
+     print(time.perf_counter() - timeStart)
+     print('\\nmap {job.state.mapShape} =', foldsTotal)
+     writeStream = open('{job.pathFilenameFoldsTotal.as_posix()}', 'w')
+     writeStream.write(str(foldsTotal))
+     writeStream.close()
+ """
+         # from mapFolding.oeis import getFoldsTotalKnown
+         # print(foldsTotal == getFoldsTotalKnown({job.state.mapShape}))
+         ingredientsModule.appendLauncher(ast.parse(linesLaunch))
+         changeReturnParallelCallable = NodeChanger(be.Return, Then.replaceWith(Make.Return(job.shatteredDataclass.countingVariableName)))
+         changeReturnParallelCallable.visit(ingredientsCount.astFunctionDef)
+         ingredientsCount.astFunctionDef.returns = job.shatteredDataclass.countingVariableAnnotation
+
+     ingredientsCount = move_arg2FunctionDefDOTbodyAndAssignInitialValues(ingredientsCount, job)
+
+     class DatatypeConfig(NamedTuple):
+         Z0Z_module: str_nameDOTname
+         fml: ast_Identifier
+         Z0Z_type_name: ast_Identifier
+         Z0Z_asname: ast_Identifier | None = None
+
+     listDatatypeConfigs = [
+         DatatypeConfig(fml='DatatypeLeavesTotal', Z0Z_module='numba', Z0Z_type_name='uint8'),
+         DatatypeConfig(fml='DatatypeElephino', Z0Z_module='numba', Z0Z_type_name='uint16'),
+         DatatypeConfig(fml='DatatypeFoldsTotal', Z0Z_module='numba', Z0Z_type_name='uint64'),
+     ]
+
+     for datatypeConfig in listDatatypeConfigs:
+         ingredientsModule.imports.addImportFrom_asStr(datatypeConfig.Z0Z_module, datatypeConfig.Z0Z_type_name)
+         statement = Make.Assign(
+             [Make.Name(datatypeConfig.fml, ast.Store())],
+             Make.Name(datatypeConfig.Z0Z_type_name)
+         )
+         ingredientsModule.appendPrologue(statement=statement)
+
+     ingredientsCount.imports.removeImportFromModule('mapFolding.theSSOT')
+
+     listNumPyTypeConfigs = [
+         DatatypeConfig(fml='Array1DLeavesTotal', Z0Z_module='numpy', Z0Z_type_name='uint8', Z0Z_asname='Array1DLeavesTotal'),
+         DatatypeConfig(fml='Array1DElephino', Z0Z_module='numpy', Z0Z_type_name='uint16', Z0Z_asname='Array1DElephino'),
+         DatatypeConfig(fml='Array3D', Z0Z_module='numpy', Z0Z_type_name='uint8', Z0Z_asname='Array3D'),
+     ]
+
+     for typeConfig in listNumPyTypeConfigs:
+         ingredientsCount.imports.removeImportFrom(typeConfig.Z0Z_module, None, typeConfig.fml)
+         ingredientsCount.imports.addImportFrom_asStr(typeConfig.Z0Z_module, typeConfig.Z0Z_type_name, typeConfig.Z0Z_asname)
+
+     ingredientsCount.astFunctionDef.decorator_list = [] # TODO low-priority, handle this more elegantly
+     # TODO when I add the numba-style function signature back to the decorator, the logic needs to handle `ProgressBarType:`
+     ingredientsCount = decorateCallableWithNumba(ingredientsCount, spices.parametersNumba)
+
+     ingredientsModule.appendIngredientsFunction(ingredientsCount)
+     write_astModule(ingredientsModule, job.pathFilenameModule, job.packageIdentifier)
+
+ """
+ Overview
+ - the code starts life in theDao.py, which has many optimizations;
+ - `makeNumbaOptimizedFlow` increases optimization, especially by using numba;
+ - `makeJobNumba` increases optimization, especially by limiting its capabilities to just one set of parameters;
+ - the synthesized module must run well as a standalone interpreted-Python script;
+ - the next major optimization step will (probably) be to use the module synthesized by `makeJobNumba` to compile a standalone executable.
+ - Nevertheless, at each major optimization step, the code is constantly being improved and optimized, so everything must be well organized (read: semantic), able to handle a range of arbitrary upstream changes, and must not disrupt downstream transformations.
+
+ Necessary
+ - Move the function's parameters to the function body,
+ - initialize identifiers with their state types and values.
+
+ Optimizations
+ - replace static-valued identifiers with their values
+ - narrowly focused imports
+
+ Minutia
+ - do not use a `with` statement inside numba-jitted code, except to enter numba's object mode
+ """
+
+ if __name__ == '__main__':
+     mapShape = (1,46)
+     state = MapFoldingState(mapShape)
+     state = initializeGroupsOfFolds(state)
+     # foldsTotalEstimated = getFoldsTotalKnown(state.mapShape) // state.leavesTotal
+     # foldsTotalEstimated = dictionaryEstimates[state.mapShape] // state.leavesTotal
+     foldsTotalEstimated = 0
+     pathModule = PurePosixPath(The.pathPackage, 'jobs')
+     pathFilenameFoldsTotal = PurePosixPath(getPathFilenameFoldsTotal(state.mapShape, pathModule))
+     aJob = RecipeJobTheorem2Numba(state, foldsTotalEstimated, pathModule=pathModule, pathFilenameFoldsTotal=pathFilenameFoldsTotal)
+     spices = SpicesJobNumba(useNumbaProgressBar=False, parametersNumba=parametersNumbaLight)
+     # spices = SpicesJobNumba()
+     makeJobNumba(aJob, spices)
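Both of the transformations defined above, replacing statically valued identifiers with constants and moving parameters into the function body as initialized assignments, are find-and-replace passes over the AST. A minimal sketch of the same idea using only the standard library's ast.NodeTransformer, rather than the package's NodeChanger/ifThis/Then helpers (inlineStaticValues and Inliner are illustrative names, not part of mapFolding):

import ast

def inlineStaticValues(source: str, staticValues: dict[str, int]) -> str:
    """Replace loads of known-constant identifiers with literal constants."""
    class Inliner(ast.NodeTransformer):
        def visit_Name(self, node: ast.Name) -> ast.AST:
            # Rewrite reads only; assignment targets must remain Name nodes.
            if isinstance(node.ctx, ast.Load) and node.id in staticValues:
                return ast.copy_location(ast.Constant(staticValues[node.id]), node)
            return node
    tree = Inliner().visit(ast.parse(source))
    return ast.unparse(ast.fix_missing_locations(tree))

print(inlineStaticValues('foldsTotal = groupsOfFolds * leavesTotal', {'leavesTotal': 46}))
# foldsTotal = groupsOfFolds * 46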
mapFolding/someAssemblyRequired/synthesizeNumbaJob.py
@@ -300,10 +300,11 @@ if __name__ == '__main__':
  """
 
  if __name__ == '__main__':
-     mapShape = (2,21)
+     mapShape = (1,46)
      state = makeInitializedComputationState(mapShape)
      # foldsTotalEstimated = getFoldsTotalKnown(state.mapShape) // state.leavesTotal
-     foldsTotalEstimated = dictionaryEstimates[state.mapShape] // state.leavesTotal
+     # foldsTotalEstimated = dictionaryEstimates[state.mapShape] // state.leavesTotal
+     foldsTotalEstimated = 0
      pathModule = PurePosixPath(The.pathPackage, 'jobs')
      pathFilenameFoldsTotal = PurePosixPath(getPathFilenameFoldsTotal(state.mapShape, pathModule))
      aJob = RecipeJob(state, foldsTotalEstimated, pathModule=pathModule, pathFilenameFoldsTotal=pathFilenameFoldsTotal)
mapFolding/someAssemblyRequired/toolboxNumba.py
@@ -16,23 +16,14 @@ performance improvements while preserving code semantics and correctness.
  """
 
  from collections.abc import Callable, Sequence
- from mapFolding.someAssemblyRequired import ast_Identifier, be, IngredientsFunction, Make, NodeTourist, RecipeSynthesizeFlow, str_nameDOTname, Then
+ from mapFolding import NotRequired, TypedDict
+ from mapFolding.someAssemblyRequired import ast_Identifier, IngredientsFunction, Make, RecipeSynthesizeFlow, str_nameDOTname
  from mapFolding.someAssemblyRequired.transformationTools import makeNewFlow, write_astModule
  from numba.core.compiler import CompilerBase as numbaCompilerBase
- from typing import Any, cast, Final, TYPE_CHECKING, TypeGuard
+ from typing import Any, cast, Final
  import ast
  import dataclasses
 
- try:
-     from typing import NotRequired
- except Exception:
-     from typing_extensions import NotRequired # pyright: ignore[reportShadowedImports]
-
- if TYPE_CHECKING:
-     from typing import TypedDict
- else:
-     TypedDict = dict[str,Any]
-
  # Consolidate settings classes through inheritance https://github.com/hunterhogan/mapFolding/issues/15
  theNumbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()
 
@@ -60,17 +51,8 @@ class ParametersNumba(TypedDict):
      signature_or_function: NotRequired[Any | Callable[..., Any] | str | tuple[Any, ...]]
      target: NotRequired[str]
 
- parametersNumbaFailEarly: Final[ParametersNumba] = { '_nrt': True, 'boundscheck': True, 'cache': True, 'error_model': 'python', 'fastmath': False, 'forceinline': True, 'inline': 'always', 'looplift': False, 'no_cfunc_wrapper': False, 'no_cpython_wrapper': False, 'nopython': True, 'parallel': False, }
- """For a production function: speed is irrelevant, error discovery is paramount, must be compatible with anything downstream."""
  parametersNumbaDefault: Final[ParametersNumba] = { '_nrt': True, 'boundscheck': False, 'cache': True, 'error_model': 'numpy', 'fastmath': True, 'forceinline': True, 'inline': 'always', 'looplift': False, 'no_cfunc_wrapper': False, 'no_cpython_wrapper': False, 'nopython': True, 'parallel': False, }
  """Middle of the road: fast, lean, but will talk to non-jitted functions."""
- parametersNumbaParallelDEFAULT: Final[ParametersNumba] = { **parametersNumbaDefault, '_nrt': True, 'parallel': True, }
- """Middle of the road: fast, lean, but will talk to non-jitted functions."""
- parametersNumbaSuperJit: Final[ParametersNumba] = { **parametersNumbaDefault, 'no_cfunc_wrapper': True, 'no_cpython_wrapper': True, }
- """Speed, no helmet, no talking to non-jitted functions."""
- parametersNumbaSuperJitParallel: Final[ParametersNumba] = { **parametersNumbaSuperJit, '_nrt': True, 'parallel': True, }
- """Speed, no helmet, concurrency, no talking to non-jitted functions."""
- parametersNumbaMinimum: Final[ParametersNumba] = { '_nrt': True, 'boundscheck': True, 'cache': True, 'error_model': 'numpy', 'fastmath': True, 'forceinline': False, 'inline': 'always', 'looplift': False, 'no_cfunc_wrapper': False, 'no_cpython_wrapper': False, 'nopython': False, 'forceobj': True, 'parallel': False, }
  parametersNumbaLight: Final[ParametersNumba] = {'cache': True, 'error_model': 'numpy', 'fastmath': True, 'forceinline': True}
 
  Z0Z_numbaDataTypeModule: str_nameDOTname = 'numba'
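Because ParametersNumba is a TypedDict whose keys are all NotRequired, a preset such as parametersNumbaLight can be unpacked directly into numba.jit; the @jit(cache=True, error_model='numpy', fastmath=True, forceinline=True) decorators on the synthesized modules later in this diff are the written-out equivalent. A short sketch (the summed function is illustrative, not from the package):

from numba import jit

parametersNumbaLight = {'cache': True, 'error_model': 'numpy', 'fastmath': True, 'forceinline': True}

@jit(**parametersNumbaLight)
def summed(n: int) -> int:
    # A trivial jitted kernel; the preset controls caching, the error model, and inlining.
    total = 0
    for index in range(n):
        total += index
    return total

print(summed(10))  # 45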
@@ -188,11 +170,5 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow) -> None:
 
      write_astModule(ingredientsModuleNumbaUnified, numbaFlow.pathFilenameDispatcher, numbaFlow.packageIdentifier)
 
- def getIt(astCallConcurrencyResult: list[ast.Call]) -> Callable[[ast.AST], ast.AST]:
-     def workhorse(node: ast.AST) -> ast.AST:
-         NodeTourist(be.Call, Then.appendTo(astCallConcurrencyResult)).visit(node)
-         return node
-     return workhorse
-
  if __name__ == '__main__':
      makeNumbaFlow(theNumbaFlow)
mapFolding/someAssemblyRequired/transformationTools.py
@@ -26,6 +26,7 @@ from mapFolding.someAssemblyRequired import (
      ast_Identifier,
      astModuleToIngredientsFunction,
      be,
+     DeReConstructField2ast,
      DOT,
      extractClassDef,
      grab,
@@ -177,120 +178,6 @@ def makeInitializedComputationState(mapShape: tuple[int, ...], writeJob: bool =
      pathFilenameJob.write_bytes(pickle.dumps(stateUniversal))
      return pathFilenameJob
 
- @dataclasses.dataclass
- class DeReConstructField2ast:
-     """
-     Transform a dataclass field into AST node representations for code generation.
-
-     This class extracts and transforms a dataclass Field object into various AST node
-     representations needed for code generation. It handles the conversion of field
-     attributes, type annotations, and metadata into AST constructs that can be used
-     to reconstruct the field in generated code.
-
-     The class is particularly important for decomposing dataclass fields (like those in
-     ComputationState) to enable their use in specialized contexts like Numba-optimized
-     functions, where the full dataclass cannot be directly used but its contents need
-     to be accessible.
-
-     Each field is processed according to its type and metadata to create appropriate
-     variable declarations, type annotations, and initialization code as AST nodes.
-     """
-     dataclassesDOTdataclassLogicalPathModule: dataclasses.InitVar[str_nameDOTname]
-     dataclassClassDef: dataclasses.InitVar[ast.ClassDef]
-     dataclassesDOTdataclassInstance_Identifier: dataclasses.InitVar[ast_Identifier]
-     field: dataclasses.InitVar[dataclasses.Field[Any]]
-
-     ledger: LedgerOfImports = dataclasses.field(default_factory=LedgerOfImports)
-
-     name: ast_Identifier = dataclasses.field(init=False)
-     typeBuffalo: type[Any] | str | Any = dataclasses.field(init=False)
-     default: Any | None = dataclasses.field(init=False)
-     default_factory: Callable[..., Any] | None = dataclasses.field(init=False)
-     repr: bool = dataclasses.field(init=False)
-     hash: bool | None = dataclasses.field(init=False)
-     init: bool = dataclasses.field(init=False)
-     compare: bool = dataclasses.field(init=False)
-     metadata: dict[Any, Any] = dataclasses.field(init=False)
-     kw_only: bool = dataclasses.field(init=False)
-
-     astName: ast.Name = dataclasses.field(init=False)
-     ast_keyword_field__field: ast.keyword = dataclasses.field(init=False)
-     ast_nameDOTname: ast.Attribute = dataclasses.field(init=False)
-     astAnnotation: ast.expr = dataclasses.field(init=False)
-     ast_argAnnotated: ast.arg = dataclasses.field(init=False)
-     astAnnAssignConstructor: ast.AnnAssign|ast.Assign = dataclasses.field(init=False)
-     Z0Z_hack: tuple[ast.AnnAssign|ast.Assign, str] = dataclasses.field(init=False)
-
-     def __post_init__(self, dataclassesDOTdataclassLogicalPathModule: str_nameDOTname, dataclassClassDef: ast.ClassDef, dataclassesDOTdataclassInstance_Identifier: ast_Identifier, field: dataclasses.Field[Any]) -> None:
-         self.compare = field.compare
-         self.default = field.default if field.default is not dataclasses.MISSING else None
-         self.default_factory = field.default_factory if field.default_factory is not dataclasses.MISSING else None
-         self.hash = field.hash
-         self.init = field.init
-         self.kw_only = field.kw_only if field.kw_only is not dataclasses.MISSING else False
-         self.metadata = dict(field.metadata)
-         self.name = field.name
-         self.repr = field.repr
-         self.typeBuffalo = field.type
-
-         self.astName = Make.Name(self.name)
-         self.ast_keyword_field__field = Make.keyword(self.name, self.astName)
-         self.ast_nameDOTname = Make.Attribute(Make.Name(dataclassesDOTdataclassInstance_Identifier), self.name)
-
-         sherpa = NodeTourist(ifThis.isAnnAssign_targetIs(ifThis.isName_Identifier(self.name)), Then.extractIt(DOT.annotation)).captureLastMatch(dataclassClassDef)
-         if sherpa is None: raise raiseIfNoneGitHubIssueNumber3
-         else: self.astAnnotation = sherpa
-
-         self.ast_argAnnotated = Make.arg(self.name, self.astAnnotation)
-         """
-         from ast import Module, Expr, Subscript, Name, Tuple, Load
-         Subscript(
-             value=Name(id='ndarray', ctx=Load()),
-             slice=Tuple(
-                 elts=[
-                     Subscript(
-                         value=Name(id='tuple', ctx=Load()),
-                         slice=Name(id='int', ctx=Load()),
-                         ctx=Load()),
-                     Subscript(
-                         value=Name(id='dtype', ctx=Load()),
-                         slice=Name(id='NumPyLeavesTotal', ctx=Load()),
-                         ctx=Load())],
-                 ctx=Load()),
-             ctx=Load()
-         )
-
-         """
-         dtype = self.metadata.get('dtype', None)
-         if dtype:
-             moduleWithLogicalPath: str_nameDOTname = 'numpy'
-             annotationType = 'ndarray'
-             self.ledger.addImportFrom_asStr(moduleWithLogicalPath, annotationType)
-             self.ledger.addImportFrom_asStr(moduleWithLogicalPath, 'dtype')
-             axesSubscript = Make.Subscript(Make.Name('tuple'), Make.Name('uint8'))
-             dtype_asnameName: ast.Name = self.astAnnotation # type: ignore
-             if dtype_asnameName.id == 'Array3D':
-                 axesSubscript = Make.Subscript(Make.Name('tuple'), Make.Tuple([Make.Name('uint8'), Make.Name('uint8'), Make.Name('uint8')]))
-             ast_expr = Make.Subscript(Make.Name(annotationType), Make.Tuple([axesSubscript, Make.Subscript(Make.Name('dtype'), dtype_asnameName)]))
-             constructor = 'array'
-             self.ledger.addImportFrom_asStr(moduleWithLogicalPath, constructor)
-             dtypeIdentifier: ast_Identifier = dtype.__name__
-             self.ledger.addImportFrom_asStr(moduleWithLogicalPath, dtypeIdentifier, dtype_asnameName.id)
-             self.astAnnAssignConstructor = Make.AnnAssign(self.astName, ast_expr, Make.Call(Make.Name(constructor), list_astKeywords=[Make.keyword('dtype', dtype_asnameName)]))
-             self.astAnnAssignConstructor = Make.Assign([self.astName], Make.Call(Make.Name(constructor), list_astKeywords=[Make.keyword('dtype', dtype_asnameName)]))
-             self.Z0Z_hack = (self.astAnnAssignConstructor, 'array')
-         elif isinstance(self.astAnnotation, ast.Name):
-             self.astAnnAssignConstructor = Make.AnnAssign(self.astName, self.astAnnotation, Make.Call(self.astAnnotation, [Make.Constant(-1)]))
-             self.Z0Z_hack = (self.astAnnAssignConstructor, 'scalar')
-         elif isinstance(self.astAnnotation, ast.Subscript):
-             elementConstructor: ast_Identifier = self.metadata['elementConstructor']
-             self.ledger.addImportFrom_asStr(dataclassesDOTdataclassLogicalPathModule, elementConstructor)
-             takeTheTuple: ast.Tuple = deepcopy(self.astAnnotation.slice) # type: ignore
-             self.astAnnAssignConstructor = Make.AnnAssign(self.astName, self.astAnnotation, takeTheTuple)
-             self.Z0Z_hack = (self.astAnnAssignConstructor, elementConstructor)
-         if isinstance(self.astAnnotation, ast.Name):
-             self.ledger.addImportFrom_asStr(dataclassesDOTdataclassLogicalPathModule, self.astAnnotation.id) # pyright: ignore [reportUnknownArgumentType, reportUnknownMemberType, reportIJustCalledATypeGuardMethod_WTF]
-
  def shatter_dataclassesDOTdataclass(logicalPathModule: str_nameDOTname, dataclass_Identifier: ast_Identifier, instance_Identifier: ast_Identifier) -> ShatteredDataclass:
      """
      Decompose a dataclass definition into AST components for manipulation and code generation.
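DeReConstructField2ast is relocated rather than deleted: the previous hunk adds it to this module's imports from mapFolding.someAssemblyRequired. Its job, decomposing each dataclasses.Field into declaration and initialization AST nodes, can be sketched with the standard library alone (ExampleState and the int(-1) sentinel are illustrative, mirroring the scalar branch of __post_init__ above):

import ast
import dataclasses

@dataclasses.dataclass
class ExampleState:
    leavesTotal: int = 0

for field in dataclasses.fields(ExampleState):
    # Build `leavesTotal: int = int(-1)`, the shape of AnnAssign the scalar branch emits.
    annotationName = field.type if isinstance(field.type, str) else field.type.__name__
    annAssign = ast.AnnAssign(
        target=ast.Name(id=field.name, ctx=ast.Store()),
        annotation=ast.Name(id=annotationName, ctx=ast.Load()),
        value=ast.Call(func=ast.Name(id=annotationName, ctx=ast.Load()), args=[ast.Constant(-1)], keywords=[]),
        simple=1,
    )
    print(ast.unparse(ast.fix_missing_locations(annAssign)))  # leavesTotal: int = int(-1)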
@@ -524,6 +411,13 @@ def makeNewFlow(recipeFlow: RecipeSynthesizeFlow) -> IngredientsModule:
 
      replaceCall2concurrencyManager = NodeChanger(ifThis.isCallAttributeNamespace_Identifier(recipeFlow.concurrencyManagerNamespace, recipeFlow.concurrencyManagerIdentifier), Then.replaceWith(Make.Call(Make.Attribute(Make.Name(recipeFlow.concurrencyManagerNamespace), recipeFlow.concurrencyManagerIdentifier), listArguments=[Make.Name(recipeFlow.callableParallel)] + listParameters)))
 
+     def getIt(astCallConcurrencyResult: list[ast.Call]) -> Callable[[ast.AST], ast.AST]:
+         # TODO I cannot remember why I made this function. It doesn't fit with how I normally do things.
+         def workhorse(node: ast.AST) -> ast.AST:
+             NodeTourist(be.Call, Then.appendTo(astCallConcurrencyResult)).visit(node)
+             return node
+         return workhorse
+
      # NOTE I am dissatisfied with this logic for many reasons, including that it requires separate NodeCollector and NodeReplacer instances.
      astCallConcurrencyResult: list[ast.Call] = []
      get_astCallConcurrencyResult = NodeTourist(ifThis.isAssignAndTargets0Is(ifThis.isSubscript_Identifier(getTheOtherRecord_damn)), getIt(astCallConcurrencyResult))
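The relocated getIt closure only funnels a NodeTourist traversal into a list of ast.Call nodes; the NOTE in the hunk above records the author's own dissatisfaction with the shape of this logic. For comparison, an equivalent stdlib-only collector, as a sketch with illustrative names:

import ast

def collectCalls(node: ast.AST) -> list[ast.Call]:
    # Gather every ast.Call in the subtree, mirroring what
    # NodeTourist(be.Call, Then.appendTo(...)) accumulates.
    return [child for child in ast.walk(node) if isinstance(child, ast.Call)]

tree = ast.parse('result[index] = concurrencyManager.submit(worker, argument)')
print([ast.unparse(call) for call in collectCalls(tree)])
# ['concurrencyManager.submit(worker, argument)']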
@@ -563,12 +457,6 @@ def unpackDataclassCallFunctionRepackDataclass(ingredientsCaller: IngredientsFun
      repack4targetCallable.visit(ingredientsCaller.astFunctionDef)
      return ingredientsCaller
 
- def getIt(astCallConcurrencyResult: list[ast.Call]) -> Callable[[ast.AST], ast.AST]:
-     def workhorse(node: ast.AST) -> ast.AST:
-         NodeTourist(be.Call, Then.appendTo(astCallConcurrencyResult)).visit(node)
-         return node
-     return workhorse
-
  dictionaryEstimates: dict[tuple[int, ...], int] = {
      (2,2,2,2,2,2,2,2): 798148657152000,
      (2,21): 776374224866624,
mapFolding/syntheticModules/daoOfMapFolding.py
@@ -0,0 +1,74 @@
+ from mapFolding.dataBaskets import Array1DElephino, Array1DLeavesTotal, Array3D, DatatypeElephino, DatatypeFoldsTotal, DatatypeLeavesTotal, MapFoldingState
+ from numba import jit
+
+ @jit(cache=True, error_model='numpy', fastmath=True, forceinline=True)
+ def count(groupsOfFolds: DatatypeFoldsTotal, gap1ndex: DatatypeElephino, gap1ndexCeiling: DatatypeElephino, indexDimension: DatatypeLeavesTotal, indexLeaf: DatatypeLeavesTotal, indexMiniGap: DatatypeElephino, leaf1ndex: DatatypeLeavesTotal, leafConnectee: DatatypeLeavesTotal, dimensionsUnconstrained: DatatypeLeavesTotal, countDimensionsGapped: Array1DLeavesTotal, gapRangeStart: Array1DElephino, gapsWhere: Array1DLeavesTotal, leafAbove: Array1DLeavesTotal, leafBelow: Array1DLeavesTotal, connectionGraph: Array3D, dimensionsTotal: DatatypeLeavesTotal, leavesTotal: DatatypeLeavesTotal) -> tuple[DatatypeFoldsTotal, DatatypeElephino, DatatypeElephino, DatatypeLeavesTotal, DatatypeLeavesTotal, DatatypeElephino, DatatypeLeavesTotal, DatatypeLeavesTotal, DatatypeLeavesTotal, Array1DLeavesTotal, Array1DElephino, Array1DLeavesTotal, Array1DLeavesTotal, Array1DLeavesTotal, Array3D, DatatypeLeavesTotal, DatatypeLeavesTotal]:
+     while leaf1ndex > 0:
+         if leaf1ndex <= 1 or leafBelow[0] == 1:
+             if leaf1ndex > leavesTotal:
+                 groupsOfFolds += 1
+             else:
+                 dimensionsUnconstrained = dimensionsTotal
+                 gap1ndexCeiling = gapRangeStart[leaf1ndex - 1]
+                 indexDimension = 0
+                 while indexDimension < dimensionsTotal:
+                     leafConnectee = connectionGraph[indexDimension, leaf1ndex, leaf1ndex]
+                     if leafConnectee == leaf1ndex:
+                         dimensionsUnconstrained -= 1
+                     else:
+                         while leafConnectee != leaf1ndex:
+                             gapsWhere[gap1ndexCeiling] = leafConnectee
+                             if countDimensionsGapped[leafConnectee] == 0:
+                                 gap1ndexCeiling += 1
+                             countDimensionsGapped[leafConnectee] += 1
+                             leafConnectee = connectionGraph[indexDimension, leaf1ndex, leafBelow[leafConnectee]]
+                     indexDimension += 1
+                 if not dimensionsUnconstrained:
+                     indexLeaf = 0
+                     while indexLeaf < leaf1ndex:
+                         gapsWhere[gap1ndexCeiling] = indexLeaf
+                         gap1ndexCeiling += 1
+                         indexLeaf += 1
+                 indexMiniGap = gap1ndex
+                 while indexMiniGap < gap1ndexCeiling:
+                     gapsWhere[gap1ndex] = gapsWhere[indexMiniGap]
+                     if countDimensionsGapped[gapsWhere[indexMiniGap]] == dimensionsUnconstrained:
+                         gap1ndex += 1
+                     countDimensionsGapped[gapsWhere[indexMiniGap]] = 0
+                     indexMiniGap += 1
+         while leaf1ndex > 0 and gap1ndex == gapRangeStart[leaf1ndex - 1]:
+             leaf1ndex -= 1
+             leafBelow[leafAbove[leaf1ndex]] = leafBelow[leaf1ndex]
+             leafAbove[leafBelow[leaf1ndex]] = leafAbove[leaf1ndex]
+         if leaf1ndex > 0:
+             gap1ndex -= 1
+             leafAbove[leaf1ndex] = gapsWhere[gap1ndex]
+             leafBelow[leaf1ndex] = leafBelow[leafAbove[leaf1ndex]]
+             leafBelow[leafAbove[leaf1ndex]] = leaf1ndex
+             leafAbove[leafBelow[leaf1ndex]] = leaf1ndex
+             gapRangeStart[leaf1ndex] = gap1ndex
+             leaf1ndex += 1
+     return (groupsOfFolds, gap1ndex, gap1ndexCeiling, indexDimension, indexLeaf, indexMiniGap, leaf1ndex, leafConnectee, dimensionsUnconstrained, countDimensionsGapped, gapRangeStart, gapsWhere, leafAbove, leafBelow, connectionGraph, dimensionsTotal, leavesTotal)
+
+ def doTheNeedful(state: MapFoldingState) -> MapFoldingState:
+     mapShape: tuple[DatatypeLeavesTotal, ...] = state.mapShape
+     groupsOfFolds: DatatypeFoldsTotal = state.groupsOfFolds
+     gap1ndex: DatatypeElephino = state.gap1ndex
+     gap1ndexCeiling: DatatypeElephino = state.gap1ndexCeiling
+     indexDimension: DatatypeLeavesTotal = state.indexDimension
+     indexLeaf: DatatypeLeavesTotal = state.indexLeaf
+     indexMiniGap: DatatypeElephino = state.indexMiniGap
+     leaf1ndex: DatatypeLeavesTotal = state.leaf1ndex
+     leafConnectee: DatatypeLeavesTotal = state.leafConnectee
+     dimensionsUnconstrained: DatatypeLeavesTotal = state.dimensionsUnconstrained
+     countDimensionsGapped: Array1DLeavesTotal = state.countDimensionsGapped
+     gapRangeStart: Array1DElephino = state.gapRangeStart
+     gapsWhere: Array1DLeavesTotal = state.gapsWhere
+     leafAbove: Array1DLeavesTotal = state.leafAbove
+     leafBelow: Array1DLeavesTotal = state.leafBelow
+     connectionGraph: Array3D = state.connectionGraph
+     dimensionsTotal: DatatypeLeavesTotal = state.dimensionsTotal
+     leavesTotal: DatatypeLeavesTotal = state.leavesTotal
+     groupsOfFolds, gap1ndex, gap1ndexCeiling, indexDimension, indexLeaf, indexMiniGap, leaf1ndex, leafConnectee, dimensionsUnconstrained, countDimensionsGapped, gapRangeStart, gapsWhere, leafAbove, leafBelow, connectionGraph, dimensionsTotal, leavesTotal = count(groupsOfFolds, gap1ndex, gap1ndexCeiling, indexDimension, indexLeaf, indexMiniGap, leaf1ndex, leafConnectee, dimensionsUnconstrained, countDimensionsGapped, gapRangeStart, gapsWhere, leafAbove, leafBelow, connectionGraph, dimensionsTotal, leavesTotal)
+     state = MapFoldingState(mapShape=mapShape, groupsOfFolds=groupsOfFolds, gap1ndex=gap1ndex, gap1ndexCeiling=gap1ndexCeiling, indexDimension=indexDimension, indexLeaf=indexLeaf, indexMiniGap=indexMiniGap, leaf1ndex=leaf1ndex, leafConnectee=leafConnectee, dimensionsUnconstrained=dimensionsUnconstrained, countDimensionsGapped=countDimensionsGapped, gapRangeStart=gapRangeStart, gapsWhere=gapsWhere, leafAbove=leafAbove, leafBelow=leafBelow)
+     return state
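The new daoOfMapFolding.py shows the unpack-call-repack pattern: the @jit kernel accepts only scalars and NumPy arrays, not the MapFoldingState dataclass, so doTheNeedful flattens the state into locals, runs count, and rebuilds the dataclass. A hedged usage sketch, assuming MapFoldingState accepts mapShape as its first positional argument (as in the __main__ blocks elsewhere in this diff) and that no further initialization is required:

from mapFolding.dataBaskets import MapFoldingState
from mapFolding.syntheticModules.daoOfMapFolding import doTheNeedful

state = MapFoldingState((2, 3))  # a small map shape
state = doTheNeedful(state)      # runs the jitted count() kernel to completion
print(state.groupsOfFolds)       # launchers elsewhere multiply by leavesTotal for foldsTotal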
mapFolding/syntheticModules/dataPacking.py
@@ -20,6 +20,6 @@ def doTheNeedful(state: MapFoldingState) -> MapFoldingState:
      connectionGraph: Array3D = state.connectionGraph
      dimensionsTotal: DatatypeLeavesTotal = state.dimensionsTotal
      leavesTotal: DatatypeLeavesTotal = state.leavesTotal
-     groupsOfFolds, gap1ndex, gap1ndexCeiling, indexDimension, indexLeaf, indexMiniGap, leaf1ndex, leafConnectee, dimensionsUnconstrained, countDimensionsGapped, gapRangeStart, gapsWhere, leafAbove, leafBelow, connectionGraph, dimensionsTotal, leavesTotal = count(groupsOfFolds, gap1ndex, gap1ndexCeiling, indexDimension, indexLeaf, indexMiniGap, leaf1ndex, leafConnectee, dimensionsUnconstrained, countDimensionsGapped, gapRangeStart, gapsWhere, leafAbove, leafBelow, connectionGraph, dimensionsTotal, leavesTotal)
+     groupsOfFolds, gap1ndex, gap1ndexCeiling, indexDimension, indexMiniGap, leaf1ndex, leafConnectee, dimensionsUnconstrained, countDimensionsGapped, gapRangeStart, gapsWhere, leafAbove, leafBelow, connectionGraph, dimensionsTotal, leavesTotal = count(groupsOfFolds, gap1ndex, gap1ndexCeiling, indexDimension, indexMiniGap, leaf1ndex, leafConnectee, dimensionsUnconstrained, countDimensionsGapped, gapRangeStart, gapsWhere, leafAbove, leafBelow, connectionGraph, dimensionsTotal, leavesTotal)
      state = MapFoldingState(mapShape=mapShape, groupsOfFolds=groupsOfFolds, gap1ndex=gap1ndex, gap1ndexCeiling=gap1ndexCeiling, indexDimension=indexDimension, indexLeaf=indexLeaf, indexMiniGap=indexMiniGap, leaf1ndex=leaf1ndex, leafConnectee=leafConnectee, dimensionsUnconstrained=dimensionsUnconstrained, countDimensionsGapped=countDimensionsGapped, gapRangeStart=gapRangeStart, gapsWhere=gapsWhere, leafAbove=leafAbove, leafBelow=leafBelow)
      return state
mapFolding/syntheticModules/theorem2Numba.py
@@ -2,7 +2,7 @@ from mapFolding.dataBaskets import Array1DElephino, Array1DLeavesTotal, Array3D,
  from numba import jit
 
  @jit(cache=True, error_model='numpy', fastmath=True, forceinline=True)
- def count(groupsOfFolds: DatatypeFoldsTotal, gap1ndex: DatatypeElephino, gap1ndexCeiling: DatatypeElephino, indexDimension: DatatypeLeavesTotal, indexLeaf: DatatypeLeavesTotal, indexMiniGap: DatatypeElephino, leaf1ndex: DatatypeLeavesTotal, leafConnectee: DatatypeLeavesTotal, dimensionsUnconstrained: DatatypeLeavesTotal, countDimensionsGapped: Array1DLeavesTotal, gapRangeStart: Array1DElephino, gapsWhere: Array1DLeavesTotal, leafAbove: Array1DLeavesTotal, leafBelow: Array1DLeavesTotal, connectionGraph: Array3D, dimensionsTotal: DatatypeLeavesTotal, leavesTotal: DatatypeLeavesTotal) -> tuple[DatatypeFoldsTotal, DatatypeElephino, DatatypeElephino, DatatypeLeavesTotal, DatatypeLeavesTotal, DatatypeElephino, DatatypeLeavesTotal, DatatypeLeavesTotal, DatatypeLeavesTotal, Array1DLeavesTotal, Array1DElephino, Array1DLeavesTotal, Array1DLeavesTotal, Array1DLeavesTotal, Array3D, DatatypeLeavesTotal, DatatypeLeavesTotal]:
+ def count(groupsOfFolds: DatatypeFoldsTotal, gap1ndex: DatatypeElephino, gap1ndexCeiling: DatatypeElephino, indexDimension: DatatypeLeavesTotal, indexMiniGap: DatatypeElephino, leaf1ndex: DatatypeLeavesTotal, leafConnectee: DatatypeLeavesTotal, dimensionsUnconstrained: DatatypeLeavesTotal, countDimensionsGapped: Array1DLeavesTotal, gapRangeStart: Array1DElephino, gapsWhere: Array1DLeavesTotal, leafAbove: Array1DLeavesTotal, leafBelow: Array1DLeavesTotal, connectionGraph: Array3D, dimensionsTotal: DatatypeLeavesTotal, leavesTotal: DatatypeLeavesTotal) -> tuple[DatatypeFoldsTotal, DatatypeElephino, DatatypeElephino, DatatypeLeavesTotal, DatatypeElephino, DatatypeLeavesTotal, DatatypeLeavesTotal, DatatypeLeavesTotal, Array1DLeavesTotal, Array1DElephino, Array1DLeavesTotal, Array1DLeavesTotal, Array1DLeavesTotal, Array3D, DatatypeLeavesTotal, DatatypeLeavesTotal]:
      while leaf1ndex > 4:
          if leafBelow[0] == 1:
              if leaf1ndex > leavesTotal:
@@ -23,12 +23,6 @@ def count(groupsOfFolds: DatatypeFoldsTotal, gap1ndex: DatatypeElephino, gap1nde
                              countDimensionsGapped[leafConnectee] += 1
                              leafConnectee = connectionGraph[indexDimension, leaf1ndex, leafBelow[leafConnectee]]
                      indexDimension += 1
-                 if not dimensionsUnconstrained:
-                     indexLeaf = 0
-                     while indexLeaf < leaf1ndex:
-                         gapsWhere[gap1ndexCeiling] = indexLeaf
-                         gap1ndexCeiling += 1
-                         indexLeaf += 1
                  indexMiniGap = gap1ndex
                  while indexMiniGap < gap1ndexCeiling:
                      gapsWhere[gap1ndex] = gapsWhere[indexMiniGap]
@@ -48,4 +42,4 @@ def count(groupsOfFolds: DatatypeFoldsTotal, gap1ndex: DatatypeElephino, gap1nde
              gapRangeStart[leaf1ndex] = gap1ndex
              leaf1ndex += 1
      groupsOfFolds *= 2
-     return (groupsOfFolds, gap1ndex, gap1ndexCeiling, indexDimension, indexLeaf, indexMiniGap, leaf1ndex, leafConnectee, dimensionsUnconstrained, countDimensionsGapped, gapRangeStart, gapsWhere, leafAbove, leafBelow, connectionGraph, dimensionsTotal, leavesTotal)
+     return (groupsOfFolds, gap1ndex, gap1ndexCeiling, indexDimension, indexMiniGap, leaf1ndex, leafConnectee, dimensionsUnconstrained, countDimensionsGapped, gapRangeStart, gapsWhere, leafAbove, leafBelow, connectionGraph, dimensionsTotal, leavesTotal)
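The trimmed theorem2Numba kernel starts its loop at leaf1ndex > 4, drops the indexLeaf plumbing and the dimensionsUnconstrained == 0 branch, and compensates by doubling groupsOfFolds before returning; the generated launchers then multiply by leavesTotal (the foldsTotal = count() * leavesTotal line in makeJobNumba's launcher). The closing arithmetic, as a sketch with a placeholder count:

# groupsOfFolds was already doubled inside count() (the `groupsOfFolds *= 2` line above).
groupsOfFolds = 12345  # placeholder value, not a computed result
leavesTotal = 46       # e.g. the 1x46 shape used in this diff's __main__ blocks
foldsTotal = groupsOfFolds * leavesTotal
print(foldsTotal)      # 567870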