mapFolding 0.9.2__tar.gz → 0.9.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56) hide show
  1. {mapfolding-0.9.2 → mapfolding-0.9.3}/PKG-INFO +3 -3
  2. {mapfolding-0.9.2 → mapfolding-0.9.3}/README.md +1 -1
  3. mapfolding-0.9.3/mapFolding/Z0Z_flowControl.py +99 -0
  4. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/__init__.py +28 -26
  5. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/basecamp.py +1 -1
  6. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/beDRY.py +1 -2
  7. mapfolding-0.9.3/mapFolding/dataBaskets.py +49 -0
  8. mapfolding-0.9.3/mapFolding/datatypes.py +21 -0
  9. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/oeis.py +1 -2
  10. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/someAssemblyRequired/__init__.py +1 -1
  11. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/someAssemblyRequired/_theTypes.py +1 -1
  12. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/someAssemblyRequired/_toolboxContainers.py +58 -48
  13. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/someAssemblyRequired/synthesizeNumbaJob.py +7 -7
  14. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/someAssemblyRequired/toolboxNumba.py +1 -1
  15. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/someAssemblyRequired/transformationTools.py +1 -1
  16. mapfolding-0.9.3/mapFolding/theDaoOfMapFolding.py +142 -0
  17. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/theSSOT.py +13 -21
  18. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding.egg-info/PKG-INFO +3 -3
  19. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding.egg-info/SOURCES.txt +4 -0
  20. {mapfolding-0.9.2 → mapfolding-0.9.3}/pyproject.toml +2 -1
  21. {mapfolding-0.9.2 → mapfolding-0.9.3}/tests/__init__.py +2 -2
  22. {mapfolding-0.9.2 → mapfolding-0.9.3}/tests/conftest.py +7 -7
  23. {mapfolding-0.9.2 → mapfolding-0.9.3}/tests/test_computations.py +15 -13
  24. {mapfolding-0.9.2 → mapfolding-0.9.3}/tests/test_tasks.py +2 -2
  25. {mapfolding-0.9.2 → mapfolding-0.9.3}/LICENSE +0 -0
  26. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/py.typed +0 -0
  27. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/reference/__init__.py +0 -0
  28. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/reference/flattened.py +0 -0
  29. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/reference/hunterNumba.py +0 -0
  30. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/reference/irvineJavaPort.py +0 -0
  31. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/reference/jaxCount.py +0 -0
  32. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/reference/jobsCompleted/[2x19]/p2x19.py +0 -0
  33. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/reference/jobsCompleted/__init__.py +0 -0
  34. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/reference/jobsCompleted/p2x19/p2x19.py +0 -0
  35. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/reference/lunnonNumpy.py +0 -0
  36. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/reference/lunnonWhile.py +0 -0
  37. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/reference/rotatedEntryPoint.py +0 -0
  38. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/reference/total_countPlus1vsPlusN.py +0 -0
  39. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/someAssemblyRequired/RecipeJob.py +0 -0
  40. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/someAssemblyRequired/_tool_Make.py +0 -0
  41. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/someAssemblyRequired/_tool_Then.py +0 -0
  42. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/someAssemblyRequired/_toolboxAntecedents.py +0 -0
  43. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/someAssemblyRequired/_toolboxPython.py +0 -0
  44. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/someAssemblyRequired/getLLVMforNoReason.py +0 -0
  45. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/syntheticModules/__init__.py +0 -0
  46. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/syntheticModules/numbaCount.py +0 -0
  47. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/theDao.py +0 -0
  48. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding/toolboxFilesystem.py +0 -0
  49. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding.egg-info/dependency_links.txt +0 -0
  50. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding.egg-info/entry_points.txt +0 -0
  51. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding.egg-info/requires.txt +0 -0
  52. {mapfolding-0.9.2 → mapfolding-0.9.3}/mapFolding.egg-info/top_level.txt +0 -0
  53. {mapfolding-0.9.2 → mapfolding-0.9.3}/setup.cfg +0 -0
  54. {mapfolding-0.9.2 → mapfolding-0.9.3}/tests/test_filesystem.py +0 -0
  55. {mapfolding-0.9.2 → mapfolding-0.9.3}/tests/test_oeis.py +0 -0
  56. {mapfolding-0.9.2 → mapfolding-0.9.3}/tests/test_other.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mapFolding
3
- Version: 0.9.2
3
+ Version: 0.9.3
4
4
  Summary: Map folding algorithm with code transformation framework for optimizing numerical computations
5
5
  Author-email: Hunter Hogan <HunterHogan@pm.me>
6
6
  License: CC-BY-NC-4.0
@@ -8,7 +8,7 @@ Project-URL: Donate, https://www.patreon.com/integrated
8
8
  Project-URL: Homepage, https://github.com/hunterhogan/mapFolding
9
9
  Project-URL: Repository, https://github.com/hunterhogan/mapFolding.git
10
10
  Project-URL: Issues, https://github.com/hunterhogan/mapFolding/issues
11
- Keywords: A001415,A001416,A001417,A001418,A195646,algorithmic optimization,AST manipulation,code generation,code transformation,combinatorics,computational geometry,dataclass transformation,folding pattern enumeration,just-in-time compilation,map folding,Numba optimization,OEIS,performance optimization,source code analysis,stamp folding
11
+ Keywords: A000136,A001415,A001416,A001417,A001418,A195646,algorithmic optimization,AST manipulation,code generation,code transformation,combinatorics,computational geometry,dataclass transformation,folding pattern enumeration,just-in-time compilation,map folding,Numba optimization,OEIS,performance optimization,source code analysis,stamp folding
12
12
  Classifier: Development Status :: 4 - Beta
13
13
  Classifier: Environment :: Console
14
14
  Classifier: Intended Audience :: Developers
@@ -106,7 +106,7 @@ def countFolds_optimized(shape_param):
106
106
 
107
107
  ### 2. Code Generation Framework
108
108
 
109
- Study and extend a complete Python code transformation pipeline:
109
+ Study and extend a complete Python code transformation assembly line:
110
110
 
111
111
  - AST analysis and manipulation
112
112
  - Dataclass decomposition ("shattering")
@@ -53,7 +53,7 @@ def countFolds_optimized(shape_param):
53
53
 
54
54
  ### 2. Code Generation Framework
55
55
 
56
- Study and extend a complete Python code transformation pipeline:
56
+ Study and extend a complete Python code transformation assembly line:
57
57
 
58
58
  - AST analysis and manipulation
59
59
  - Dataclass decomposition ("shattering")
@@ -0,0 +1,99 @@
1
+ from collections.abc import Sequence
2
+ from mapFolding import (
3
+ ComputationState,
4
+ getPathFilenameFoldsTotal,
5
+ outfitCountFolds,
6
+ saveFoldsTotal,
7
+ saveFoldsTotalFAILearly,
8
+ setProcessorLimit,
9
+ The,
10
+ validateListDimensions,
11
+ )
12
+ from os import PathLike
13
+ from pathlib import PurePath
14
+
15
+ def countFolds(listDimensions: Sequence[int] | None = None
16
+ , pathLikeWriteFoldsTotal: PathLike[str] | PurePath | None = None
17
+ , computationDivisions: int | str | None = None
18
+ , CPUlimit: int | float | bool | None = None
19
+ # , * I need to improve `standardizedEqualToCallableReturn` so it will work with keyword arguments
20
+ , mapShape: tuple[int, ...] | None = None
21
+ , oeisID: str | None = None
22
+ , oeis_n: int | None = None
23
+ , flow: str | None = None
24
+ ) -> int:
25
+ """
26
+ To select the execution path, I need at least:
27
+ - mapShape
28
+ - task division instructions
29
+ - memorialization instructions
30
+ """
31
+
32
+ # mapShape =====================================================================
33
+
34
+ if mapShape:
35
+ pass
36
+ else:
37
+ if oeisID and oeis_n:
38
+ from mapFolding.oeis import settingsOEIS
39
+ try:
40
+ mapShape = settingsOEIS[oeisID]['getMapShape'](oeis_n)
41
+ except KeyError:
42
+ pass
43
+ if not mapShape and listDimensions:
44
+ mapShape = validateListDimensions(listDimensions)
45
+
46
+ if mapShape is None:
47
+ raise ValueError(f"""I received these values:
48
+ `{listDimensions = }`,
49
+ `{mapShape = }`,
50
+ `{oeisID = }` and `{oeis_n = }`,
51
+ but I was unable to select a map for which to count the folds.""")
52
+
53
+ # task division instructions ===============================================
54
+
55
+ if computationDivisions:
56
+ # NOTE `The.concurrencyPackage`
57
+ concurrencyLimit: int = setProcessorLimit(CPUlimit, The.concurrencyPackage)
58
+ from mapFolding.beDRY import getLeavesTotal, getTaskDivisions
59
+ leavesTotal: int = getLeavesTotal(mapShape)
60
+ taskDivisions = getTaskDivisions(computationDivisions, concurrencyLimit, leavesTotal)
61
+ del leavesTotal
62
+ else:
63
+ concurrencyLimit = 1
64
+ taskDivisions = 0
65
+
66
+ # memorialization instructions ===========================================
67
+
68
+ if pathLikeWriteFoldsTotal is not None:
69
+ pathFilenameFoldsTotal = getPathFilenameFoldsTotal(mapShape, pathLikeWriteFoldsTotal)
70
+ saveFoldsTotalFAILearly(pathFilenameFoldsTotal)
71
+ else:
72
+ pathFilenameFoldsTotal = None
73
+
74
+ # Flow control until I can figure out a good way ===============================
75
+
76
+ if flow == 'theDaoOfMapFolding':
77
+ from mapFolding.dataBaskets import MapFoldingState
78
+ mapFoldingState: MapFoldingState = MapFoldingState(mapShape)
79
+
80
+ from mapFolding.theDaoOfMapFolding import doTheNeedful
81
+ mapFoldingState = doTheNeedful(mapFoldingState)
82
+ foldsTotal = mapFoldingState.foldsTotal
83
+
84
+ # NOTE treat this as a default?
85
+ # flow based on `The` and `ComputationState` ====================================
86
+
87
+ else:
88
+ computationStateInitialized: ComputationState = outfitCountFolds(mapShape, computationDivisions, concurrencyLimit)
89
+ computationStateComplete: ComputationState = The.dispatcher(computationStateInitialized)
90
+
91
+ computationStateComplete.getFoldsTotal()
92
+ foldsTotal = computationStateComplete.foldsTotal
93
+
94
+ # Follow memorialization instructions ===========================================
95
+
96
+ if pathFilenameFoldsTotal is not None:
97
+ saveFoldsTotal(pathFilenameFoldsTotal, foldsTotal)
98
+
99
+ return foldsTotal
@@ -1,29 +1,27 @@
1
1
  """
2
2
  Map folding enumeration and counting algorithms with advanced optimization capabilities.
3
3
 
4
- This package implements algorithms to count and enumerate the distinct ways
5
- a rectangular map can be folded, based on the mathematical problem described
6
- in Lunnon's 1971 paper. It provides multiple layers of functionality, from
7
- high-level user interfaces to sophisticated algorithmic optimizations and code
8
- transformation tools.
4
+ This package implements algorithms to count and enumerate the distinct ways a rectangular map can be folded, based on
5
+ the mathematical problem described in Lunnon's 1971 paper. It provides multiple layers of functionality, from high-level
6
+ user interfaces to sophisticated algorithmic optimizations and code transformation tools.
9
7
 
10
8
  Core modules:
11
9
  - basecamp: Public API with simplified interfaces for end users
12
10
  - theDao: Core computational algorithm using a functional state-transformation approach
13
- - beDRY: Core utility functions implementing consistent data handling, validation, and
14
- resource management across the package's computational assembly-line
11
+ - beDRY: Core utility functions implementing consistent data handling, validation, and resource management across the
12
+ package's computational assembly-line
15
13
  - theSSOT: Single Source of Truth for configuration, types, and state management
16
- - toolboxFilesystem: Cross-platform file management services for storing and retrieving
17
- computation results with robust error handling and fallback mechanisms
14
+ - toolboxFilesystem: Cross-platform file management services for storing and retrieving computation results with robust
15
+ error handling and fallback mechanisms
18
16
  - oeis: Interface to the Online Encyclopedia of Integer Sequences for known results
19
17
 
20
18
  Extended functionality:
21
- - someAssemblyRequired: Code transformation framework that optimizes the core algorithm
22
- through AST manipulation, dataclass transformation, and compilation techniques
23
- - The system converts readable code into high-performance implementations through
24
- a systematic analysis and transformation pipeline
25
- - Provides tools to "shatter" complex dataclasses into primitive components,
26
- enabling compatibility with Numba and other optimization frameworks
19
+ - someAssemblyRequired: Code transformation framework that optimizes the core algorithm through AST manipulation,
20
+ dataclass transformation, and compilation techniques
21
+ - The system converts readable code into high-performance implementations through a systematic analysis and
22
+ transformation assembly line
23
+ - Provides tools to "shatter" complex dataclasses into primitive components, enabling compatibility with Numba and
24
+ other optimization frameworks
27
25
  - Creates specialized implementations tailored for specific input parameters
28
26
 
29
27
  Testing and extension:
@@ -35,24 +33,22 @@ Testing and extension:
35
33
 
36
34
  Special directories:
37
35
  - .cache/: Stores cached data from external sources like OEIS to improve performance
38
- - syntheticModules/: Contains dynamically generated, optimized implementations of the
39
- core algorithm created by the code transformation framework
36
+ - syntheticModules/: Contains dynamically generated, optimized implementations of the core algorithm created by the code
37
+ transformation framework
40
38
  - reference/: Historical implementations and educational resources for algorithm exploration
41
- - reference/jobsCompleted/: Contains successful computations for previously unknown values,
42
- including first-ever calculations for 2x19 and 2x20 maps (OEIS A001415)
39
+ - reference/jobsCompleted/: Contains successful computations for previously unknown values, including first-ever
40
+ calculations for 2x19 and 2x20 maps (OEIS A001415)
43
41
 
44
- This package balances algorithm readability and understandability with
45
- high-performance computation capabilities, allowing users to compute map folding
46
- totals for larger dimensions than previously feasible while also providing
47
- a foundation for exploring advanced code transformation techniques.
42
+ This package balances algorithm readability and understandability with high-performance computation capabilities,
43
+ allowing users to compute map folding totals for larger dimensions than previously feasible while also providing a
44
+ foundation for exploring advanced code transformation techniques.
48
45
  """
49
46
 
50
- from mapFolding.theSSOT import (
47
+ from mapFolding.datatypes import (
51
48
  Array1DElephino as Array1DElephino,
52
49
  Array1DFoldsTotal as Array1DFoldsTotal,
53
50
  Array1DLeavesTotal as Array1DLeavesTotal,
54
51
  Array3D as Array3D,
55
- ComputationState as ComputationState,
56
52
  DatatypeElephino as DatatypeElephino,
57
53
  DatatypeFoldsTotal as DatatypeFoldsTotal,
58
54
  DatatypeLeavesTotal as DatatypeLeavesTotal,
@@ -60,6 +56,10 @@ from mapFolding.theSSOT import (
60
56
  NumPyFoldsTotal as NumPyFoldsTotal,
61
57
  NumPyIntegerType as NumPyIntegerType,
62
58
  NumPyLeavesTotal as NumPyLeavesTotal,
59
+ )
60
+
61
+ from mapFolding.theSSOT import (
62
+ ComputationState as ComputationState,
63
63
  raiseIfNoneGitHubIssueNumber3 as raiseIfNoneGitHubIssueNumber3,
64
64
  The as The,
65
65
  )
@@ -70,6 +70,8 @@ from mapFolding.theDao import (
70
70
  )
71
71
 
72
72
  from mapFolding.beDRY import (
73
+ getLeavesTotal as getLeavesTotal,
74
+ getTaskDivisions as getTaskDivisions,
73
75
  outfitCountFolds as outfitCountFolds,
74
76
  setProcessorLimit as setProcessorLimit,
75
77
  validateListDimensions as validateListDimensions,
@@ -83,7 +85,7 @@ from mapFolding.toolboxFilesystem import (
83
85
  writeStringToHere as writeStringToHere,
84
86
  )
85
87
 
86
- from mapFolding.basecamp import countFolds as countFolds
88
+ from mapFolding.Z0Z_flowControl import countFolds
87
89
 
88
90
  from mapFolding.oeis import (
89
91
  clearOEIScache as clearOEIScache,
@@ -63,7 +63,7 @@ def countFolds(listDimensions: Sequence[int]
63
63
 
64
64
  Note well
65
65
  ---------
66
- You probably don't want to divide the computation into tasks.
66
+ You probably do not want to divide your computation into tasks.
67
67
 
68
68
  If you want to compute a large `foldsTotal`, dividing the computation into tasks is usually a bad idea. Dividing the
69
69
  algorithm into tasks is inherently inefficient: efficient division into tasks means there would be no overlap in the
@@ -29,8 +29,7 @@ def getLeavesTotal(mapShape: tuple[int, ...]) -> int:
29
29
  """
30
30
  Calculate the total number of leaves in a map with the given dimensions.
31
31
 
32
- The total number of leaves is the product of all dimensions in the map shape. This value is foundational for
33
- initializing the computation state and determining task divisions.
32
+ The total number of leaves is the product of all dimensions in the map shape.
34
33
 
35
34
  Parameters
36
35
  ----------
@@ -0,0 +1,49 @@
1
+ from mapFolding.beDRY import getConnectionGraph, getLeavesTotal, makeDataContainer
2
+ from mapFolding.datatypes import Array3D, Array1DElephino, Array1DLeavesTotal, DatatypeElephino, DatatypeFoldsTotal, DatatypeLeavesTotal
3
+ import dataclasses
4
+
5
+ @dataclasses.dataclass
6
+ class MapFoldingState:
7
+ mapShape: tuple[DatatypeLeavesTotal, ...] = dataclasses.field(init=True, metadata={'elementConstructor': 'DatatypeLeavesTotal'})
8
+
9
+ groupsOfFolds: DatatypeFoldsTotal = dataclasses.field(default=DatatypeFoldsTotal(0), metadata={'theCountingIdentifier': True})
10
+
11
+ gap1ndex: DatatypeElephino = DatatypeElephino(0)
12
+ gap1ndexCeiling: DatatypeElephino = DatatypeElephino(0)
13
+ indexDimension: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
14
+ indexLeaf: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
15
+ indexMiniGap: DatatypeElephino = DatatypeElephino(0)
16
+ leaf1ndex: DatatypeLeavesTotal = DatatypeLeavesTotal(1)
17
+ leafConnectee: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
18
+
19
+ dimensionsUnconstrained: DatatypeLeavesTotal = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
20
+
21
+ countDimensionsGapped: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
22
+ gapRangeStart: Array1DElephino = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DElephino.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
23
+ gapsWhere: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
24
+ leafAbove: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
25
+ leafBelow: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
26
+
27
+ connectionGraph: Array3D = dataclasses.field(init=False, metadata={'dtype': Array3D.__args__[1].__args__[0]}) # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
28
+ dimensionsTotal: DatatypeLeavesTotal = dataclasses.field(init=False)
29
+ leavesTotal: DatatypeLeavesTotal = dataclasses.field(init=False)
30
+
31
+ @property
32
+ def foldsTotal(self) -> DatatypeFoldsTotal:
33
+ _foldsTotal = DatatypeFoldsTotal(self.leavesTotal) * self.groupsOfFolds
34
+ return _foldsTotal
35
+
36
+ def __post_init__(self) -> None:
37
+ self.dimensionsTotal = DatatypeLeavesTotal(len(self.mapShape))
38
+ self.leavesTotal = DatatypeLeavesTotal(getLeavesTotal(self.mapShape))
39
+
40
+ leavesTotalAsInt = int(self.leavesTotal)
41
+
42
+ self.connectionGraph = getConnectionGraph(self.mapShape, leavesTotalAsInt, self.__dataclass_fields__['connectionGraph'].metadata['dtype'])
43
+
44
+ if self.dimensionsUnconstrained is None: self.dimensionsUnconstrained = DatatypeLeavesTotal(int(self.dimensionsTotal)) # pyright: ignore[reportUnnecessaryComparison]
45
+ if self.gapsWhere is None: self.gapsWhere = makeDataContainer(leavesTotalAsInt * leavesTotalAsInt + 1, self.__dataclass_fields__['gapsWhere'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
46
+ if self.countDimensionsGapped is None: self.countDimensionsGapped = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['countDimensionsGapped'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
47
+ if self.gapRangeStart is None: self.gapRangeStart = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['gapRangeStart'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
48
+ if self.leafAbove is None: self.leafAbove = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['leafAbove'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
49
+ if self.leafBelow is None: self.leafBelow = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['leafBelow'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
@@ -0,0 +1,21 @@
1
+ from numpy import dtype, int64 as numpy_int64, integer, ndarray
2
+ from typing import Any, TypeAlias, TypeVar
3
+
4
+ # =============================================================================
5
+ # Flexible Data Structure System Needs Enhanced Paradigm https://github.com/hunterhogan/mapFolding/issues/9
6
+
7
+ NumPyIntegerType = TypeVar('NumPyIntegerType', bound=integer[Any], covariant=True)
8
+
9
+ DatatypeLeavesTotal: TypeAlias = int
10
+ NumPyLeavesTotal: TypeAlias = numpy_int64
11
+
12
+ DatatypeElephino: TypeAlias = int
13
+ NumPyElephino: TypeAlias = numpy_int64
14
+
15
+ DatatypeFoldsTotal: TypeAlias = int
16
+ NumPyFoldsTotal: TypeAlias = numpy_int64
17
+
18
+ Array3D: TypeAlias = ndarray[tuple[int, int, int], dtype[NumPyLeavesTotal]]
19
+ Array1DLeavesTotal: TypeAlias = ndarray[tuple[int], dtype[NumPyLeavesTotal]]
20
+ Array1DElephino: TypeAlias = ndarray[tuple[int], dtype[NumPyElephino]]
21
+ Array1DFoldsTotal: TypeAlias = ndarray[tuple[int], dtype[NumPyFoldsTotal]]
@@ -20,7 +20,7 @@ mathematical definition in OEIS and the computational implementation in the pack
20
20
  from collections.abc import Callable
21
21
  from datetime import datetime, timedelta
22
22
  from functools import cache
23
- from mapFolding import writeStringToHere, The
23
+ from mapFolding import countFolds, The, writeStringToHere
24
24
  from pathlib import Path
25
25
  from typing import Any, Final, TYPE_CHECKING
26
26
  import argparse
@@ -401,7 +401,6 @@ def oeisIDfor_n(oeisID: str, n: int | Any) -> int:
401
401
  raise ArithmeticError(f"OEIS sequence {oeisID} is not defined at {n = }.")
402
402
  foldsTotal: int = settingsOEIS[oeisID]['valuesKnown'][n]
403
403
  return foldsTotal
404
- from mapFolding.basecamp import countFolds
405
404
  return countFolds(mapShape)
406
405
 
407
406
  def OEIS_for_n() -> None:
@@ -22,7 +22,7 @@ functional implementations into highly-optimized variants with verified correctn
22
22
  - Recipe configuration for generating optimized code (RecipeSynthesizeFlow)
23
23
  - Dataclass decomposition for compatibility (ShatteredDataclass)
24
24
 
25
- 3. **Optimization Pipelines**
25
+ 3. **Optimization assembly lines**
26
26
  - General-purpose Numba acceleration (makeNumbaFlow)
27
27
  - Job-specific optimization for concrete parameters (makeJobNumba)
28
28
  - Specialized component transformation (decorateCallableWithNumba)
@@ -15,7 +15,7 @@ else:
15
15
 
16
16
  class ImaCallToName(ast.Call):
17
17
  func: ast.Name # pyright: ignore[reportIncompatibleVariableOverride]
18
- # assert isinstance(ast.Call.func, ast.Name), "brinksmanship"
18
+ # assert isinstance(ast.Call.func, ast.Name), "brinkmanship"
19
19
  # func: ast.Name
20
20
 
21
21
  astClassHasDOTtargetAttributeNameSubscript: typing_TypeAlias = ast.AnnAssign | ast.AugAssign
@@ -1,23 +1,21 @@
1
1
  """
2
2
  AST Container Classes for Python Code Generation and Transformation
3
3
 
4
- This module provides specialized container classes that organize AST nodes, imports,
5
- and program structure for code generation and transformation. These classes form
6
- the organizational backbone of the code generation system, enabling:
7
-
8
- 1. Tracking and managing imports with LedgerOfImports
9
- 2. Packaging function definitions with their dependencies via IngredientsFunction
10
- 3. Structuring complete modules with IngredientsModule
11
- 4. Configuring code synthesis with RecipeSynthesizeFlow
12
- 5. Organizing decomposed dataclass representations with ShatteredDataclass
13
-
14
- Together, these container classes implement a component-based architecture for
15
- programmatic generation of high-performance code. They maintain a clean separation
16
- between structure and content, allowing transformations to be applied systematically
17
- while preserving relationships between code elements.
18
-
19
- The containers work in conjunction with transformation tools that manipulate the
20
- contained AST nodes to implement specific optimizations and transformations.
4
+ This module provides specialized container classes that organize AST nodes, imports, and program structure for code
5
+ generation and transformation. These classes form the organizational backbone of the code generation system, enabling:
6
+
7
+ 1. Tracking and managing imports with LedgerOfImports.
8
+ 2. Packaging function definitions with their dependencies via IngredientsFunction.
9
+ 3. Structuring complete modules with IngredientsModule.
10
+ 4. Configuring code synthesis with RecipeSynthesizeFlow.
11
+ 5. Organizing decomposed dataclass representations with ShatteredDataclass.
12
+
13
+ Together, these container classes implement a component-based architecture for programmatic generation of
14
+ high-performance code. They maintain a clean separation between structure and content, allowing transformations to be
15
+ applied systematically while preserving relationships between code elements.
16
+
17
+ The containers work in conjunction with transformation tools that manipulate the contained AST nodes to implement
18
+ specific optimizations and transformations.
21
19
  """
22
20
 
23
21
  from collections import defaultdict
@@ -33,30 +31,31 @@ class LedgerOfImports:
33
31
  """
34
32
  Track and manage import statements for programmatically generated code.
35
33
 
36
- LedgerOfImports acts as a registry for import statements, maintaining a clean
37
- separation between the logical structure of imports and their textual representation.
34
+ LedgerOfImports acts as a registry for import statements, maintaining a clean separation between the logical
35
+ structure of imports and their textual representation.
38
36
  It enables:
39
37
 
40
- 1. Tracking regular imports and import-from statements
41
- 2. Adding imports programmatically during code transformation
42
- 3. Merging imports from multiple sources
43
- 4. Removing unnecessary or conflicting imports
44
- 5. Generating optimized AST import nodes for the final code
38
+ 1. Tracking regular imports and import-from statements.
39
+ 2. Adding imports programmatically during code transformation.
40
+ 3. Merging imports from multiple sources.
41
+ 4. Removing unnecessary or conflicting imports.
42
+ 5. Generating optimized AST import nodes for the final code.
45
43
 
46
- This class forms the foundation of dependency management in generated code,
47
- ensuring that all required libraries are available without duplication or
48
- conflict.
44
+ This class forms the foundation of dependency management in generated code, ensuring that all required libraries are
45
+ available without duplication or conflict.
49
46
  """
50
47
  # TODO When resolving the ledger of imports, remove self-referential imports
51
- # TODO add TypeIgnore tracking to the ledger of imports
52
48
 
53
- def __init__(self, startWith: ast.AST | None = None) -> None:
49
+ type_ignores: list[ast.TypeIgnore]
50
+
51
+ def __init__(self, startWith: ast.AST | None = None, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
54
52
  self.dictionaryImportFrom: dict[str_nameDOTname, list[tuple[ast_Identifier, ast_Identifier | None]]] = defaultdict(list)
55
53
  self.listImport: list[str_nameDOTname] = []
54
+ self.type_ignores = [] if type_ignores is None else list(type_ignores)
56
55
  if startWith:
57
56
  self.walkThis(startWith)
58
57
 
59
- def addAst(self, astImport____: ast.Import | ast.ImportFrom) -> None:
58
+ def addAst(self, astImport____: ast.Import | ast.ImportFrom, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
60
59
  match astImport____:
61
60
  case ast.Import():
62
61
  for alias in astImport____.names:
@@ -69,14 +68,20 @@ class LedgerOfImports:
69
68
  self.dictionaryImportFrom[astImport____.module].append((alias.name, alias.asname))
70
69
  case _:
71
70
  raise ValueError(f"I received {type(astImport____) = }, but I can only accept {ast.Import} and {ast.ImportFrom}.")
71
+ if type_ignores:
72
+ self.type_ignores.extend(type_ignores)
72
73
 
73
- def addImport_asStr(self, moduleWithLogicalPath: str_nameDOTname) -> None:
74
+ def addImport_asStr(self, moduleWithLogicalPath: str_nameDOTname, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
74
75
  self.listImport.append(moduleWithLogicalPath)
76
+ if type_ignores:
77
+ self.type_ignores.extend(type_ignores)
75
78
 
76
- def addImportFrom_asStr(self, moduleWithLogicalPath: str_nameDOTname, name: ast_Identifier, asname: ast_Identifier | None = None) -> None:
79
+ def addImportFrom_asStr(self, moduleWithLogicalPath: str_nameDOTname, name: ast_Identifier, asname: ast_Identifier | None = None, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
77
80
  if moduleWithLogicalPath not in self.dictionaryImportFrom:
78
81
  self.dictionaryImportFrom[moduleWithLogicalPath] = []
79
82
  self.dictionaryImportFrom[moduleWithLogicalPath].append((name, asname))
83
+ if type_ignores:
84
+ self.type_ignores.extend(type_ignores)
80
85
 
81
86
  def removeImportFromModule(self, moduleWithLogicalPath: str_nameDOTname) -> None:
82
87
  """Remove all imports from a specific module."""
@@ -129,11 +134,14 @@ class LedgerOfImports:
129
134
  self.dictionaryImportFrom = updateExtendPolishDictionaryLists(self.dictionaryImportFrom, *(ledger.dictionaryImportFrom for ledger in fromLedger), destroyDuplicates=True, reorderLists=True)
130
135
  for ledger in fromLedger:
131
136
  self.listImport.extend(ledger.listImport)
137
+ self.type_ignores.extend(ledger.type_ignores)
132
138
 
133
- def walkThis(self, walkThis: ast.AST) -> None:
139
+ def walkThis(self, walkThis: ast.AST, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
134
140
  for nodeBuffalo in ast.walk(walkThis):
135
141
  if isinstance(nodeBuffalo, (ast.Import, ast.ImportFrom)):
136
142
  self.addAst(nodeBuffalo)
143
+ if type_ignores:
144
+ self.type_ignores.extend(type_ignores)
137
145
 
138
146
  # Consolidate settings classes through inheritance https://github.com/hunterhogan/mapFolding/issues/15
139
147
  @dataclasses.dataclass
@@ -141,17 +149,16 @@ class IngredientsFunction:
141
149
  """
142
150
  Package a function definition with its import dependencies for code generation.
143
151
 
144
- IngredientsFunction encapsulates an AST function definition along with all the
145
- imports required for that function to operate correctly. This creates a modular,
146
- portable unit that can be:
152
+ IngredientsFunction encapsulates an AST function definition along with all the imports required for that function to
153
+ operate correctly. This creates a modular, portable unit that can be:
147
154
 
148
- 1. Transformed independently (e.g., by applying Numba decorators)
149
- 2. Transplanted between modules while maintaining dependencies
150
- 3. Combined with other functions to form complete modules
151
- 4. Analyzed for optimization opportunities
155
+ 1. Transformed independently (e.g., by applying Numba decorators).
156
+ 2. Transplanted between modules while maintaining dependencies.
157
+ 3. Combined with other functions to form complete modules.
158
+ 4. Analyzed for optimization opportunities.
152
159
 
153
- This class forms the primary unit of function manipulation in the code generation
154
- system, enabling targeted transformations while preserving function dependencies.
160
+ This class forms the primary unit of function manipulation in the code generation system, enabling targeted
161
+ transformations while preserving function dependencies.
155
162
 
156
163
  Parameters:
157
164
  astFunctionDef: The AST representation of the function definition
@@ -266,15 +273,18 @@ class IngredientsModule:
266
273
  for ingredientsFunction in self.listIngredientsFunctions:
267
274
  ingredientsFunction.imports.removeImportFrom(moduleWithLogicalPath, name, asname)
268
275
 
269
- @property
270
- def list_astImportImportFrom(self) -> list[ast.Import | ast.ImportFrom]:
271
- """List of `ast.Import` and `ast.ImportFrom` statements."""
276
+ def _consolidatedLedger(self) -> LedgerOfImports:
277
+ """Consolidate all ledgers of imports."""
272
278
  sherpaLedger = LedgerOfImports()
273
279
  listLedgers: list[LedgerOfImports] = [self.imports]
274
280
  for ingredientsFunction in self.listIngredientsFunctions:
275
281
  listLedgers.append(ingredientsFunction.imports)
276
282
  sherpaLedger.update(*listLedgers)
277
- return sherpaLedger.makeList_ast()
283
+ return sherpaLedger
284
+
285
+ @property
286
+ def list_astImportImportFrom(self) -> list[ast.Import | ast.ImportFrom]:
287
+ return self._consolidatedLedger().makeList_ast()
278
288
 
279
289
  @property
280
290
  def body(self) -> list[ast.stmt]:
@@ -291,7 +301,7 @@ class IngredientsModule:
291
301
  @property
292
302
  def type_ignores(self) -> list[ast.TypeIgnore]:
293
303
  listTypeIgnore: list[ast.TypeIgnore] = self.supplemental_type_ignores
294
- # listTypeIgnore.extend(self.imports.makeListAst())
304
+ listTypeIgnore.extend(self._consolidatedLedger().type_ignores)
295
305
  listTypeIgnore.extend(self.prologue.type_ignores)
296
306
  for ingredientsFunction in self.listIngredientsFunctions:
297
307
  listTypeIgnore.extend(ingredientsFunction.type_ignores)
@@ -316,7 +326,7 @@ class RecipeSynthesizeFlow:
316
326
 
317
327
  This configuration class serves as a single source of truth for the code generation
318
328
  process, ensuring consistency across all generated artifacts while enabling
319
- customization of the transformation pipeline.
329
+ customization of the transformation assembly line.
320
330
 
321
331
  The transformation process uses this configuration to extract functions from the
322
332
  source module, transform them according to optimization rules, and output
@@ -6,7 +6,7 @@ for specific map folding calculation jobs. Unlike the general-purpose transforma
6
6
  in toolboxNumba.py, this module creates standalone Python modules optimized for a
7
7
  single map shape with statically-encoded parameters.
8
8
 
9
- The code generation pipeline focuses on:
9
+ The code generation assembly line focuses on:
10
10
 
11
11
  1. Converting function parameters to initialized variables with concrete values.
12
12
  2. Replacing dynamic computations with statically-known values.
@@ -175,7 +175,7 @@ def makeJobNumba(job: RecipeJob, spices: SpicesJobNumba) -> None:
175
175
  """
176
176
  Generate a highly-optimized, single-purpose Numba module for a specific map shape.
177
177
 
178
- This function implements the complete transformation pipeline for creating a
178
+ This function implements the complete transformation assembly line for creating a
179
179
  standalone, specialized implementation for calculating map folding solutions for
180
180
  a specific shape. The process includes:
181
181
 
@@ -245,9 +245,9 @@ if __name__ == '__main__':
245
245
  Z0Z_asname: ast_Identifier | None = None
246
246
 
247
247
  listDatatypeConfigs = [
248
- DatatypeConfig(fml='DatatypeLeavesTotal', Z0Z_module='numba', Z0Z_type_name='uint16'),
248
+ DatatypeConfig(fml='DatatypeLeavesTotal', Z0Z_module='numba', Z0Z_type_name='uint8'),
249
249
  DatatypeConfig(fml='DatatypeElephino', Z0Z_module='numba', Z0Z_type_name='uint16'),
250
- DatatypeConfig(fml='DatatypeFoldsTotal', Z0Z_module='numba', Z0Z_type_name='int64'),
250
+ DatatypeConfig(fml='DatatypeFoldsTotal', Z0Z_module='numba', Z0Z_type_name='uint64'),
251
251
  ]
252
252
 
253
253
  for datatypeConfig in listDatatypeConfigs:
@@ -261,9 +261,9 @@ if __name__ == '__main__':
261
261
  ingredientsCount.imports.removeImportFromModule('mapFolding.theSSOT')
262
262
 
263
263
  listNumPyTypeConfigs = [
264
- DatatypeConfig(fml='Array1DLeavesTotal', Z0Z_module='numpy', Z0Z_type_name='uint16', Z0Z_asname='Array1DLeavesTotal'),
264
+ DatatypeConfig(fml='Array1DLeavesTotal', Z0Z_module='numpy', Z0Z_type_name='uint8', Z0Z_asname='Array1DLeavesTotal'),
265
265
  DatatypeConfig(fml='Array1DElephino', Z0Z_module='numpy', Z0Z_type_name='uint16', Z0Z_asname='Array1DElephino'),
266
- DatatypeConfig(fml='Array3D', Z0Z_module='numpy', Z0Z_type_name='uint16', Z0Z_asname='Array3D'),
266
+ DatatypeConfig(fml='Array3D', Z0Z_module='numpy', Z0Z_type_name='uint8', Z0Z_asname='Array3D'),
267
267
  ]
268
268
 
269
269
  for typeConfig in listNumPyTypeConfigs:
@@ -299,7 +299,7 @@ if __name__ == '__main__':
299
299
  """
300
300
 
301
301
  if __name__ == '__main__':
302
- mapShape = (2,2,2,2,2,2,2,2)
302
+ mapShape = (2,21)
303
303
  state = makeInitializedComputationState(mapShape)
304
304
  # foldsTotalEstimated = getFoldsTotalKnown(state.mapShape) // state.leavesTotal
305
305
  foldsTotalEstimated = dictionaryEstimates[state.mapShape] // state.leavesTotal
@@ -158,7 +158,7 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow) -> None:
158
158
  """
159
159
  Transform standard Python algorithm code into optimized Numba implementations.
160
160
 
161
- This function implements the complete transformation pipeline that converts
161
+ This function implements the complete transformation assembly line that converts
162
162
  a conventional Python implementation into a high-performance Numba-accelerated
163
163
  version. The process includes:
164
164
 
@@ -424,7 +424,7 @@ def write_astModule(ingredients: IngredientsModule, pathFilename: PathLike[Any]
424
424
  4. Optimizes imports using autoflake
425
425
  5. Writes the final source code to the specified file location
426
426
 
427
- This is typically the final step in the code generation pipeline,
427
+ This is typically the final step in the code generation assembly line,
428
428
  producing optimized Python modules ready for execution.
429
429
 
430
430
  Parameters:
@@ -0,0 +1,142 @@
1
+ from mapFolding.dataBaskets import MapFoldingState
2
+
3
def activeLeafGreaterThan0(state: MapFoldingState) -> bool:
    """Predicate: the active leaf index is still positive, so the search has not unwound past the root."""
    return 0 < state.leaf1ndex
5
+
6
def activeLeafGreaterThanLeavesTotal(state: MapFoldingState) -> bool:
    """Predicate: the active leaf index has passed `leavesTotal`, i.e. every leaf has been placed."""
    return state.leavesTotal < state.leaf1ndex
8
+
9
def activeLeafIsTheFirstLeaf(state: MapFoldingState) -> bool:
    """Predicate: the active leaf is leaf 1 (or below), the first leaf of the folding."""
    return state.leaf1ndex < 2
11
+
12
def activeLeafIsUnconstrainedInAllDimensions(state: MapFoldingState) -> bool:
    """Predicate: the unconstrained-dimension counter reached zero, so no dimension constrains the active leaf."""
    return state.dimensionsUnconstrained == 0
14
+
15
def activeLeafUnconstrainedInThisDimension(state: MapFoldingState) -> MapFoldingState:
    """Record that the current dimension does not constrain the active leaf.

    Decrements `dimensionsUnconstrained` and returns the (mutated) state.
    """
    state.dimensionsUnconstrained = state.dimensionsUnconstrained - 1
    return state
18
+
19
def filterCommonGaps(state: MapFoldingState) -> MapFoldingState:
    """Keep the candidate gap at `indexMiniGap` only if it was gapped in every constrained dimension.

    Copies the candidate into the compacted region at `gap1ndex`, advances
    `gap1ndex` when the candidate's per-dimension gap count equals
    `dimensionsUnconstrained`, and resets that count for the next leaf.
    """
    candidateLeaf = state.gapsWhere[state.indexMiniGap]
    state.gapsWhere[state.gap1ndex] = candidateLeaf
    if state.countDimensionsGapped[candidateLeaf] == state.dimensionsUnconstrained:
        # Inlined `incrementActiveGap`: accept this gap.
        state.gap1ndex += 1
    state.countDimensionsGapped[candidateLeaf] = 0
    return state
25
+
26
def gapAvailable(state: MapFoldingState) -> bool:
    """Predicate: `leaf1ndex` is still positive after backtracking, so a placement gap can be consumed."""
    return 0 < state.leaf1ndex
28
+
29
def incrementActiveGap(state: MapFoldingState) -> MapFoldingState:
    """Advance the active-gap cursor `gap1ndex` by one and return the state."""
    state.gap1ndex = state.gap1ndex + 1
    return state
32
+
33
def incrementGap1ndexCeiling(state: MapFoldingState) -> MapFoldingState:
    """Advance the gap-candidate ceiling `gap1ndexCeiling` by one and return the state."""
    state.gap1ndexCeiling = state.gap1ndexCeiling + 1
    return state
36
+
37
def incrementIndexMiniGap(state: MapFoldingState) -> MapFoldingState:
    """Advance the candidate-gap iterator `indexMiniGap` by one and return the state."""
    state.indexMiniGap = state.indexMiniGap + 1
    return state
40
+
41
def initializeIndexMiniGap(state: MapFoldingState) -> MapFoldingState:
    """Start the candidate-gap iterator `indexMiniGap` at the current `gap1ndex` and return the state."""
    state.indexMiniGap = state.gap1ndex
    return state
44
+
45
def initializeVariablesToFindGaps(state: MapFoldingState) -> MapFoldingState:
    """Reset the per-leaf gap-search variables before scanning the dimensions.

    Sets the dimension cursor to zero, assumes all dimensions are
    unconstrained until proven otherwise, and positions the candidate
    ceiling at the previous leaf's recorded gap-range start.
    """
    state.indexDimension = 0
    state.dimensionsUnconstrained = state.dimensionsTotal
    state.gap1ndexCeiling = state.gapRangeStart[state.leaf1ndex - 1]
    return state
50
+
51
def insertActiveLeaf(state: MapFoldingState) -> MapFoldingState:
    """Record every leaf index below the active leaf as a gap candidate.

    Used when no dimension constrains the active leaf: each of
    `0 .. leaf1ndex - 1` is appended to `gapsWhere`, advancing
    `gap1ndexCeiling` for each one. Leaves `indexLeaf == leaf1ndex`.
    """
    state.indexLeaf = 0
    while state.indexLeaf < state.leaf1ndex:
        state.gapsWhere[state.gap1ndexCeiling] = state.indexLeaf
        state.gap1ndexCeiling = state.gap1ndexCeiling + 1
        state.indexLeaf = state.indexLeaf + 1
    return state
58
+
59
def insertActiveLeafAtGap(state: MapFoldingState) -> MapFoldingState:
    """Place the active leaf into the most recently recorded gap and advance to the next leaf.

    Consumes one gap from `gapsWhere`, splices `leaf1ndex` into the
    `leafAbove`/`leafBelow` doubly linked lists at that position, records
    where this leaf's gap range starts (needed to undo the placement when
    backtracking), then increments `leaf1ndex`.

    NOTE: statement order is load-bearing — each pointer update reads values
    written by the previous one.
    """
    state.gap1ndex -= 1
    # Link the active leaf directly below the gap leaf ...
    state.leafAbove[state.leaf1ndex] = state.gapsWhere[state.gap1ndex]
    state.leafBelow[state.leaf1ndex] = state.leafBelow[state.leafAbove[state.leaf1ndex]]
    # ... then repoint both neighbors at the newly inserted leaf.
    state.leafBelow[state.leafAbove[state.leaf1ndex]] = state.leaf1ndex
    state.leafAbove[state.leafBelow[state.leaf1ndex]] = state.leaf1ndex
    # Remember the gap-range start so `noGapsHere`/`undoLastLeafPlacement` can backtrack.
    state.gapRangeStart[state.leaf1ndex] = state.gap1ndex
    state.leaf1ndex += 1
    return state
68
+
69
def leafBelowSentinelIs1(state: MapFoldingState) -> bool:
    """Predicate: the sentinel leaf 0 currently points below to leaf 1."""
    return 1 == state.leafBelow[0]
71
+
72
def leafConnecteeIsActiveLeaf(state: MapFoldingState) -> bool:
    """Predicate: the connection-graph lookup returned the active leaf itself."""
    return state.leaf1ndex == state.leafConnectee
74
+
75
def lookForGaps(state: MapFoldingState) -> MapFoldingState:
    """Record the current connectee as a gap candidate for the active leaf.

    Writes the connectee into `gapsWhere` at the ceiling, advances the
    ceiling only the first time this connectee appears, and bumps the
    connectee's per-dimension gap count.
    """
    connectee = state.leafConnectee
    state.gapsWhere[state.gap1ndexCeiling] = connectee
    if state.countDimensionsGapped[connectee] == 0:
        # Inlined `incrementGap1ndexCeiling`: first sighting of this connectee.
        state.gap1ndexCeiling += 1
    state.countDimensionsGapped[connectee] += 1
    return state
81
+
82
def lookupLeafConnecteeInConnectionGraph(state: MapFoldingState) -> MapFoldingState:
    """Start the connectee chain for the current dimension at the active leaf's own entry."""
    dimension = state.indexDimension
    activeLeaf = state.leaf1ndex
    state.leafConnectee = state.connectionGraph[dimension, activeLeaf, activeLeaf]
    return state
85
+
86
def loopingLeavesConnectedToActiveLeaf(state: MapFoldingState) -> bool:
    """Predicate: the connectee chain has not yet cycled back to the active leaf."""
    return not (state.leafConnectee == state.leaf1ndex)
88
+
89
def loopingThroughTheDimensions(state: MapFoldingState) -> bool:
    """Predicate: there are still dimensions left to scan for the active leaf."""
    return state.dimensionsTotal > state.indexDimension
91
+
92
def loopingToActiveGapCeiling(state: MapFoldingState) -> bool:
    """Predicate: the candidate-gap iterator has not yet reached the recorded ceiling."""
    return state.gap1ndexCeiling > state.indexMiniGap
94
+
95
def noGapsHere(state: MapFoldingState) -> bool:
    """Predicate: the active leaf has exhausted its gap range, so the last placement must be undone."""
    if state.leaf1ndex <= 0:
        return False
    # The gap cursor is back at the previous leaf's recorded range start: nothing left to try.
    return state.gap1ndex == state.gapRangeStart[state.leaf1ndex - 1]
97
+
98
def tryAnotherLeafConnectee(state: MapFoldingState) -> MapFoldingState:
    """Step the connectee chain: follow the current connectee's below-pointer through the connection graph."""
    dimension = state.indexDimension
    activeLeaf = state.leaf1ndex
    state.leafConnectee = state.connectionGraph[dimension, activeLeaf, state.leafBelow[state.leafConnectee]]
    return state
101
+
102
def tryNextDimension(state: MapFoldingState) -> MapFoldingState:
    """Advance the dimension cursor `indexDimension` by one and return the state."""
    state.indexDimension = state.indexDimension + 1
    return state
105
+
106
def undoLastLeafPlacement(state: MapFoldingState) -> MapFoldingState:
    """Backtrack one step: unsplice the most recently placed leaf from the linked lists.

    Decrements `leaf1ndex` first so the pointer updates operate on the leaf
    being removed; statement order is load-bearing.
    """
    state.leaf1ndex -= 1
    # Repoint the removed leaf's neighbors at each other, closing the gap it occupied.
    state.leafBelow[state.leafAbove[state.leaf1ndex]] = state.leafBelow[state.leaf1ndex]
    state.leafAbove[state.leafBelow[state.leaf1ndex]] = state.leafAbove[state.leaf1ndex]
    return state
111
+
112
def count(state: MapFoldingState) -> MapFoldingState:
    """Run the backtracking enumeration of foldings, accumulating into `state.groupsOfFolds`.

    Each outer iteration either counts a completed arrangement, computes the
    admissible gaps for the active leaf, backtracks exhausted placements, or
    places the active leaf into the next available gap. Terminates when
    `leaf1ndex` unwinds to zero.
    """
    while activeLeafGreaterThan0(state):
        if activeLeafIsTheFirstLeaf(state) or leafBelowSentinelIs1(state):
            if activeLeafGreaterThanLeavesTotal(state):
                # Every leaf is placed: one more group of folds found.
                state.groupsOfFolds += 1
            else:
                # Scan each dimension, chasing the chain of leaves connected to the active leaf.
                state = initializeVariablesToFindGaps(state)
                while loopingThroughTheDimensions(state):
                    state = lookupLeafConnecteeInConnectionGraph(state)
                    if leafConnecteeIsActiveLeaf(state):
                        # Chain starts at the active leaf itself: this dimension imposes no constraint.
                        state = activeLeafUnconstrainedInThisDimension(state)
                    else:
                        while loopingLeavesConnectedToActiveLeaf(state):
                            state = lookForGaps(state)
                            state = tryAnotherLeafConnectee(state)
                    state = tryNextDimension(state)
                if activeLeafIsUnconstrainedInAllDimensions(state):
                    # No constraints at all: every earlier leaf position is a gap.
                    state = insertActiveLeaf(state)
                # Compact the candidates, keeping gaps common to all constrained dimensions.
                state = initializeIndexMiniGap(state)
                while loopingToActiveGapCeiling(state):
                    state = filterCommonGaps(state)
                    state = incrementIndexMiniGap(state)
        # Unwind placements whose gap ranges are exhausted.
        while noGapsHere(state):
            state = undoLastLeafPlacement(state)
        if gapAvailable(state):
            state = insertActiveLeafAtGap(state)
    return state
139
+
140
def doTheNeedful(state: MapFoldingState) -> MapFoldingState:
    """Entry point: run the full counting algorithm on `state` and return the finished state."""
    return count(state)
@@ -19,12 +19,23 @@ collisions when transforming algorithms.
19
19
  from collections.abc import Callable
20
20
  from importlib import import_module as importlib_import_module
21
21
  from inspect import getfile as inspect_getfile
22
- from numpy import dtype, int64 as numpy_int64, integer, ndarray
23
22
  from pathlib import Path
24
23
  from tomli import load as tomli_load
25
24
  from types import ModuleType
26
- from typing import Any, TypeAlias, TypeVar
27
25
  import dataclasses
26
+ from mapFolding.datatypes import (
27
+ Array1DElephino as Array1DElephino,
28
+ Array1DFoldsTotal as Array1DFoldsTotal,
29
+ Array1DLeavesTotal as Array1DLeavesTotal,
30
+ Array3D as Array3D,
31
+ DatatypeElephino as DatatypeElephino,
32
+ DatatypeFoldsTotal as DatatypeFoldsTotal,
33
+ DatatypeLeavesTotal as DatatypeLeavesTotal,
34
+ NumPyElephino as NumPyElephino,
35
+ NumPyFoldsTotal as NumPyFoldsTotal,
36
+ NumPyIntegerType as NumPyIntegerType,
37
+ NumPyLeavesTotal as NumPyLeavesTotal,
38
+ )
28
39
 
29
40
  # Evaluate When Packaging https://github.com/hunterhogan/mapFolding/issues/18
30
41
  try:
@@ -144,25 +155,6 @@ class PackageSettings:
144
155
 
145
156
  The = PackageSettings(logicalPathModuleDispatcher=logicalPathModuleDispatcherHARDCODED, callableDispatcher=callableDispatcherHARDCODED, concurrencyPackage=concurrencyPackageHARDCODED)
146
157
 
147
- # =============================================================================
148
- # Flexible Data Structure System Needs Enhanced Paradigm https://github.com/hunterhogan/mapFolding/issues/9
149
-
150
- NumPyIntegerType = TypeVar('NumPyIntegerType', bound=integer[Any], covariant=True)
151
-
152
- DatatypeLeavesTotal: TypeAlias = int
153
- NumPyLeavesTotal: TypeAlias = numpy_int64
154
-
155
- DatatypeElephino: TypeAlias = int
156
- NumPyElephino: TypeAlias = numpy_int64
157
-
158
- DatatypeFoldsTotal: TypeAlias = int
159
- NumPyFoldsTotal: TypeAlias = numpy_int64
160
-
161
- Array3D: TypeAlias = ndarray[tuple[int, int, int], dtype[NumPyLeavesTotal]]
162
- Array1DLeavesTotal: TypeAlias = ndarray[tuple[int], dtype[NumPyLeavesTotal]]
163
- Array1DElephino: TypeAlias = ndarray[tuple[int], dtype[NumPyElephino]]
164
- Array1DFoldsTotal: TypeAlias = ndarray[tuple[int], dtype[NumPyFoldsTotal]]
165
-
166
158
  @dataclasses.dataclass
167
159
  class ComputationState:
168
160
  """
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mapFolding
3
- Version: 0.9.2
3
+ Version: 0.9.3
4
4
  Summary: Map folding algorithm with code transformation framework for optimizing numerical computations
5
5
  Author-email: Hunter Hogan <HunterHogan@pm.me>
6
6
  License: CC-BY-NC-4.0
@@ -8,7 +8,7 @@ Project-URL: Donate, https://www.patreon.com/integrated
8
8
  Project-URL: Homepage, https://github.com/hunterhogan/mapFolding
9
9
  Project-URL: Repository, https://github.com/hunterhogan/mapFolding.git
10
10
  Project-URL: Issues, https://github.com/hunterhogan/mapFolding/issues
11
- Keywords: A001415,A001416,A001417,A001418,A195646,algorithmic optimization,AST manipulation,code generation,code transformation,combinatorics,computational geometry,dataclass transformation,folding pattern enumeration,just-in-time compilation,map folding,Numba optimization,OEIS,performance optimization,source code analysis,stamp folding
11
+ Keywords: A000136,A001415,A001416,A001417,A001418,A195646,algorithmic optimization,AST manipulation,code generation,code transformation,combinatorics,computational geometry,dataclass transformation,folding pattern enumeration,just-in-time compilation,map folding,Numba optimization,OEIS,performance optimization,source code analysis,stamp folding
12
12
  Classifier: Development Status :: 4 - Beta
13
13
  Classifier: Environment :: Console
14
14
  Classifier: Intended Audience :: Developers
@@ -106,7 +106,7 @@ def countFolds_optimized(shape_param):
106
106
 
107
107
  ### 2. Code Generation Framework
108
108
 
109
- Study and extend a complete Python code transformation pipeline:
109
+ Study and extend a complete Python code transformation assembly line:
110
110
 
111
111
  - AST analysis and manipulation
112
112
  - Dataclass decomposition ("shattering")
@@ -1,12 +1,16 @@
1
1
  LICENSE
2
2
  README.md
3
3
  pyproject.toml
4
+ mapFolding/Z0Z_flowControl.py
4
5
  mapFolding/__init__.py
5
6
  mapFolding/basecamp.py
6
7
  mapFolding/beDRY.py
8
+ mapFolding/dataBaskets.py
9
+ mapFolding/datatypes.py
7
10
  mapFolding/oeis.py
8
11
  mapFolding/py.typed
9
12
  mapFolding/theDao.py
13
+ mapFolding/theDaoOfMapFolding.py
10
14
  mapFolding/theSSOT.py
11
15
  mapFolding/toolboxFilesystem.py
12
16
  mapFolding.egg-info/PKG-INFO
@@ -37,6 +37,7 @@ dependencies = [
37
37
  "Z0Z_tools",]
38
38
  description = "Map folding algorithm with code transformation framework for optimizing numerical computations"
39
39
  keywords = [
40
+ "A000136",
40
41
  "A001415",
41
42
  "A001416",
42
43
  "A001417",
@@ -71,7 +72,7 @@ readme = { file = "README.md", content-type = "text/markdown" }
71
72
  requires-python = ">=3.10"
72
73
  scripts = { getOEISids = "mapFolding.oeis:getOEISids", clearOEIScache = "mapFolding.oeis:clearOEIScache", OEIS_for_n = "mapFolding.oeis:OEIS_for_n" }
73
74
  urls = { Donate = "https://www.patreon.com/integrated", Homepage = "https://github.com/hunterhogan/mapFolding", Repository = "https://github.com/hunterhogan/mapFolding.git", Issues = "https://github.com/hunterhogan/mapFolding/issues"}
74
- version = "0.9.2"
75
+ version = "0.9.3"
75
76
 
76
77
  [tool.coverage]
77
78
  report = { exclude_lines = [
@@ -14,7 +14,7 @@ own recipe configurations and job implementations.
14
14
  - Ensures consistency across different implementation strategies
15
15
 
16
16
  2. **Code Generation Testing**
17
- - Tests the AST transformation pipeline from source to optimized implementations
17
+ - Tests the AST transformation assembly line from source to optimized implementations
18
18
  - Validates that generated Numba-accelerated modules produce correct results
19
19
  - Ensures robust code generation across different parameter sets
20
20
 
@@ -29,7 +29,7 @@ This suite is designed to make it easy to test your custom recipes and jobs:
29
29
 
30
30
  ### For Custom Recipes (RecipeSynthesizeFlow):
31
31
  Copy and adapt the `syntheticDispatcherFixture` and associated tests from
32
- `test_computations.py` to validate your customized code transformation pipelines.
32
+ `test_computations.py` to validate your customized code transformation assembly lines.
33
33
 
34
34
  ### For Custom Jobs (RecipeJob):
35
35
  Copy and adapt the `test_writeJobNumba` function to test specialized job modules
@@ -56,7 +56,7 @@ See the examples in `test_computations.py` for guidance on adapting these fixtur
56
56
  """
57
57
 
58
58
  from collections.abc import Callable, Generator, Sequence
59
- from mapFolding import ComputationState, The
59
+ from mapFolding import The
60
60
  from mapFolding.beDRY import getLeavesTotal, validateListDimensions, makeDataContainer
61
61
  from mapFolding.oeis import oeisIDsImplemented, settingsOEIS
62
62
  from mapFolding.someAssemblyRequired import importLogicalPath2Callable, RecipeSynthesizeFlow
@@ -171,7 +171,7 @@ def oneTestCuzTestsOverwritingTests(oeisID_1random: str) -> tuple[int, ...]:
171
171
  pass
172
172
 
173
173
  @pytest.fixture
174
- def listDimensionsTestCountFolds(oeisID: str) -> tuple[int, ...]:
174
+ def mapShapeTestCountFolds(oeisID: str) -> tuple[int, ...]:
175
175
  """For each `oeisID` from the `pytest.fixture`, returns `listDimensions` from `valuesTestValidation`
176
176
  if `validateListDimensions` approves. Each `listDimensions` is suitable for testing counts."""
177
177
  while True:
@@ -202,10 +202,10 @@ def mapShapeTestFunctionality(oeisID_1random: str) -> tuple[int, ...]:
202
202
  pass
203
203
 
204
204
  @pytest.fixture
205
- def listDimensionsTestParallelization(oeisID: str) -> list[int]:
205
+ def mapShapeTestParallelization(oeisID: str) -> tuple[int, ...]:
206
206
  """For each `oeisID` from the `pytest.fixture`, returns `listDimensions` from `valuesTestParallelization`"""
207
207
  n = random.choice(settingsOEIS[oeisID]['valuesTestParallelization'])
208
- return list(settingsOEIS[oeisID]['getMapShape'](n))
208
+ return settingsOEIS[oeisID]['getMapShape'](n)
209
209
 
210
210
  @pytest.fixture
211
211
  def mockBenchmarkTimer() -> Generator[unittest.mock.MagicMock | unittest.mock.AsyncMock, Any, None]:
@@ -255,8 +255,8 @@ def useThisDispatcher() -> Generator[Callable[..., None], Any, None]:
255
255
  def patchDispatcher(callableTarget: Callable[..., Any]) -> None:
256
256
  """Patch the dispatcher property to return the target callable."""
257
257
  # Create a new property that returns the target callable
258
- def patched_dispatcher(self: theSSOT.PackageSettings) -> Callable[['ComputationState'], 'ComputationState']:
259
- def wrapper(state: 'ComputationState') -> 'ComputationState':
258
+ def patched_dispatcher(self: theSSOT.PackageSettings) -> Callable[..., Any]:
259
+ def wrapper(state: Any) -> Any:
260
260
  return callableTarget(state)
261
261
  return wrapper
262
262
 
@@ -268,7 +268,7 @@ def useThisDispatcher() -> Generator[Callable[..., None], Any, None]:
268
268
  # Restore the original property
269
269
  theSSOT.PackageSettings.dispatcher = original_dispatcher_property # type: ignore
270
270
 
271
- def getAlgorithmDispatcher() -> Callable[[ComputationState], ComputationState]:
271
+ def getAlgorithmDispatcher() -> Callable[..., Any]:
272
272
  moduleImported: ModuleType = importlib.import_module(The.logicalPathModuleSourceAlgorithm)
273
273
  dispatcherCallable = getattr(moduleImported, The.sourceCallableDispatcher)
274
274
  return dispatcherCallable
@@ -85,7 +85,7 @@ All tests leverage standardized utilities like `standardizedEqualToCallableRetur
85
85
  that provide consistent, informative error messages and simplify test validation.
86
86
  """
87
87
 
88
- from mapFolding import countFolds, getFoldsTotalKnown, oeisIDfor_n, validateListDimensions
88
+ from mapFolding import countFolds, getFoldsTotalKnown, oeisIDfor_n
89
89
  from mapFolding.oeis import settingsOEIS
90
90
  from mapFolding.someAssemblyRequired.RecipeJob import RecipeJob
91
91
  from mapFolding.someAssemblyRequired.transformationTools import makeInitializedComputationState
@@ -98,28 +98,30 @@ import pytest
98
98
  if __name__ == '__main__':
99
99
  multiprocessing.set_start_method('spawn')
100
100
 
101
- def test_algorithmSourceParallel(listDimensionsTestParallelization: list[int], useAlgorithmSourceDispatcher: None) -> None:
102
- standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(listDimensionsTestParallelization)), countFolds, listDimensionsTestParallelization, None, 'maximum', None)
101
+ def test_algorithmSourceParallel(mapShapeTestParallelization: tuple[int, ...], useAlgorithmSourceDispatcher: None) -> None:
102
+ standardizedEqualToCallableReturn(getFoldsTotalKnown(mapShapeTestParallelization), countFolds, mapShapeTestParallelization, None, 'maximum', None)
103
103
 
104
- def test_algorithmSourceSequential(listDimensionsTestCountFolds: tuple[int, ...], useAlgorithmSourceDispatcher: None) -> None:
105
- standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(listDimensionsTestCountFolds)), countFolds, listDimensionsTestCountFolds)
104
+ def test_theDaoOfMapFolding(mapShapeTestCountFolds: tuple[int, ...]) -> None:
105
+ standardizedEqualToCallableReturn(getFoldsTotalKnown(mapShapeTestCountFolds), countFolds, None, None, None, None, mapShapeTestCountFolds, None, None, 'theDaoOfMapFolding')
106
+
107
+ def test_algorithmSourceSequential(mapShapeTestCountFolds: tuple[int, ...], useAlgorithmSourceDispatcher: None) -> None:
108
+ standardizedEqualToCallableReturn(getFoldsTotalKnown(mapShapeTestCountFolds), countFolds, mapShapeTestCountFolds)
106
109
 
107
110
  def test_aOFn_calculate_value(oeisID: str) -> None:
108
111
  for n in settingsOEIS[oeisID]['valuesTestValidation']:
109
112
  standardizedEqualToCallableReturn(settingsOEIS[oeisID]['valuesKnown'][n], oeisIDfor_n, oeisID, n)
110
113
 
111
- def test_syntheticParallel(syntheticDispatcherFixture: None, listDimensionsTestParallelization: list[int]) -> None:
112
- standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(listDimensionsTestParallelization)), countFolds, listDimensionsTestParallelization, None, 'maximum')
114
+ def test_syntheticParallel(syntheticDispatcherFixture: None, mapShapeTestParallelization: tuple[int, ...]) -> None:
115
+ standardizedEqualToCallableReturn(getFoldsTotalKnown(mapShapeTestParallelization), countFolds, mapShapeTestParallelization, None, 'maximum')
113
116
 
114
- def test_syntheticSequential(syntheticDispatcherFixture: None, listDimensionsTestCountFolds: list[int]) -> None:
115
- standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(listDimensionsTestCountFolds)), countFolds, listDimensionsTestCountFolds)
117
+ def test_syntheticSequential(syntheticDispatcherFixture: None, mapShapeTestCountFolds: tuple[int, ...]) -> None:
118
+ standardizedEqualToCallableReturn(getFoldsTotalKnown(mapShapeTestCountFolds), countFolds, mapShapeTestCountFolds)
116
119
 
117
120
  @pytest.mark.parametrize('pathFilenameTmpTesting', ['.py'], indirect=True)
118
- def test_writeJobNumba(oneTestCuzTestsOverwritingTests: list[int], pathFilenameTmpTesting: Path) -> None:
121
+ def test_writeJobNumba(oneTestCuzTestsOverwritingTests: tuple[int, ...], pathFilenameTmpTesting: Path) -> None:
119
122
  from mapFolding.someAssemblyRequired.toolboxNumba import SpicesJobNumba
120
123
  from mapFolding.someAssemblyRequired.synthesizeNumbaJob import makeJobNumba
121
- mapShape = validateListDimensions(oneTestCuzTestsOverwritingTests)
122
- state = makeInitializedComputationState(mapShape)
124
+ state = makeInitializedComputationState(oneTestCuzTestsOverwritingTests)
123
125
 
124
126
  pathFilenameModule = pathFilenameTmpTesting.absolute()
125
127
  pathFilenameFoldsTotal = pathFilenameModule.with_suffix('.foldsTotalTesting')
@@ -142,4 +144,4 @@ def test_writeJobNumba(oneTestCuzTestsOverwritingTests: list[int], pathFilenameT
142
144
  module.__name__ = "__main__"
143
145
  Don_Lapre_Road_to_Self_Improvement.loader.exec_module(module)
144
146
 
145
- standardizedEqualToCallableReturn(str(getFoldsTotalKnown(mapShape)), pathFilenameFoldsTotal.read_text().strip)
147
+ standardizedEqualToCallableReturn(str(getFoldsTotalKnown(oneTestCuzTestsOverwritingTests)), pathFilenameFoldsTotal.read_text().strip)
@@ -15,8 +15,8 @@ if __name__ == '__main__':
15
15
  def test_countFoldsComputationDivisionsInvalid(mapShapeTestFunctionality: tuple[int, ...]) -> None:
16
16
  standardizedEqualToCallableReturn(ValueError, countFolds, mapShapeTestFunctionality, None, {"wrong": "value"})
17
17
 
18
- def test_countFoldsComputationDivisionsMaximum(listDimensionsTestParallelization: list[int]) -> None:
19
- standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(listDimensionsTestParallelization)), countFolds, listDimensionsTestParallelization, None, 'maximum', None)
18
+ def test_countFoldsComputationDivisionsMaximum(mapShapeTestParallelization: list[int]) -> None:
19
+ standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(mapShapeTestParallelization)), countFolds, mapShapeTestParallelization, None, 'maximum', None)
20
20
 
21
21
  @pytest.mark.parametrize("nameOfTest,callablePytest", PytestFor_defineConcurrencyLimit())
22
22
  def test_defineConcurrencyLimit(nameOfTest: str, callablePytest: Callable[[], None]) -> None:
File without changes
File without changes