mapFolding-0.9.2-py3-none-any.whl → mapFolding-0.9.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/Z0Z_flowControl.py +99 -0
- mapFolding/__init__.py +28 -26
- mapFolding/basecamp.py +1 -1
- mapFolding/beDRY.py +1 -2
- mapFolding/dataBaskets.py +49 -0
- mapFolding/datatypes.py +21 -0
- mapFolding/oeis.py +1 -2
- mapFolding/someAssemblyRequired/__init__.py +1 -1
- mapFolding/someAssemblyRequired/_theTypes.py +1 -1
- mapFolding/someAssemblyRequired/_toolboxContainers.py +58 -48
- mapFolding/someAssemblyRequired/synthesizeNumbaJob.py +7 -7
- mapFolding/someAssemblyRequired/toolboxNumba.py +1 -1
- mapFolding/someAssemblyRequired/transformationTools.py +1 -1
- mapFolding/theDaoOfMapFolding.py +142 -0
- mapFolding/theSSOT.py +13 -21
- {mapfolding-0.9.2.dist-info → mapfolding-0.9.3.dist-info}/METADATA +3 -3
- {mapfolding-0.9.2.dist-info → mapfolding-0.9.3.dist-info}/RECORD +25 -21
- {mapfolding-0.9.2.dist-info → mapfolding-0.9.3.dist-info}/WHEEL +1 -1
- tests/__init__.py +2 -2
- tests/conftest.py +7 -7
- tests/test_computations.py +15 -13
- tests/test_tasks.py +2 -2
- {mapfolding-0.9.2.dist-info → mapfolding-0.9.3.dist-info}/entry_points.txt +0 -0
- {mapfolding-0.9.2.dist-info → mapfolding-0.9.3.dist-info}/licenses/LICENSE +0 -0
- {mapfolding-0.9.2.dist-info → mapfolding-0.9.3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
from collections.abc import Sequence
|
|
2
|
+
from mapFolding import (
|
|
3
|
+
ComputationState,
|
|
4
|
+
getPathFilenameFoldsTotal,
|
|
5
|
+
outfitCountFolds,
|
|
6
|
+
saveFoldsTotal,
|
|
7
|
+
saveFoldsTotalFAILearly,
|
|
8
|
+
setProcessorLimit,
|
|
9
|
+
The,
|
|
10
|
+
validateListDimensions,
|
|
11
|
+
)
|
|
12
|
+
from os import PathLike
|
|
13
|
+
from pathlib import PurePath
|
|
14
|
+
|
|
15
|
+
def countFolds(listDimensions: Sequence[int] | None = None
|
|
16
|
+
, pathLikeWriteFoldsTotal: PathLike[str] | PurePath | None = None
|
|
17
|
+
, computationDivisions: int | str | None = None
|
|
18
|
+
, CPUlimit: int | float | bool | None = None
|
|
19
|
+
# , * I need to improve `standardizedEqualToCallableReturn` so it will work with keyword arguments
|
|
20
|
+
, mapShape: tuple[int, ...] | None = None
|
|
21
|
+
, oeisID: str | None = None
|
|
22
|
+
, oeis_n: int | None = None
|
|
23
|
+
, flow: str | None = None
|
|
24
|
+
) -> int:
|
|
25
|
+
"""
|
|
26
|
+
To select the execution path, I need at least:
|
|
27
|
+
- mapShape
|
|
28
|
+
- task division instructions
|
|
29
|
+
- memorialization instructions
|
|
30
|
+
"""
|
|
31
|
+
|
|
32
|
+
# mapShape =====================================================================
|
|
33
|
+
|
|
34
|
+
if mapShape:
|
|
35
|
+
pass
|
|
36
|
+
else:
|
|
37
|
+
if oeisID and oeis_n:
|
|
38
|
+
from mapFolding.oeis import settingsOEIS
|
|
39
|
+
try:
|
|
40
|
+
mapShape = settingsOEIS[oeisID]['getMapShape'](oeis_n)
|
|
41
|
+
except KeyError:
|
|
42
|
+
pass
|
|
43
|
+
if not mapShape and listDimensions:
|
|
44
|
+
mapShape = validateListDimensions(listDimensions)
|
|
45
|
+
|
|
46
|
+
if mapShape is None:
|
|
47
|
+
raise ValueError(f"""I received these values:
|
|
48
|
+
`{listDimensions = }`,
|
|
49
|
+
`{mapShape = }`,
|
|
50
|
+
`{oeisID = }` and `{oeis_n = }`,
|
|
51
|
+
but I was unable to select a map for which to count the folds.""")
|
|
52
|
+
|
|
53
|
+
# task division instructions ===============================================
|
|
54
|
+
|
|
55
|
+
if computationDivisions:
|
|
56
|
+
# NOTE `The.concurrencyPackage`
|
|
57
|
+
concurrencyLimit: int = setProcessorLimit(CPUlimit, The.concurrencyPackage)
|
|
58
|
+
from mapFolding.beDRY import getLeavesTotal, getTaskDivisions
|
|
59
|
+
leavesTotal: int = getLeavesTotal(mapShape)
|
|
60
|
+
taskDivisions = getTaskDivisions(computationDivisions, concurrencyLimit, leavesTotal)
|
|
61
|
+
del leavesTotal
|
|
62
|
+
else:
|
|
63
|
+
concurrencyLimit = 1
|
|
64
|
+
taskDivisions = 0
|
|
65
|
+
|
|
66
|
+
# memorialization instructions ===========================================
|
|
67
|
+
|
|
68
|
+
if pathLikeWriteFoldsTotal is not None:
|
|
69
|
+
pathFilenameFoldsTotal = getPathFilenameFoldsTotal(mapShape, pathLikeWriteFoldsTotal)
|
|
70
|
+
saveFoldsTotalFAILearly(pathFilenameFoldsTotal)
|
|
71
|
+
else:
|
|
72
|
+
pathFilenameFoldsTotal = None
|
|
73
|
+
|
|
74
|
+
# Flow control until I can figure out a good way ===============================
|
|
75
|
+
|
|
76
|
+
if flow == 'theDaoOfMapFolding':
|
|
77
|
+
from mapFolding.dataBaskets import MapFoldingState
|
|
78
|
+
mapFoldingState: MapFoldingState = MapFoldingState(mapShape)
|
|
79
|
+
|
|
80
|
+
from mapFolding.theDaoOfMapFolding import doTheNeedful
|
|
81
|
+
mapFoldingState = doTheNeedful(mapFoldingState)
|
|
82
|
+
foldsTotal = mapFoldingState.foldsTotal
|
|
83
|
+
|
|
84
|
+
# NOTE treat this as a default?
|
|
85
|
+
# flow based on `The` and `ComputationState` ====================================
|
|
86
|
+
|
|
87
|
+
else:
|
|
88
|
+
computationStateInitialized: ComputationState = outfitCountFolds(mapShape, computationDivisions, concurrencyLimit)
|
|
89
|
+
computationStateComplete: ComputationState = The.dispatcher(computationStateInitialized)
|
|
90
|
+
|
|
91
|
+
computationStateComplete.getFoldsTotal()
|
|
92
|
+
foldsTotal = computationStateComplete.foldsTotal
|
|
93
|
+
|
|
94
|
+
# Follow memorialization instructions ===========================================
|
|
95
|
+
|
|
96
|
+
if pathFilenameFoldsTotal is not None:
|
|
97
|
+
saveFoldsTotal(pathFilenameFoldsTotal, foldsTotal)
|
|
98
|
+
|
|
99
|
+
return foldsTotal
|
mapFolding/__init__.py
CHANGED
|
@@ -1,29 +1,27 @@
|
|
|
1
1
|
"""
|
|
2
2
|
Map folding enumeration and counting algorithms with advanced optimization capabilities.
|
|
3
3
|
|
|
4
|
-
This package implements algorithms to count and enumerate the distinct ways
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
high-level user interfaces to sophisticated algorithmic optimizations and code
|
|
8
|
-
transformation tools.
|
|
4
|
+
This package implements algorithms to count and enumerate the distinct ways a rectangular map can be folded, based on
|
|
5
|
+
the mathematical problem described in Lunnon's 1971 paper. It provides multiple layers of functionality, from high-level
|
|
6
|
+
user interfaces to sophisticated algorithmic optimizations and code transformation tools.
|
|
9
7
|
|
|
10
8
|
Core modules:
|
|
11
9
|
- basecamp: Public API with simplified interfaces for end users
|
|
12
10
|
- theDao: Core computational algorithm using a functional state-transformation approach
|
|
13
|
-
- beDRY: Core utility functions implementing consistent data handling, validation, and
|
|
14
|
-
|
|
11
|
+
- beDRY: Core utility functions implementing consistent data handling, validation, and resource management across the
|
|
12
|
+
package's computational assembly-line
|
|
15
13
|
- theSSOT: Single Source of Truth for configuration, types, and state management
|
|
16
|
-
- toolboxFilesystem: Cross-platform file management services for storing and retrieving
|
|
17
|
-
|
|
14
|
+
- toolboxFilesystem: Cross-platform file management services for storing and retrieving computation results with robust
|
|
15
|
+
error handling and fallback mechanisms
|
|
18
16
|
- oeis: Interface to the Online Encyclopedia of Integer Sequences for known results
|
|
19
17
|
|
|
20
18
|
Extended functionality:
|
|
21
|
-
- someAssemblyRequired: Code transformation framework that optimizes the core algorithm
|
|
22
|
-
|
|
23
|
-
- The system converts readable code into high-performance implementations through
|
|
24
|
-
|
|
25
|
-
- Provides tools to "shatter" complex dataclasses into primitive components,
|
|
26
|
-
|
|
19
|
+
- someAssemblyRequired: Code transformation framework that optimizes the core algorithm through AST manipulation,
|
|
20
|
+
dataclass transformation, and compilation techniques
|
|
21
|
+
- The system converts readable code into high-performance implementations through a systematic analysis and
|
|
22
|
+
transformation assembly line
|
|
23
|
+
- Provides tools to "shatter" complex dataclasses into primitive components, enabling compatibility with Numba and
|
|
24
|
+
other optimization frameworks
|
|
27
25
|
- Creates specialized implementations tailored for specific input parameters
|
|
28
26
|
|
|
29
27
|
Testing and extension:
|
|
@@ -35,24 +33,22 @@ Testing and extension:
|
|
|
35
33
|
|
|
36
34
|
Special directories:
|
|
37
35
|
- .cache/: Stores cached data from external sources like OEIS to improve performance
|
|
38
|
-
- syntheticModules/: Contains dynamically generated, optimized implementations of the
|
|
39
|
-
|
|
36
|
+
- syntheticModules/: Contains dynamically generated, optimized implementations of the core algorithm created by the code
|
|
37
|
+
transformation framework
|
|
40
38
|
- reference/: Historical implementations and educational resources for algorithm exploration
|
|
41
|
-
- reference/jobsCompleted/: Contains successful computations for previously unknown values,
|
|
42
|
-
|
|
39
|
+
- reference/jobsCompleted/: Contains successful computations for previously unknown values, including first-ever
|
|
40
|
+
calculations for 2x19 and 2x20 maps (OEIS A001415)
|
|
43
41
|
|
|
44
|
-
This package balances algorithm readability and understandability with
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
a foundation for exploring advanced code transformation techniques.
|
|
42
|
+
This package balances algorithm readability and understandability with high-performance computation capabilities,
|
|
43
|
+
allowing users to compute map folding totals for larger dimensions than previously feasible while also providing a
|
|
44
|
+
foundation for exploring advanced code transformation techniques.
|
|
48
45
|
"""
|
|
49
46
|
|
|
50
|
-
from mapFolding.
|
|
47
|
+
from mapFolding.datatypes import (
|
|
51
48
|
Array1DElephino as Array1DElephino,
|
|
52
49
|
Array1DFoldsTotal as Array1DFoldsTotal,
|
|
53
50
|
Array1DLeavesTotal as Array1DLeavesTotal,
|
|
54
51
|
Array3D as Array3D,
|
|
55
|
-
ComputationState as ComputationState,
|
|
56
52
|
DatatypeElephino as DatatypeElephino,
|
|
57
53
|
DatatypeFoldsTotal as DatatypeFoldsTotal,
|
|
58
54
|
DatatypeLeavesTotal as DatatypeLeavesTotal,
|
|
@@ -60,6 +56,10 @@ from mapFolding.theSSOT import (
|
|
|
60
56
|
NumPyFoldsTotal as NumPyFoldsTotal,
|
|
61
57
|
NumPyIntegerType as NumPyIntegerType,
|
|
62
58
|
NumPyLeavesTotal as NumPyLeavesTotal,
|
|
59
|
+
)
|
|
60
|
+
|
|
61
|
+
from mapFolding.theSSOT import (
|
|
62
|
+
ComputationState as ComputationState,
|
|
63
63
|
raiseIfNoneGitHubIssueNumber3 as raiseIfNoneGitHubIssueNumber3,
|
|
64
64
|
The as The,
|
|
65
65
|
)
|
|
@@ -70,6 +70,8 @@ from mapFolding.theDao import (
|
|
|
70
70
|
)
|
|
71
71
|
|
|
72
72
|
from mapFolding.beDRY import (
|
|
73
|
+
getLeavesTotal as getLeavesTotal,
|
|
74
|
+
getTaskDivisions as getTaskDivisions,
|
|
73
75
|
outfitCountFolds as outfitCountFolds,
|
|
74
76
|
setProcessorLimit as setProcessorLimit,
|
|
75
77
|
validateListDimensions as validateListDimensions,
|
|
@@ -83,7 +85,7 @@ from mapFolding.toolboxFilesystem import (
|
|
|
83
85
|
writeStringToHere as writeStringToHere,
|
|
84
86
|
)
|
|
85
87
|
|
|
86
|
-
from mapFolding.
|
|
88
|
+
from mapFolding.Z0Z_flowControl import countFolds
|
|
87
89
|
|
|
88
90
|
from mapFolding.oeis import (
|
|
89
91
|
clearOEIScache as clearOEIScache,
|
mapFolding/basecamp.py
CHANGED
|
@@ -63,7 +63,7 @@ def countFolds(listDimensions: Sequence[int]
|
|
|
63
63
|
|
|
64
64
|
Note well
|
|
65
65
|
---------
|
|
66
|
-
You probably
|
|
66
|
+
You probably do not want to divide your computation into tasks.
|
|
67
67
|
|
|
68
68
|
If you want to compute a large `foldsTotal`, dividing the computation into tasks is usually a bad idea. Dividing the
|
|
69
69
|
algorithm into tasks is inherently inefficient: efficient division into tasks means there would be no overlap in the
|
mapFolding/beDRY.py
CHANGED
|
@@ -29,8 +29,7 @@ def getLeavesTotal(mapShape: tuple[int, ...]) -> int:
|
|
|
29
29
|
"""
|
|
30
30
|
Calculate the total number of leaves in a map with the given dimensions.
|
|
31
31
|
|
|
32
|
-
The total number of leaves is the product of all dimensions in the map shape.
|
|
33
|
-
initializing the computation state and determining task divisions.
|
|
32
|
+
The total number of leaves is the product of all dimensions in the map shape.
|
|
34
33
|
|
|
35
34
|
Parameters
|
|
36
35
|
----------
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
from mapFolding.beDRY import getConnectionGraph, getLeavesTotal, makeDataContainer
|
|
2
|
+
from mapFolding.datatypes import Array3D, Array1DElephino, Array1DLeavesTotal, DatatypeElephino, DatatypeFoldsTotal, DatatypeLeavesTotal
|
|
3
|
+
import dataclasses
|
|
4
|
+
|
|
5
|
+
@dataclasses.dataclass
|
|
6
|
+
class MapFoldingState:
|
|
7
|
+
mapShape: tuple[DatatypeLeavesTotal, ...] = dataclasses.field(init=True, metadata={'elementConstructor': 'DatatypeLeavesTotal'})
|
|
8
|
+
|
|
9
|
+
groupsOfFolds: DatatypeFoldsTotal = dataclasses.field(default=DatatypeFoldsTotal(0), metadata={'theCountingIdentifier': True})
|
|
10
|
+
|
|
11
|
+
gap1ndex: DatatypeElephino = DatatypeElephino(0)
|
|
12
|
+
gap1ndexCeiling: DatatypeElephino = DatatypeElephino(0)
|
|
13
|
+
indexDimension: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
|
|
14
|
+
indexLeaf: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
|
|
15
|
+
indexMiniGap: DatatypeElephino = DatatypeElephino(0)
|
|
16
|
+
leaf1ndex: DatatypeLeavesTotal = DatatypeLeavesTotal(1)
|
|
17
|
+
leafConnectee: DatatypeLeavesTotal = DatatypeLeavesTotal(0)
|
|
18
|
+
|
|
19
|
+
dimensionsUnconstrained: DatatypeLeavesTotal = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
|
|
20
|
+
|
|
21
|
+
countDimensionsGapped: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
|
|
22
|
+
gapRangeStart: Array1DElephino = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DElephino.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
|
|
23
|
+
gapsWhere: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
|
|
24
|
+
leafAbove: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
|
|
25
|
+
leafBelow: Array1DLeavesTotal = dataclasses.field(default=None, init=True, metadata={'dtype': Array1DLeavesTotal.__args__[1].__args__[0]}) # pyright: ignore[reportAssignmentType, reportAttributeAccessIssue, reportUnknownMemberType]
|
|
26
|
+
|
|
27
|
+
connectionGraph: Array3D = dataclasses.field(init=False, metadata={'dtype': Array3D.__args__[1].__args__[0]}) # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
|
|
28
|
+
dimensionsTotal: DatatypeLeavesTotal = dataclasses.field(init=False)
|
|
29
|
+
leavesTotal: DatatypeLeavesTotal = dataclasses.field(init=False)
|
|
30
|
+
|
|
31
|
+
@property
|
|
32
|
+
def foldsTotal(self) -> DatatypeFoldsTotal:
|
|
33
|
+
_foldsTotal = DatatypeFoldsTotal(self.leavesTotal) * self.groupsOfFolds
|
|
34
|
+
return _foldsTotal
|
|
35
|
+
|
|
36
|
+
def __post_init__(self) -> None:
|
|
37
|
+
self.dimensionsTotal = DatatypeLeavesTotal(len(self.mapShape))
|
|
38
|
+
self.leavesTotal = DatatypeLeavesTotal(getLeavesTotal(self.mapShape))
|
|
39
|
+
|
|
40
|
+
leavesTotalAsInt = int(self.leavesTotal)
|
|
41
|
+
|
|
42
|
+
self.connectionGraph = getConnectionGraph(self.mapShape, leavesTotalAsInt, self.__dataclass_fields__['connectionGraph'].metadata['dtype'])
|
|
43
|
+
|
|
44
|
+
if self.dimensionsUnconstrained is None: self.dimensionsUnconstrained = DatatypeLeavesTotal(int(self.dimensionsTotal)) # pyright: ignore[reportUnnecessaryComparison]
|
|
45
|
+
if self.gapsWhere is None: self.gapsWhere = makeDataContainer(leavesTotalAsInt * leavesTotalAsInt + 1, self.__dataclass_fields__['gapsWhere'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
|
|
46
|
+
if self.countDimensionsGapped is None: self.countDimensionsGapped = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['countDimensionsGapped'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
|
|
47
|
+
if self.gapRangeStart is None: self.gapRangeStart = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['gapRangeStart'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
|
|
48
|
+
if self.leafAbove is None: self.leafAbove = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['leafAbove'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
|
|
49
|
+
if self.leafBelow is None: self.leafBelow = makeDataContainer(leavesTotalAsInt + 1, self.__dataclass_fields__['leafBelow'].metadata['dtype']) # pyright: ignore[reportUnnecessaryComparison]
|
mapFolding/datatypes.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
from numpy import dtype, int64 as numpy_int64, integer, ndarray
|
|
2
|
+
from typing import Any, TypeAlias, TypeVar
|
|
3
|
+
|
|
4
|
+
# =============================================================================
|
|
5
|
+
# Flexible Data Structure System Needs Enhanced Paradigm https://github.com/hunterhogan/mapFolding/issues/9
|
|
6
|
+
|
|
7
|
+
NumPyIntegerType = TypeVar('NumPyIntegerType', bound=integer[Any], covariant=True)
|
|
8
|
+
|
|
9
|
+
DatatypeLeavesTotal: TypeAlias = int
|
|
10
|
+
NumPyLeavesTotal: TypeAlias = numpy_int64
|
|
11
|
+
|
|
12
|
+
DatatypeElephino: TypeAlias = int
|
|
13
|
+
NumPyElephino: TypeAlias = numpy_int64
|
|
14
|
+
|
|
15
|
+
DatatypeFoldsTotal: TypeAlias = int
|
|
16
|
+
NumPyFoldsTotal: TypeAlias = numpy_int64
|
|
17
|
+
|
|
18
|
+
Array3D: TypeAlias = ndarray[tuple[int, int, int], dtype[NumPyLeavesTotal]]
|
|
19
|
+
Array1DLeavesTotal: TypeAlias = ndarray[tuple[int], dtype[NumPyLeavesTotal]]
|
|
20
|
+
Array1DElephino: TypeAlias = ndarray[tuple[int], dtype[NumPyElephino]]
|
|
21
|
+
Array1DFoldsTotal: TypeAlias = ndarray[tuple[int], dtype[NumPyFoldsTotal]]
|
mapFolding/oeis.py
CHANGED
|
@@ -20,7 +20,7 @@ mathematical definition in OEIS and the computational implementation in the pack
|
|
|
20
20
|
from collections.abc import Callable
|
|
21
21
|
from datetime import datetime, timedelta
|
|
22
22
|
from functools import cache
|
|
23
|
-
from mapFolding import
|
|
23
|
+
from mapFolding import countFolds, The, writeStringToHere
|
|
24
24
|
from pathlib import Path
|
|
25
25
|
from typing import Any, Final, TYPE_CHECKING
|
|
26
26
|
import argparse
|
|
@@ -401,7 +401,6 @@ def oeisIDfor_n(oeisID: str, n: int | Any) -> int:
|
|
|
401
401
|
raise ArithmeticError(f"OEIS sequence {oeisID} is not defined at {n = }.")
|
|
402
402
|
foldsTotal: int = settingsOEIS[oeisID]['valuesKnown'][n]
|
|
403
403
|
return foldsTotal
|
|
404
|
-
from mapFolding.basecamp import countFolds
|
|
405
404
|
return countFolds(mapShape)
|
|
406
405
|
|
|
407
406
|
def OEIS_for_n() -> None:
|
|
@@ -22,7 +22,7 @@ functional implementations into highly-optimized variants with verified correctn
|
|
|
22
22
|
- Recipe configuration for generating optimized code (RecipeSynthesizeFlow)
|
|
23
23
|
- Dataclass decomposition for compatibility (ShatteredDataclass)
|
|
24
24
|
|
|
25
|
-
3. **Optimization
|
|
25
|
+
3. **Optimization assembly lines**
|
|
26
26
|
- General-purpose Numba acceleration (makeNumbaFlow)
|
|
27
27
|
- Job-specific optimization for concrete parameters (makeJobNumba)
|
|
28
28
|
- Specialized component transformation (decorateCallableWithNumba)
|
|
@@ -15,7 +15,7 @@ else:
|
|
|
15
15
|
|
|
16
16
|
class ImaCallToName(ast.Call):
|
|
17
17
|
func: ast.Name # pyright: ignore[reportIncompatibleVariableOverride]
|
|
18
|
-
# assert isinstance(ast.Call.func, ast.Name), "
|
|
18
|
+
# assert isinstance(ast.Call.func, ast.Name), "brinkmanship"
|
|
19
19
|
# func: ast.Name
|
|
20
20
|
|
|
21
21
|
astClassHasDOTtargetAttributeNameSubscript: typing_TypeAlias = ast.AnnAssign | ast.AugAssign
|
|
@@ -1,23 +1,21 @@
|
|
|
1
1
|
"""
|
|
2
2
|
AST Container Classes for Python Code Generation and Transformation
|
|
3
3
|
|
|
4
|
-
This module provides specialized container classes that organize AST nodes, imports,
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
The containers work in conjunction with transformation tools that manipulate the
|
|
20
|
-
contained AST nodes to implement specific optimizations and transformations.
|
|
4
|
+
This module provides specialized container classes that organize AST nodes, imports, and program structure for code
|
|
5
|
+
generation and transformation. These classes form the organizational backbone of the code generation system, enabling:
|
|
6
|
+
|
|
7
|
+
1. Tracking and managing imports with LedgerOfImports.
|
|
8
|
+
2. Packaging function definitions with their dependencies via IngredientsFunction.
|
|
9
|
+
3. Structuring complete modules with IngredientsModule.
|
|
10
|
+
4. Configuring code synthesis with RecipeSynthesizeFlow.
|
|
11
|
+
5. Organizing decomposed dataclass representations with ShatteredDataclass.
|
|
12
|
+
|
|
13
|
+
Together, these container classes implement a component-based architecture for programmatic generation of
|
|
14
|
+
high-performance code. They maintain a clean separation between structure and content, allowing transformations to be
|
|
15
|
+
applied systematically while preserving relationships between code elements.
|
|
16
|
+
|
|
17
|
+
The containers work in conjunction with transformation tools that manipulate the contained AST nodes to implement
|
|
18
|
+
specific optimizations and transformations.
|
|
21
19
|
"""
|
|
22
20
|
|
|
23
21
|
from collections import defaultdict
|
|
@@ -33,30 +31,31 @@ class LedgerOfImports:
|
|
|
33
31
|
"""
|
|
34
32
|
Track and manage import statements for programmatically generated code.
|
|
35
33
|
|
|
36
|
-
LedgerOfImports acts as a registry for import statements, maintaining a clean
|
|
37
|
-
|
|
34
|
+
LedgerOfImports acts as a registry for import statements, maintaining a clean separation between the logical
|
|
35
|
+
structure of imports and their textual representation.
|
|
38
36
|
It enables:
|
|
39
37
|
|
|
40
|
-
1. Tracking regular imports and import-from statements
|
|
41
|
-
2. Adding imports programmatically during code transformation
|
|
42
|
-
3. Merging imports from multiple sources
|
|
43
|
-
4. Removing unnecessary or conflicting imports
|
|
44
|
-
5. Generating optimized AST import nodes for the final code
|
|
38
|
+
1. Tracking regular imports and import-from statements.
|
|
39
|
+
2. Adding imports programmatically during code transformation.
|
|
40
|
+
3. Merging imports from multiple sources.
|
|
41
|
+
4. Removing unnecessary or conflicting imports.
|
|
42
|
+
5. Generating optimized AST import nodes for the final code.
|
|
45
43
|
|
|
46
|
-
This class forms the foundation of dependency management in generated code,
|
|
47
|
-
|
|
48
|
-
conflict.
|
|
44
|
+
This class forms the foundation of dependency management in generated code, ensuring that all required libraries are
|
|
45
|
+
available without duplication or conflict.
|
|
49
46
|
"""
|
|
50
47
|
# TODO When resolving the ledger of imports, remove self-referential imports
|
|
51
|
-
# TODO add TypeIgnore tracking to the ledger of imports
|
|
52
48
|
|
|
53
|
-
|
|
49
|
+
type_ignores: list[ast.TypeIgnore]
|
|
50
|
+
|
|
51
|
+
def __init__(self, startWith: ast.AST | None = None, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
|
|
54
52
|
self.dictionaryImportFrom: dict[str_nameDOTname, list[tuple[ast_Identifier, ast_Identifier | None]]] = defaultdict(list)
|
|
55
53
|
self.listImport: list[str_nameDOTname] = []
|
|
54
|
+
self.type_ignores = [] if type_ignores is None else list(type_ignores)
|
|
56
55
|
if startWith:
|
|
57
56
|
self.walkThis(startWith)
|
|
58
57
|
|
|
59
|
-
def addAst(self, astImport____: ast.Import | ast.ImportFrom) -> None:
|
|
58
|
+
def addAst(self, astImport____: ast.Import | ast.ImportFrom, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
|
|
60
59
|
match astImport____:
|
|
61
60
|
case ast.Import():
|
|
62
61
|
for alias in astImport____.names:
|
|
@@ -69,14 +68,20 @@ class LedgerOfImports:
|
|
|
69
68
|
self.dictionaryImportFrom[astImport____.module].append((alias.name, alias.asname))
|
|
70
69
|
case _:
|
|
71
70
|
raise ValueError(f"I received {type(astImport____) = }, but I can only accept {ast.Import} and {ast.ImportFrom}.")
|
|
71
|
+
if type_ignores:
|
|
72
|
+
self.type_ignores.extend(type_ignores)
|
|
72
73
|
|
|
73
|
-
def addImport_asStr(self, moduleWithLogicalPath: str_nameDOTname) -> None:
|
|
74
|
+
def addImport_asStr(self, moduleWithLogicalPath: str_nameDOTname, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
|
|
74
75
|
self.listImport.append(moduleWithLogicalPath)
|
|
76
|
+
if type_ignores:
|
|
77
|
+
self.type_ignores.extend(type_ignores)
|
|
75
78
|
|
|
76
|
-
def addImportFrom_asStr(self, moduleWithLogicalPath: str_nameDOTname, name: ast_Identifier, asname: ast_Identifier | None = None) -> None:
|
|
79
|
+
def addImportFrom_asStr(self, moduleWithLogicalPath: str_nameDOTname, name: ast_Identifier, asname: ast_Identifier | None = None, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
|
|
77
80
|
if moduleWithLogicalPath not in self.dictionaryImportFrom:
|
|
78
81
|
self.dictionaryImportFrom[moduleWithLogicalPath] = []
|
|
79
82
|
self.dictionaryImportFrom[moduleWithLogicalPath].append((name, asname))
|
|
83
|
+
if type_ignores:
|
|
84
|
+
self.type_ignores.extend(type_ignores)
|
|
80
85
|
|
|
81
86
|
def removeImportFromModule(self, moduleWithLogicalPath: str_nameDOTname) -> None:
|
|
82
87
|
"""Remove all imports from a specific module."""
|
|
@@ -129,11 +134,14 @@ class LedgerOfImports:
|
|
|
129
134
|
self.dictionaryImportFrom = updateExtendPolishDictionaryLists(self.dictionaryImportFrom, *(ledger.dictionaryImportFrom for ledger in fromLedger), destroyDuplicates=True, reorderLists=True)
|
|
130
135
|
for ledger in fromLedger:
|
|
131
136
|
self.listImport.extend(ledger.listImport)
|
|
137
|
+
self.type_ignores.extend(ledger.type_ignores)
|
|
132
138
|
|
|
133
|
-
def walkThis(self, walkThis: ast.AST) -> None:
|
|
139
|
+
def walkThis(self, walkThis: ast.AST, type_ignores: list[ast.TypeIgnore] | None = None) -> None:
|
|
134
140
|
for nodeBuffalo in ast.walk(walkThis):
|
|
135
141
|
if isinstance(nodeBuffalo, (ast.Import, ast.ImportFrom)):
|
|
136
142
|
self.addAst(nodeBuffalo)
|
|
143
|
+
if type_ignores:
|
|
144
|
+
self.type_ignores.extend(type_ignores)
|
|
137
145
|
|
|
138
146
|
# Consolidate settings classes through inheritance https://github.com/hunterhogan/mapFolding/issues/15
|
|
139
147
|
@dataclasses.dataclass
|
|
@@ -141,17 +149,16 @@ class IngredientsFunction:
|
|
|
141
149
|
"""
|
|
142
150
|
Package a function definition with its import dependencies for code generation.
|
|
143
151
|
|
|
144
|
-
IngredientsFunction encapsulates an AST function definition along with all the
|
|
145
|
-
|
|
146
|
-
portable unit that can be:
|
|
152
|
+
IngredientsFunction encapsulates an AST function definition along with all the imports required for that function to
|
|
153
|
+
operate correctly. This creates a modular, portable unit that can be:
|
|
147
154
|
|
|
148
|
-
1. Transformed independently (e.g., by applying Numba decorators)
|
|
149
|
-
2. Transplanted between modules while maintaining dependencies
|
|
150
|
-
3. Combined with other functions to form complete modules
|
|
151
|
-
4. Analyzed for optimization opportunities
|
|
155
|
+
1. Transformed independently (e.g., by applying Numba decorators).
|
|
156
|
+
2. Transplanted between modules while maintaining dependencies.
|
|
157
|
+
3. Combined with other functions to form complete modules.
|
|
158
|
+
4. Analyzed for optimization opportunities.
|
|
152
159
|
|
|
153
|
-
This class forms the primary unit of function manipulation in the code generation
|
|
154
|
-
|
|
160
|
+
This class forms the primary unit of function manipulation in the code generation system, enabling targeted
|
|
161
|
+
transformations while preserving function dependencies.
|
|
155
162
|
|
|
156
163
|
Parameters:
|
|
157
164
|
astFunctionDef: The AST representation of the function definition
|
|
@@ -266,15 +273,18 @@ class IngredientsModule:
|
|
|
266
273
|
for ingredientsFunction in self.listIngredientsFunctions:
|
|
267
274
|
ingredientsFunction.imports.removeImportFrom(moduleWithLogicalPath, name, asname)
|
|
268
275
|
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
"""List of `ast.Import` and `ast.ImportFrom` statements."""
|
|
276
|
+
def _consolidatedLedger(self) -> LedgerOfImports:
|
|
277
|
+
"""Consolidate all ledgers of imports."""
|
|
272
278
|
sherpaLedger = LedgerOfImports()
|
|
273
279
|
listLedgers: list[LedgerOfImports] = [self.imports]
|
|
274
280
|
for ingredientsFunction in self.listIngredientsFunctions:
|
|
275
281
|
listLedgers.append(ingredientsFunction.imports)
|
|
276
282
|
sherpaLedger.update(*listLedgers)
|
|
277
|
-
return sherpaLedger
|
|
283
|
+
return sherpaLedger
|
|
284
|
+
|
|
285
|
+
@property
|
|
286
|
+
def list_astImportImportFrom(self) -> list[ast.Import | ast.ImportFrom]:
|
|
287
|
+
return self._consolidatedLedger().makeList_ast()
|
|
278
288
|
|
|
279
289
|
@property
|
|
280
290
|
def body(self) -> list[ast.stmt]:
|
|
@@ -291,7 +301,7 @@ class IngredientsModule:
|
|
|
291
301
|
@property
|
|
292
302
|
def type_ignores(self) -> list[ast.TypeIgnore]:
|
|
293
303
|
listTypeIgnore: list[ast.TypeIgnore] = self.supplemental_type_ignores
|
|
294
|
-
|
|
304
|
+
listTypeIgnore.extend(self._consolidatedLedger().type_ignores)
|
|
295
305
|
listTypeIgnore.extend(self.prologue.type_ignores)
|
|
296
306
|
for ingredientsFunction in self.listIngredientsFunctions:
|
|
297
307
|
listTypeIgnore.extend(ingredientsFunction.type_ignores)
|
|
@@ -316,7 +326,7 @@ class RecipeSynthesizeFlow:
|
|
|
316
326
|
|
|
317
327
|
This configuration class serves as a single source of truth for the code generation
|
|
318
328
|
process, ensuring consistency across all generated artifacts while enabling
|
|
319
|
-
customization of the transformation
|
|
329
|
+
customization of the transformation assembly line.
|
|
320
330
|
|
|
321
331
|
The transformation process uses this configuration to extract functions from the
|
|
322
332
|
source module, transform them according to optimization rules, and output
|
|
@@ -6,7 +6,7 @@ for specific map folding calculation jobs. Unlike the general-purpose transforma
|
|
|
6
6
|
in toolboxNumba.py, this module creates standalone Python modules optimized for a
|
|
7
7
|
single map shape with statically-encoded parameters.
|
|
8
8
|
|
|
9
|
-
The code generation
|
|
9
|
+
The code generation assembly line focuses on:
|
|
10
10
|
|
|
11
11
|
1. Converting function parameters to initialized variables with concrete values.
|
|
12
12
|
2. Replacing dynamic computations with statically-known values.
|
|
@@ -175,7 +175,7 @@ def makeJobNumba(job: RecipeJob, spices: SpicesJobNumba) -> None:
|
|
|
175
175
|
"""
|
|
176
176
|
Generate a highly-optimized, single-purpose Numba module for a specific map shape.
|
|
177
177
|
|
|
178
|
-
This function implements the complete transformation
|
|
178
|
+
This function implements the complete transformation assembly line for creating a
|
|
179
179
|
standalone, specialized implementation for calculating map folding solutions for
|
|
180
180
|
a specific shape. The process includes:
|
|
181
181
|
|
|
@@ -245,9 +245,9 @@ if __name__ == '__main__':
|
|
|
245
245
|
Z0Z_asname: ast_Identifier | None = None
|
|
246
246
|
|
|
247
247
|
listDatatypeConfigs = [
|
|
248
|
-
DatatypeConfig(fml='DatatypeLeavesTotal', Z0Z_module='numba', Z0Z_type_name='
|
|
248
|
+
DatatypeConfig(fml='DatatypeLeavesTotal', Z0Z_module='numba', Z0Z_type_name='uint8'),
|
|
249
249
|
DatatypeConfig(fml='DatatypeElephino', Z0Z_module='numba', Z0Z_type_name='uint16'),
|
|
250
|
-
DatatypeConfig(fml='DatatypeFoldsTotal', Z0Z_module='numba', Z0Z_type_name='
|
|
250
|
+
DatatypeConfig(fml='DatatypeFoldsTotal', Z0Z_module='numba', Z0Z_type_name='uint64'),
|
|
251
251
|
]
|
|
252
252
|
|
|
253
253
|
for datatypeConfig in listDatatypeConfigs:
|
|
@@ -261,9 +261,9 @@ if __name__ == '__main__':
|
|
|
261
261
|
ingredientsCount.imports.removeImportFromModule('mapFolding.theSSOT')
|
|
262
262
|
|
|
263
263
|
listNumPyTypeConfigs = [
|
|
264
|
-
DatatypeConfig(fml='Array1DLeavesTotal', Z0Z_module='numpy', Z0Z_type_name='
|
|
264
|
+
DatatypeConfig(fml='Array1DLeavesTotal', Z0Z_module='numpy', Z0Z_type_name='uint8', Z0Z_asname='Array1DLeavesTotal'),
|
|
265
265
|
DatatypeConfig(fml='Array1DElephino', Z0Z_module='numpy', Z0Z_type_name='uint16', Z0Z_asname='Array1DElephino'),
|
|
266
|
-
DatatypeConfig(fml='Array3D', Z0Z_module='numpy', Z0Z_type_name='
|
|
266
|
+
DatatypeConfig(fml='Array3D', Z0Z_module='numpy', Z0Z_type_name='uint8', Z0Z_asname='Array3D'),
|
|
267
267
|
]
|
|
268
268
|
|
|
269
269
|
for typeConfig in listNumPyTypeConfigs:
|
|
@@ -299,7 +299,7 @@ if __name__ == '__main__':
|
|
|
299
299
|
"""
|
|
300
300
|
|
|
301
301
|
if __name__ == '__main__':
|
|
302
|
-
mapShape = (2,
|
|
302
|
+
mapShape = (2,21)
|
|
303
303
|
state = makeInitializedComputationState(mapShape)
|
|
304
304
|
# foldsTotalEstimated = getFoldsTotalKnown(state.mapShape) // state.leavesTotal
|
|
305
305
|
foldsTotalEstimated = dictionaryEstimates[state.mapShape] // state.leavesTotal
|
|
@@ -158,7 +158,7 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow) -> None:
|
|
|
158
158
|
"""
|
|
159
159
|
Transform standard Python algorithm code into optimized Numba implementations.
|
|
160
160
|
|
|
161
|
-
This function implements the complete transformation
|
|
161
|
+
This function implements the complete transformation assembly line that converts
|
|
162
162
|
a conventional Python implementation into a high-performance Numba-accelerated
|
|
163
163
|
version. The process includes:
|
|
164
164
|
|
|
@@ -424,7 +424,7 @@ def write_astModule(ingredients: IngredientsModule, pathFilename: PathLike[Any]
|
|
|
424
424
|
4. Optimizes imports using autoflake
|
|
425
425
|
5. Writes the final source code to the specified file location
|
|
426
426
|
|
|
427
|
-
This is typically the final step in the code generation
|
|
427
|
+
This is typically the final step in the code generation assembly line,
|
|
428
428
|
producing optimized Python modules ready for execution.
|
|
429
429
|
|
|
430
430
|
Parameters:
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
from mapFolding.dataBaskets import MapFoldingState
|
|
2
|
+
|
|
3
|
+
def activeLeafGreaterThan0(state: MapFoldingState) -> bool:
|
|
4
|
+
return state.leaf1ndex > 0
|
|
5
|
+
|
|
6
|
+
def activeLeafGreaterThanLeavesTotal(state: MapFoldingState) -> bool:
|
|
7
|
+
return state.leaf1ndex > state.leavesTotal
|
|
8
|
+
|
|
9
|
+
def activeLeafIsTheFirstLeaf(state: MapFoldingState) -> bool:
|
|
10
|
+
return state.leaf1ndex <= 1
|
|
11
|
+
|
|
12
|
+
def activeLeafIsUnconstrainedInAllDimensions(state: MapFoldingState) -> bool:
|
|
13
|
+
return not state.dimensionsUnconstrained
|
|
14
|
+
|
|
15
|
+
def activeLeafUnconstrainedInThisDimension(state: MapFoldingState) -> MapFoldingState:
|
|
16
|
+
state.dimensionsUnconstrained -= 1
|
|
17
|
+
return state
|
|
18
|
+
|
|
19
|
+
def filterCommonGaps(state: MapFoldingState) -> MapFoldingState:
|
|
20
|
+
state.gapsWhere[state.gap1ndex] = state.gapsWhere[state.indexMiniGap]
|
|
21
|
+
if state.countDimensionsGapped[state.gapsWhere[state.indexMiniGap]] == state.dimensionsUnconstrained:
|
|
22
|
+
state = incrementActiveGap(state)
|
|
23
|
+
state.countDimensionsGapped[state.gapsWhere[state.indexMiniGap]] = 0
|
|
24
|
+
return state
|
|
25
|
+
|
|
26
|
+
def gapAvailable(state: MapFoldingState) -> bool:
|
|
27
|
+
return state.leaf1ndex > 0
|
|
28
|
+
|
|
29
|
+
def incrementActiveGap(state: MapFoldingState) -> MapFoldingState:
|
|
30
|
+
state.gap1ndex += 1
|
|
31
|
+
return state
|
|
32
|
+
|
|
33
|
+
def incrementGap1ndexCeiling(state: MapFoldingState) -> MapFoldingState:
|
|
34
|
+
state.gap1ndexCeiling += 1
|
|
35
|
+
return state
|
|
36
|
+
|
|
37
|
+
def incrementIndexMiniGap(state: MapFoldingState) -> MapFoldingState:
|
|
38
|
+
state.indexMiniGap += 1
|
|
39
|
+
return state
|
|
40
|
+
|
|
41
|
+
def initializeIndexMiniGap(state: MapFoldingState) -> MapFoldingState:
|
|
42
|
+
state.indexMiniGap = state.gap1ndex
|
|
43
|
+
return state
|
|
44
|
+
|
|
45
|
+
def initializeVariablesToFindGaps(state: MapFoldingState) -> MapFoldingState:
|
|
46
|
+
state.dimensionsUnconstrained = state.dimensionsTotal
|
|
47
|
+
state.gap1ndexCeiling = state.gapRangeStart[state.leaf1ndex - 1]
|
|
48
|
+
state.indexDimension = 0
|
|
49
|
+
return state
|
|
50
|
+
|
|
51
|
+
def insertActiveLeaf(state: MapFoldingState) -> MapFoldingState:
|
|
52
|
+
state.indexLeaf = 0
|
|
53
|
+
while state.indexLeaf < state.leaf1ndex:
|
|
54
|
+
state.gapsWhere[state.gap1ndexCeiling] = state.indexLeaf
|
|
55
|
+
state.gap1ndexCeiling += 1
|
|
56
|
+
state.indexLeaf += 1
|
|
57
|
+
return state
|
|
58
|
+
|
|
59
|
+
def insertActiveLeafAtGap(state: MapFoldingState) -> MapFoldingState:
|
|
60
|
+
state.gap1ndex -= 1
|
|
61
|
+
state.leafAbove[state.leaf1ndex] = state.gapsWhere[state.gap1ndex]
|
|
62
|
+
state.leafBelow[state.leaf1ndex] = state.leafBelow[state.leafAbove[state.leaf1ndex]]
|
|
63
|
+
state.leafBelow[state.leafAbove[state.leaf1ndex]] = state.leaf1ndex
|
|
64
|
+
state.leafAbove[state.leafBelow[state.leaf1ndex]] = state.leaf1ndex
|
|
65
|
+
state.gapRangeStart[state.leaf1ndex] = state.gap1ndex
|
|
66
|
+
state.leaf1ndex += 1
|
|
67
|
+
return state
|
|
68
|
+
|
|
69
|
+
def leafBelowSentinelIs1(state: MapFoldingState) -> bool:
|
|
70
|
+
return state.leafBelow[0] == 1
|
|
71
|
+
|
|
72
|
+
def leafConnecteeIsActiveLeaf(state: MapFoldingState) -> bool:
|
|
73
|
+
return state.leafConnectee == state.leaf1ndex
|
|
74
|
+
|
|
75
|
+
def lookForGaps(state: MapFoldingState) -> MapFoldingState:
|
|
76
|
+
state.gapsWhere[state.gap1ndexCeiling] = state.leafConnectee
|
|
77
|
+
if state.countDimensionsGapped[state.leafConnectee] == 0:
|
|
78
|
+
state = incrementGap1ndexCeiling(state)
|
|
79
|
+
state.countDimensionsGapped[state.leafConnectee] += 1
|
|
80
|
+
return state
|
|
81
|
+
|
|
82
|
+
def lookupLeafConnecteeInConnectionGraph(state: MapFoldingState) -> MapFoldingState:
|
|
83
|
+
state.leafConnectee = state.connectionGraph[state.indexDimension, state.leaf1ndex, state.leaf1ndex]
|
|
84
|
+
return state
|
|
85
|
+
|
|
86
|
+
def loopingLeavesConnectedToActiveLeaf(state: MapFoldingState) -> bool:
|
|
87
|
+
return state.leafConnectee != state.leaf1ndex
|
|
88
|
+
|
|
89
|
+
def loopingThroughTheDimensions(state: MapFoldingState) -> bool:
|
|
90
|
+
return state.indexDimension < state.dimensionsTotal
|
|
91
|
+
|
|
92
|
+
def loopingToActiveGapCeiling(state: MapFoldingState) -> bool:
|
|
93
|
+
return state.indexMiniGap < state.gap1ndexCeiling
|
|
94
|
+
|
|
95
|
+
def noGapsHere(state: MapFoldingState) -> bool:
|
|
96
|
+
return (state.leaf1ndex > 0) and (state.gap1ndex == state.gapRangeStart[state.leaf1ndex - 1])
|
|
97
|
+
|
|
98
|
+
def tryAnotherLeafConnectee(state: MapFoldingState) -> MapFoldingState:
|
|
99
|
+
state.leafConnectee = state.connectionGraph[state.indexDimension, state.leaf1ndex, state.leafBelow[state.leafConnectee]]
|
|
100
|
+
return state
|
|
101
|
+
|
|
102
|
+
def tryNextDimension(state: MapFoldingState) -> MapFoldingState:
|
|
103
|
+
state.indexDimension += 1
|
|
104
|
+
return state
|
|
105
|
+
|
|
106
|
+
def undoLastLeafPlacement(state: MapFoldingState) -> MapFoldingState:
|
|
107
|
+
state.leaf1ndex -= 1
|
|
108
|
+
state.leafBelow[state.leafAbove[state.leaf1ndex]] = state.leafBelow[state.leaf1ndex]
|
|
109
|
+
state.leafAbove[state.leafBelow[state.leaf1ndex]] = state.leafAbove[state.leaf1ndex]
|
|
110
|
+
return state
|
|
111
|
+
|
|
112
|
+
def count(state: MapFoldingState) -> MapFoldingState:
|
|
113
|
+
while activeLeafGreaterThan0(state):
|
|
114
|
+
if activeLeafIsTheFirstLeaf(state) or leafBelowSentinelIs1(state):
|
|
115
|
+
if activeLeafGreaterThanLeavesTotal(state):
|
|
116
|
+
state.groupsOfFolds += 1
|
|
117
|
+
else:
|
|
118
|
+
state = initializeVariablesToFindGaps(state)
|
|
119
|
+
while loopingThroughTheDimensions(state):
|
|
120
|
+
state = lookupLeafConnecteeInConnectionGraph(state)
|
|
121
|
+
if leafConnecteeIsActiveLeaf(state):
|
|
122
|
+
state = activeLeafUnconstrainedInThisDimension(state)
|
|
123
|
+
else:
|
|
124
|
+
while loopingLeavesConnectedToActiveLeaf(state):
|
|
125
|
+
state = lookForGaps(state)
|
|
126
|
+
state = tryAnotherLeafConnectee(state)
|
|
127
|
+
state = tryNextDimension(state)
|
|
128
|
+
if activeLeafIsUnconstrainedInAllDimensions(state):
|
|
129
|
+
state = insertActiveLeaf(state)
|
|
130
|
+
state = initializeIndexMiniGap(state)
|
|
131
|
+
while loopingToActiveGapCeiling(state):
|
|
132
|
+
state = filterCommonGaps(state)
|
|
133
|
+
state = incrementIndexMiniGap(state)
|
|
134
|
+
while noGapsHere(state):
|
|
135
|
+
state = undoLastLeafPlacement(state)
|
|
136
|
+
if gapAvailable(state):
|
|
137
|
+
state = insertActiveLeafAtGap(state)
|
|
138
|
+
return state
|
|
139
|
+
|
|
140
|
+
def doTheNeedful(state: MapFoldingState) -> MapFoldingState:
|
|
141
|
+
state = count(state)
|
|
142
|
+
return state
|
mapFolding/theSSOT.py
CHANGED
|
@@ -19,12 +19,23 @@ collisions when transforming algorithms.
|
|
|
19
19
|
from collections.abc import Callable
|
|
20
20
|
from importlib import import_module as importlib_import_module
|
|
21
21
|
from inspect import getfile as inspect_getfile
|
|
22
|
-
from numpy import dtype, int64 as numpy_int64, integer, ndarray
|
|
23
22
|
from pathlib import Path
|
|
24
23
|
from tomli import load as tomli_load
|
|
25
24
|
from types import ModuleType
|
|
26
|
-
from typing import Any, TypeAlias, TypeVar
|
|
27
25
|
import dataclasses
|
|
26
|
+
from mapFolding.datatypes import (
|
|
27
|
+
Array1DElephino as Array1DElephino,
|
|
28
|
+
Array1DFoldsTotal as Array1DFoldsTotal,
|
|
29
|
+
Array1DLeavesTotal as Array1DLeavesTotal,
|
|
30
|
+
Array3D as Array3D,
|
|
31
|
+
DatatypeElephino as DatatypeElephino,
|
|
32
|
+
DatatypeFoldsTotal as DatatypeFoldsTotal,
|
|
33
|
+
DatatypeLeavesTotal as DatatypeLeavesTotal,
|
|
34
|
+
NumPyElephino as NumPyElephino,
|
|
35
|
+
NumPyFoldsTotal as NumPyFoldsTotal,
|
|
36
|
+
NumPyIntegerType as NumPyIntegerType,
|
|
37
|
+
NumPyLeavesTotal as NumPyLeavesTotal,
|
|
38
|
+
)
|
|
28
39
|
|
|
29
40
|
# Evaluate When Packaging https://github.com/hunterhogan/mapFolding/issues/18
|
|
30
41
|
try:
|
|
@@ -144,25 +155,6 @@ class PackageSettings:
|
|
|
144
155
|
|
|
145
156
|
The = PackageSettings(logicalPathModuleDispatcher=logicalPathModuleDispatcherHARDCODED, callableDispatcher=callableDispatcherHARDCODED, concurrencyPackage=concurrencyPackageHARDCODED)
|
|
146
157
|
|
|
147
|
-
# =============================================================================
|
|
148
|
-
# Flexible Data Structure System Needs Enhanced Paradigm https://github.com/hunterhogan/mapFolding/issues/9
|
|
149
|
-
|
|
150
|
-
NumPyIntegerType = TypeVar('NumPyIntegerType', bound=integer[Any], covariant=True)
|
|
151
|
-
|
|
152
|
-
DatatypeLeavesTotal: TypeAlias = int
|
|
153
|
-
NumPyLeavesTotal: TypeAlias = numpy_int64
|
|
154
|
-
|
|
155
|
-
DatatypeElephino: TypeAlias = int
|
|
156
|
-
NumPyElephino: TypeAlias = numpy_int64
|
|
157
|
-
|
|
158
|
-
DatatypeFoldsTotal: TypeAlias = int
|
|
159
|
-
NumPyFoldsTotal: TypeAlias = numpy_int64
|
|
160
|
-
|
|
161
|
-
Array3D: TypeAlias = ndarray[tuple[int, int, int], dtype[NumPyLeavesTotal]]
|
|
162
|
-
Array1DLeavesTotal: TypeAlias = ndarray[tuple[int], dtype[NumPyLeavesTotal]]
|
|
163
|
-
Array1DElephino: TypeAlias = ndarray[tuple[int], dtype[NumPyElephino]]
|
|
164
|
-
Array1DFoldsTotal: TypeAlias = ndarray[tuple[int], dtype[NumPyFoldsTotal]]
|
|
165
|
-
|
|
166
158
|
@dataclasses.dataclass
|
|
167
159
|
class ComputationState:
|
|
168
160
|
"""
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: mapFolding
|
|
3
|
-
Version: 0.9.
|
|
3
|
+
Version: 0.9.3
|
|
4
4
|
Summary: Map folding algorithm with code transformation framework for optimizing numerical computations
|
|
5
5
|
Author-email: Hunter Hogan <HunterHogan@pm.me>
|
|
6
6
|
License: CC-BY-NC-4.0
|
|
@@ -8,7 +8,7 @@ Project-URL: Donate, https://www.patreon.com/integrated
|
|
|
8
8
|
Project-URL: Homepage, https://github.com/hunterhogan/mapFolding
|
|
9
9
|
Project-URL: Repository, https://github.com/hunterhogan/mapFolding.git
|
|
10
10
|
Project-URL: Issues, https://github.com/hunterhogan/mapFolding/issues
|
|
11
|
-
Keywords: A001415,A001416,A001417,A001418,A195646,algorithmic optimization,AST manipulation,code generation,code transformation,combinatorics,computational geometry,dataclass transformation,folding pattern enumeration,just-in-time compilation,map folding,Numba optimization,OEIS,performance optimization,source code analysis,stamp folding
|
|
11
|
+
Keywords: A000136,A001415,A001416,A001417,A001418,A195646,algorithmic optimization,AST manipulation,code generation,code transformation,combinatorics,computational geometry,dataclass transformation,folding pattern enumeration,just-in-time compilation,map folding,Numba optimization,OEIS,performance optimization,source code analysis,stamp folding
|
|
12
12
|
Classifier: Development Status :: 4 - Beta
|
|
13
13
|
Classifier: Environment :: Console
|
|
14
14
|
Classifier: Intended Audience :: Developers
|
|
@@ -106,7 +106,7 @@ def countFolds_optimized(shape_param):
|
|
|
106
106
|
|
|
107
107
|
### 2. Code Generation Framework
|
|
108
108
|
|
|
109
|
-
Study and extend a complete Python code transformation
|
|
109
|
+
Study and extend a complete Python code transformation assembly line:
|
|
110
110
|
|
|
111
111
|
- AST analysis and manipulation
|
|
112
112
|
- Dataclass decomposition ("shattering")
|
|
@@ -1,10 +1,14 @@
|
|
|
1
|
-
mapFolding/
|
|
2
|
-
mapFolding/
|
|
3
|
-
mapFolding/
|
|
4
|
-
mapFolding/
|
|
1
|
+
mapFolding/Z0Z_flowControl.py,sha256=thYz1WA7v0PCigvZcNMcKhNnJoIjLJkVgD4wUTXveGY,3272
|
|
2
|
+
mapFolding/__init__.py,sha256=XbCu7IEPzmIuPRy5iUFKbRbhqTRsie3RemnKVUdACCU,4360
|
|
3
|
+
mapFolding/basecamp.py,sha256=zKqG2lfhaUEicpXjResOrU8zIq3_-3KAFW-DLXATlpc,4749
|
|
4
|
+
mapFolding/beDRY.py,sha256=sTqg_xq3_c4Djer8HRg41ERhDulcl1ZgU4_RMksuv6c,15975
|
|
5
|
+
mapFolding/dataBaskets.py,sha256=CrSEMfAr63l6zFA2v2YGygwSD8YeLb-3ZCKlpbp3Mho,4325
|
|
6
|
+
mapFolding/datatypes.py,sha256=LbDYemnIVLFqMIHPKWutEWId1iPMw6P7XCDm7Uw4it4,912
|
|
7
|
+
mapFolding/oeis.py,sha256=u9xiBrRXVJSFCC8FgLLuvZAsmX852UyjYqREXiulys8,17106
|
|
5
8
|
mapFolding/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
6
9
|
mapFolding/theDao.py,sha256=kc3rzTX3yts0PxgPCXFbgWvaqsBexsiU5ib2pzpvID0,10019
|
|
7
|
-
mapFolding/
|
|
10
|
+
mapFolding/theDaoOfMapFolding.py,sha256=ncTIiBfTsM8SNVx9qefZ0bBcBtviWLSk4iPv3Z9nGiE,5442
|
|
11
|
+
mapFolding/theSSOT.py,sha256=rbv8esQeBG6uLWpFZu_ncMA4zIuQG3lj4FZNzC_6JGI,16138
|
|
8
12
|
mapFolding/toolboxFilesystem.py,sha256=kVZoU-NBvfYSvI4R8mEpMXRUZee-1JV0fjtMFWPhk8Y,9818
|
|
9
13
|
mapFolding/reference/__init__.py,sha256=GKcSgYE49NcTISx-JZbELXyq-eRkMeTL5g4DXInWFw0,2206
|
|
10
14
|
mapFolding/reference/flattened.py,sha256=QK1xG9SllqCoi68e86Hyl9d9ATUAAFNpTQI-3zmcp5I,16072
|
|
@@ -19,29 +23,29 @@ mapFolding/reference/jobsCompleted/__init__.py,sha256=TU93ZGUW1xEkT6d9mQFn_rp5Dv
|
|
|
19
23
|
mapFolding/reference/jobsCompleted/[2x19]/p2x19.py,sha256=_tvYtfzMWVo2VtUbIAieoscb4N8FFflgTdW4-ljBUuA,19626
|
|
20
24
|
mapFolding/reference/jobsCompleted/p2x19/p2x19.py,sha256=eZEw4Me4ocTt6VXoK2-Sbd5SowZtxRIbN9dZmc7OCVg,6395
|
|
21
25
|
mapFolding/someAssemblyRequired/RecipeJob.py,sha256=JL5Xkmp8ritVMhL1pGhX7eEw5fde3FVD8-9-vZOZwWI,5399
|
|
22
|
-
mapFolding/someAssemblyRequired/__init__.py,sha256=
|
|
23
|
-
mapFolding/someAssemblyRequired/_theTypes.py,sha256=
|
|
26
|
+
mapFolding/someAssemblyRequired/__init__.py,sha256=W2RdYl78-Z17TiXY8dDOKZIEMeowfhSzMTwfGjwR2k4,4111
|
|
27
|
+
mapFolding/someAssemblyRequired/_theTypes.py,sha256=MS_M4fHLxsHA1JzrPxC2YPnXu--_XAsc_j8VAkyCe5Y,4341
|
|
24
28
|
mapFolding/someAssemblyRequired/_tool_Make.py,sha256=0TGZtCUt6uu8h47N833qZ9IIjbn_yhoPFsBDEotQp9A,7222
|
|
25
29
|
mapFolding/someAssemblyRequired/_tool_Then.py,sha256=0Xb-MfKJhXjoVBTC7CSjpgCxxilL_WquL4WzKQWMR5A,4464
|
|
26
30
|
mapFolding/someAssemblyRequired/_toolboxAntecedents.py,sha256=6m80_ThGF47WWIkYweEdc3LRq96fHklys7IpoFSqN7A,13288
|
|
27
|
-
mapFolding/someAssemblyRequired/_toolboxContainers.py,sha256=
|
|
31
|
+
mapFolding/someAssemblyRequired/_toolboxContainers.py,sha256=qDvmr9GYc5rPYQ5A-lbqqGFBUy3UrA0HOAnfULDE_n0,24303
|
|
28
32
|
mapFolding/someAssemblyRequired/_toolboxPython.py,sha256=TuRC5CD_6tTjjLuvGgPbnqCSvIP3Vp2k2r592Dcpff4,7642
|
|
29
33
|
mapFolding/someAssemblyRequired/getLLVMforNoReason.py,sha256=9RPU6vK_eUg64GtVFI_nZnvUryXw8gfHJs9NyDYHIvg,2745
|
|
30
|
-
mapFolding/someAssemblyRequired/synthesizeNumbaJob.py,sha256=
|
|
31
|
-
mapFolding/someAssemblyRequired/toolboxNumba.py,sha256=
|
|
32
|
-
mapFolding/someAssemblyRequired/transformationTools.py,sha256=
|
|
34
|
+
mapFolding/someAssemblyRequired/synthesizeNumbaJob.py,sha256=yFAGTtKSgv7L87PjPjQMS4wz7Bgs37B1_qqZO8cntQ0,15568
|
|
35
|
+
mapFolding/someAssemblyRequired/toolboxNumba.py,sha256=f2spS6SSobGdDNlpS2ELO7ejurqbMVITS2QZLIXDivk,10759
|
|
36
|
+
mapFolding/someAssemblyRequired/transformationTools.py,sha256=1_qIlrK5a9e5vjRW5vru-v50mkuCmFMaV7lU01cq-ss,35838
|
|
33
37
|
mapFolding/syntheticModules/__init__.py,sha256=evVFqhCGa-WZKDiLcnQWjs-Bj34eRnfSLqz_d7dFYZY,83
|
|
34
38
|
mapFolding/syntheticModules/numbaCount.py,sha256=zM-bp07c9tEDdvidwzZ_bJTd0JC0VUkYEEiHG--P1tQ,15525
|
|
35
|
-
mapfolding-0.9.
|
|
36
|
-
tests/__init__.py,sha256=
|
|
37
|
-
tests/conftest.py,sha256=
|
|
38
|
-
tests/test_computations.py,sha256=
|
|
39
|
+
mapfolding-0.9.3.dist-info/licenses/LICENSE,sha256=NxH5Y8BdC-gNU-WSMwim3uMbID2iNDXJz7fHtuTdXhk,19346
|
|
40
|
+
tests/__init__.py,sha256=5VhHf0JJ2_DSh58zJ0rR5UkpoCon-0IkdljspTCzZ04,1950
|
|
41
|
+
tests/conftest.py,sha256=x8zMZQyTss3sn0GwHm_TSRwD9_LVlR8l_qF8r43Vxl4,14178
|
|
42
|
+
tests/test_computations.py,sha256=H7AlEAGdl4klAGyeF9z0Gzy2SBtN49xWt4rdftqusnU,6335
|
|
39
43
|
tests/test_filesystem.py,sha256=T2DkjBoI3lW6tCxd5BilPmUFrVukNKLjOOZVZxLM560,3004
|
|
40
44
|
tests/test_oeis.py,sha256=uxvwmgbnylSDdsVJfuAT0LuYLbIVFwSgdLxHm-xUGBM,5043
|
|
41
45
|
tests/test_other.py,sha256=UMlK4JPInalpOZuPvTnUrgXWCJOxAw-OsPs6CxMR254,3753
|
|
42
|
-
tests/test_tasks.py,sha256=
|
|
43
|
-
mapfolding-0.9.
|
|
44
|
-
mapfolding-0.9.
|
|
45
|
-
mapfolding-0.9.
|
|
46
|
-
mapfolding-0.9.
|
|
47
|
-
mapfolding-0.9.
|
|
46
|
+
tests/test_tasks.py,sha256=tOQc4uomKXGwWnENfbcThaVa1XofwXNCkGZbg4yS6VI,2833
|
|
47
|
+
mapfolding-0.9.3.dist-info/METADATA,sha256=Jv9oPprJ2nb4ECje2fC3-tcRO3FAL0EAi7gZqKeMLzU,7479
|
|
48
|
+
mapfolding-0.9.3.dist-info/WHEEL,sha256=lTU6B6eIfYoiQJTZNc-fyaR6BpL6ehTzU3xGYxn2n8k,91
|
|
49
|
+
mapfolding-0.9.3.dist-info/entry_points.txt,sha256=F3OUeZR1XDTpoH7k3wXuRb3KF_kXTTeYhu5AGK1SiOQ,146
|
|
50
|
+
mapfolding-0.9.3.dist-info/top_level.txt,sha256=1gP2vFaqPwHujGwb3UjtMlLEGN-943VSYFR7V4gDqW8,17
|
|
51
|
+
mapfolding-0.9.3.dist-info/RECORD,,
|
tests/__init__.py
CHANGED
|
@@ -14,7 +14,7 @@ own recipe configurations and job implementations.
|
|
|
14
14
|
- Ensures consistency across different implementation strategies
|
|
15
15
|
|
|
16
16
|
2. **Code Generation Testing**
|
|
17
|
-
- Tests the AST transformation
|
|
17
|
+
- Tests the AST transformation assembly line from source to optimized implementations
|
|
18
18
|
- Validates that generated Numba-accelerated modules produce correct results
|
|
19
19
|
- Ensures robust code generation across different parameter sets
|
|
20
20
|
|
|
@@ -29,7 +29,7 @@ This suite is designed to make it easy to test your custom recipes and jobs:
|
|
|
29
29
|
|
|
30
30
|
### For Custom Recipes (RecipeSynthesizeFlow):
|
|
31
31
|
Copy and adapt the `syntheticDispatcherFixture` and associated tests from
|
|
32
|
-
`test_computations.py` to validate your customized code transformation
|
|
32
|
+
`test_computations.py` to validate your customized code transformation assembly lines.
|
|
33
33
|
|
|
34
34
|
### For Custom Jobs (RecipeJob):
|
|
35
35
|
Copy and adapt the `test_writeJobNumba` function to test specialized job modules
|
tests/conftest.py
CHANGED
|
@@ -56,7 +56,7 @@ See the examples in `test_computations.py` for guidance on adapting these fixtur
|
|
|
56
56
|
"""
|
|
57
57
|
|
|
58
58
|
from collections.abc import Callable, Generator, Sequence
|
|
59
|
-
from mapFolding import
|
|
59
|
+
from mapFolding import The
|
|
60
60
|
from mapFolding.beDRY import getLeavesTotal, validateListDimensions, makeDataContainer
|
|
61
61
|
from mapFolding.oeis import oeisIDsImplemented, settingsOEIS
|
|
62
62
|
from mapFolding.someAssemblyRequired import importLogicalPath2Callable, RecipeSynthesizeFlow
|
|
@@ -171,7 +171,7 @@ def oneTestCuzTestsOverwritingTests(oeisID_1random: str) -> tuple[int, ...]:
|
|
|
171
171
|
pass
|
|
172
172
|
|
|
173
173
|
@pytest.fixture
|
|
174
|
-
def
|
|
174
|
+
def mapShapeTestCountFolds(oeisID: str) -> tuple[int, ...]:
|
|
175
175
|
"""For each `oeisID` from the `pytest.fixture`, returns `listDimensions` from `valuesTestValidation`
|
|
176
176
|
if `validateListDimensions` approves. Each `listDimensions` is suitable for testing counts."""
|
|
177
177
|
while True:
|
|
@@ -202,10 +202,10 @@ def mapShapeTestFunctionality(oeisID_1random: str) -> tuple[int, ...]:
|
|
|
202
202
|
pass
|
|
203
203
|
|
|
204
204
|
@pytest.fixture
|
|
205
|
-
def
|
|
205
|
+
def mapShapeTestParallelization(oeisID: str) -> tuple[int, ...]:
|
|
206
206
|
"""For each `oeisID` from the `pytest.fixture`, returns `listDimensions` from `valuesTestParallelization`"""
|
|
207
207
|
n = random.choice(settingsOEIS[oeisID]['valuesTestParallelization'])
|
|
208
|
-
return
|
|
208
|
+
return settingsOEIS[oeisID]['getMapShape'](n)
|
|
209
209
|
|
|
210
210
|
@pytest.fixture
|
|
211
211
|
def mockBenchmarkTimer() -> Generator[unittest.mock.MagicMock | unittest.mock.AsyncMock, Any, None]:
|
|
@@ -255,8 +255,8 @@ def useThisDispatcher() -> Generator[Callable[..., None], Any, None]:
|
|
|
255
255
|
def patchDispatcher(callableTarget: Callable[..., Any]) -> None:
|
|
256
256
|
"""Patch the dispatcher property to return the target callable."""
|
|
257
257
|
# Create a new property that returns the target callable
|
|
258
|
-
def patched_dispatcher(self: theSSOT.PackageSettings) -> Callable[
|
|
259
|
-
def wrapper(state:
|
|
258
|
+
def patched_dispatcher(self: theSSOT.PackageSettings) -> Callable[..., Any]:
|
|
259
|
+
def wrapper(state: Any) -> Any:
|
|
260
260
|
return callableTarget(state)
|
|
261
261
|
return wrapper
|
|
262
262
|
|
|
@@ -268,7 +268,7 @@ def useThisDispatcher() -> Generator[Callable[..., None], Any, None]:
|
|
|
268
268
|
# Restore the original property
|
|
269
269
|
theSSOT.PackageSettings.dispatcher = original_dispatcher_property # type: ignore
|
|
270
270
|
|
|
271
|
-
def getAlgorithmDispatcher() -> Callable[
|
|
271
|
+
def getAlgorithmDispatcher() -> Callable[..., Any]:
|
|
272
272
|
moduleImported: ModuleType = importlib.import_module(The.logicalPathModuleSourceAlgorithm)
|
|
273
273
|
dispatcherCallable = getattr(moduleImported, The.sourceCallableDispatcher)
|
|
274
274
|
return dispatcherCallable
|
tests/test_computations.py
CHANGED
|
@@ -85,7 +85,7 @@ All tests leverage standardized utilities like `standardizedEqualToCallableRetur
|
|
|
85
85
|
that provide consistent, informative error messages and simplify test validation.
|
|
86
86
|
"""
|
|
87
87
|
|
|
88
|
-
from mapFolding import countFolds, getFoldsTotalKnown, oeisIDfor_n
|
|
88
|
+
from mapFolding import countFolds, getFoldsTotalKnown, oeisIDfor_n
|
|
89
89
|
from mapFolding.oeis import settingsOEIS
|
|
90
90
|
from mapFolding.someAssemblyRequired.RecipeJob import RecipeJob
|
|
91
91
|
from mapFolding.someAssemblyRequired.transformationTools import makeInitializedComputationState
|
|
@@ -98,28 +98,30 @@ import pytest
|
|
|
98
98
|
if __name__ == '__main__':
|
|
99
99
|
multiprocessing.set_start_method('spawn')
|
|
100
100
|
|
|
101
|
-
def test_algorithmSourceParallel(
|
|
102
|
-
standardizedEqualToCallableReturn(getFoldsTotalKnown(
|
|
101
|
+
def test_algorithmSourceParallel(mapShapeTestParallelization: tuple[int, ...], useAlgorithmSourceDispatcher: None) -> None:
|
|
102
|
+
standardizedEqualToCallableReturn(getFoldsTotalKnown(mapShapeTestParallelization), countFolds, mapShapeTestParallelization, None, 'maximum', None)
|
|
103
103
|
|
|
104
|
-
def
|
|
105
|
-
standardizedEqualToCallableReturn(getFoldsTotalKnown(
|
|
104
|
+
def test_theDaoOfMapFolding(mapShapeTestCountFolds: tuple[int, ...]) -> None:
|
|
105
|
+
standardizedEqualToCallableReturn(getFoldsTotalKnown(mapShapeTestCountFolds), countFolds, None, None, None, None, mapShapeTestCountFolds, None, None, 'theDaoOfMapFolding')
|
|
106
|
+
|
|
107
|
+
def test_algorithmSourceSequential(mapShapeTestCountFolds: tuple[int, ...], useAlgorithmSourceDispatcher: None) -> None:
|
|
108
|
+
standardizedEqualToCallableReturn(getFoldsTotalKnown(mapShapeTestCountFolds), countFolds, mapShapeTestCountFolds)
|
|
106
109
|
|
|
107
110
|
def test_aOFn_calculate_value(oeisID: str) -> None:
|
|
108
111
|
for n in settingsOEIS[oeisID]['valuesTestValidation']:
|
|
109
112
|
standardizedEqualToCallableReturn(settingsOEIS[oeisID]['valuesKnown'][n], oeisIDfor_n, oeisID, n)
|
|
110
113
|
|
|
111
|
-
def test_syntheticParallel(syntheticDispatcherFixture: None,
|
|
112
|
-
standardizedEqualToCallableReturn(getFoldsTotalKnown(
|
|
114
|
+
def test_syntheticParallel(syntheticDispatcherFixture: None, mapShapeTestParallelization: tuple[int, ...]) -> None:
|
|
115
|
+
standardizedEqualToCallableReturn(getFoldsTotalKnown(mapShapeTestParallelization), countFolds, mapShapeTestParallelization, None, 'maximum')
|
|
113
116
|
|
|
114
|
-
def test_syntheticSequential(syntheticDispatcherFixture: None,
|
|
115
|
-
standardizedEqualToCallableReturn(getFoldsTotalKnown(
|
|
117
|
+
def test_syntheticSequential(syntheticDispatcherFixture: None, mapShapeTestCountFolds: tuple[int, ...]) -> None:
|
|
118
|
+
standardizedEqualToCallableReturn(getFoldsTotalKnown(mapShapeTestCountFolds), countFolds, mapShapeTestCountFolds)
|
|
116
119
|
|
|
117
120
|
@pytest.mark.parametrize('pathFilenameTmpTesting', ['.py'], indirect=True)
|
|
118
|
-
def test_writeJobNumba(oneTestCuzTestsOverwritingTests:
|
|
121
|
+
def test_writeJobNumba(oneTestCuzTestsOverwritingTests: tuple[int, ...], pathFilenameTmpTesting: Path) -> None:
|
|
119
122
|
from mapFolding.someAssemblyRequired.toolboxNumba import SpicesJobNumba
|
|
120
123
|
from mapFolding.someAssemblyRequired.synthesizeNumbaJob import makeJobNumba
|
|
121
|
-
|
|
122
|
-
state = makeInitializedComputationState(mapShape)
|
|
124
|
+
state = makeInitializedComputationState(oneTestCuzTestsOverwritingTests)
|
|
123
125
|
|
|
124
126
|
pathFilenameModule = pathFilenameTmpTesting.absolute()
|
|
125
127
|
pathFilenameFoldsTotal = pathFilenameModule.with_suffix('.foldsTotalTesting')
|
|
@@ -142,4 +144,4 @@ def test_writeJobNumba(oneTestCuzTestsOverwritingTests: list[int], pathFilenameT
|
|
|
142
144
|
module.__name__ = "__main__"
|
|
143
145
|
Don_Lapre_Road_to_Self_Improvement.loader.exec_module(module)
|
|
144
146
|
|
|
145
|
-
standardizedEqualToCallableReturn(str(getFoldsTotalKnown(
|
|
147
|
+
standardizedEqualToCallableReturn(str(getFoldsTotalKnown(oneTestCuzTestsOverwritingTests)), pathFilenameFoldsTotal.read_text().strip)
|
tests/test_tasks.py
CHANGED
|
@@ -15,8 +15,8 @@ if __name__ == '__main__':
|
|
|
15
15
|
def test_countFoldsComputationDivisionsInvalid(mapShapeTestFunctionality: tuple[int, ...]) -> None:
|
|
16
16
|
standardizedEqualToCallableReturn(ValueError, countFolds, mapShapeTestFunctionality, None, {"wrong": "value"})
|
|
17
17
|
|
|
18
|
-
def test_countFoldsComputationDivisionsMaximum(
|
|
19
|
-
standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(
|
|
18
|
+
def test_countFoldsComputationDivisionsMaximum(mapShapeTestParallelization: list[int]) -> None:
|
|
19
|
+
standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(mapShapeTestParallelization)), countFolds, mapShapeTestParallelization, None, 'maximum', None)
|
|
20
20
|
|
|
21
21
|
@pytest.mark.parametrize("nameOfTest,callablePytest", PytestFor_defineConcurrencyLimit())
|
|
22
22
|
def test_defineConcurrencyLimit(nameOfTest: str, callablePytest: Callable[[], None]) -> None:
|
|
File without changes
|
|
File without changes
|
|
File without changes
|