mapFolding 0.8.6__py3-none-any.whl → 0.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/__init__.py +60 -13
- mapFolding/basecamp.py +32 -17
- mapFolding/beDRY.py +4 -5
- mapFolding/oeis.py +94 -7
- mapFolding/someAssemblyRequired/RecipeJob.py +103 -0
- mapFolding/someAssemblyRequired/__init__.py +71 -50
- mapFolding/someAssemblyRequired/_theTypes.py +11 -15
- mapFolding/someAssemblyRequired/_tool_Make.py +36 -9
- mapFolding/someAssemblyRequired/_tool_Then.py +59 -25
- mapFolding/someAssemblyRequired/_toolboxAntecedents.py +159 -272
- mapFolding/someAssemblyRequired/_toolboxContainers.py +155 -70
- mapFolding/someAssemblyRequired/_toolboxPython.py +168 -44
- mapFolding/someAssemblyRequired/synthesizeNumbaJob.py +154 -39
- mapFolding/someAssemblyRequired/toolboxNumba.py +72 -230
- mapFolding/someAssemblyRequired/transformationTools.py +370 -141
- mapFolding/syntheticModules/{numbaCount_doTheNeedful.py → numbaCount.py} +7 -4
- mapFolding/theDao.py +19 -16
- mapFolding/theSSOT.py +165 -62
- mapFolding/toolboxFilesystem.py +1 -1
- mapfolding-0.9.1.dist-info/METADATA +177 -0
- mapfolding-0.9.1.dist-info/RECORD +47 -0
- tests/__init__.py +44 -0
- tests/conftest.py +75 -7
- tests/test_computations.py +92 -10
- tests/test_filesystem.py +32 -33
- tests/test_other.py +0 -1
- tests/test_tasks.py +1 -1
- mapFolding/someAssemblyRequired/newInliner.py +0 -22
- mapfolding-0.8.6.dist-info/METADATA +0 -190
- mapfolding-0.8.6.dist-info/RECORD +0 -47
- {mapfolding-0.8.6.dist-info → mapfolding-0.9.1.dist-info}/WHEEL +0 -0
- {mapfolding-0.8.6.dist-info → mapfolding-0.9.1.dist-info}/entry_points.txt +0 -0
- {mapfolding-0.8.6.dist-info → mapfolding-0.9.1.dist-info}/licenses/LICENSE +0 -0
- {mapfolding-0.8.6.dist-info → mapfolding-0.9.1.dist-info}/top_level.txt +0 -0
mapFolding/__init__.py
CHANGED
@@ -11,36 +11,83 @@ Core modules:
 - basecamp: Public API with simplified interfaces for end users
 - theDao: Core computational algorithm using a functional state-transformation approach
 - beDRY: Core utility functions implementing consistent data handling, validation, and
-
+  resource management across the package's computational assembly-line
 - theSSOT: Single Source of Truth for configuration, types, and state management
 - toolboxFilesystem: Cross-platform file management services for storing and retrieving
-
+  computation results with robust error handling and fallback mechanisms
 - oeis: Interface to the Online Encyclopedia of Integer Sequences for known results

 Extended functionality:
 - someAssemblyRequired: Code transformation framework that optimizes the core algorithm
-
+  through AST manipulation, dataclass transformation, and compilation techniques
+  - The system converts readable code into high-performance implementations through
+    a systematic analysis and transformation pipeline
+  - Provides tools to "shatter" complex dataclasses into primitive components,
+    enabling compatibility with Numba and other optimization frameworks
+  - Creates specialized implementations tailored for specific input parameters
+
+Testing and extension:
+- tests: Comprehensive test suite designed for both verification and extension
+  - Provides fixtures and utilities that simplify testing of custom implementations
+  - Enables users to validate their own recipes and job configurations with minimal code
+  - Offers standardized testing patterns that maintain consistency across the codebase
+  - See tests/__init__.py for detailed documentation on extending the test suite

 Special directories:
 - .cache/: Stores cached data from external sources like OEIS to improve performance
 - syntheticModules/: Contains dynamically generated, optimized implementations of the
-
+  core algorithm created by the code transformation framework
 - reference/: Historical implementations and educational resources for algorithm exploration
-
-
+- reference/jobsCompleted/: Contains successful computations for previously unknown values,
+  including first-ever calculations for 2x19 and 2x20 maps (OEIS A001415)

 This package balances algorithm readability and understandability with
 high-performance computation capabilities, allowing users to compute map folding
 totals for larger dimensions than previously feasible while also providing
 a foundation for exploring advanced code transformation techniques.
 """
-from mapFolding.basecamp import countFolds
-from mapFolding.oeis import clearOEIScache, getOEISids, OEIS_for_n, oeisIDfor_n

 __all__ = [
-
-
-
-
-
+    'clearOEIScache',
+    'countFolds',
+    'getOEISids',
+    'OEIS_for_n',
+    'oeisIDfor_n',
 ]
+
+from mapFolding.theSSOT import (
+    Array1DElephino,
+    Array1DFoldsTotal,
+    Array1DLeavesTotal,
+    Array3D,
+    ComputationState,
+    DatatypeElephino,
+    DatatypeFoldsTotal,
+    DatatypeLeavesTotal,
+    NumPyElephino,
+    NumPyFoldsTotal,
+    NumPyIntegerType,
+    NumPyLeavesTotal,
+    raiseIfNoneGitHubIssueNumber3,
+    The,
+)
+
+from mapFolding.theDao import countInitialize, doTheNeedful
+
+from mapFolding.beDRY import (
+    outfitCountFolds,
+    setProcessorLimit,
+    validateListDimensions,
+)
+
+from mapFolding.toolboxFilesystem import (
+    getPathFilenameFoldsTotal,
+    getPathRootJobDEFAULT,
+    saveFoldsTotal,
+    saveFoldsTotalFAILearly,
+    writeStringToHere,
+)
+
+from mapFolding.basecamp import countFolds
+
+from mapFolding.oeis import clearOEIScache, getFoldsTotalKnown, getOEISids, OEIS_for_n, oeisIDfor_n
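
As a quick orientation to the flattened 0.9.1 namespace above, a minimal usage sketch (assumes the wheel is installed; the map shape is illustrative):

    # Everything below is re-exported directly from `mapFolding`, per the new __init__.py.
    from mapFolding import countFolds, getFoldsTotalKnown

    mapShape = (2, 4)                          # a small 2xN map (OEIS A001415 family)
    foldsTotal = countFolds(mapShape)          # undivided computation; returns an int
    knownValue = getFoldsTotalKnown(mapShape)  # -1 if the shape is absent from the cached OEIS data
    print(foldsTotal, knownValue)
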
mapFolding/basecamp.py
CHANGED
@@ -13,9 +13,16 @@ implementation, and optional persistence of results.
 """

 from collections.abc import Sequence
-from mapFolding
-
-
+from mapFolding import (
+    ComputationState,
+    getPathFilenameFoldsTotal,
+    outfitCountFolds,
+    saveFoldsTotal,
+    saveFoldsTotalFAILearly,
+    setProcessorLimit,
+    The,
+    validateListDimensions,
+)
 from os import PathLike
 from pathlib import PurePath

@@ -24,18 +31,27 @@ def countFolds(listDimensions: Sequence[int]
 , computationDivisions: int | str | None = None
 , CPUlimit: int | float | bool | None = None
 ) -> int:
-"""
+"""
+Count the total number of possible foldings for a given map dimensions.
+
+This function serves as the main public interface to the map folding algorithm,
+handling all parameter validation, computation state management, and result
+persistence in a user-friendly way.

-Parameters
-
-
-
-
-
-
-
-
+Parameters
+----------
+listDimensions: List of integers representing the dimensions of the map to be folded.
+pathLikeWriteFoldsTotal (None): Path, filename, or pathFilename to write the total fold count to.
+    If a directory is provided, creates a file with a default name based on map dimensions.
+computationDivisions (None):
+    Whether and how to divide the computational work. See notes for details.
+CPUlimit (None): This is only relevant if there are `computationDivisions`: whether and how to limit the CPU usage. See notes for details.
+Returns
+-------
+foldsTotal: Total number of distinct ways to fold a map of the given dimensions.

+Notes
+-----
 Computation divisions:
 - None: no division of the computation into tasks; sets task divisions to 0
 - int: direct set the number of task divisions; cannot exceed the map's total leaves

@@ -51,7 +67,8 @@ def countFolds(listDimensions: Sequence[int]
 - Integer `<= -1`: Subtract the absolute value from total CPUs.

 N.B.: You probably don't want to divide the computation into tasks.
-
+
+If you want to compute a large `foldsTotal`, dividing the computation into tasks is usually a bad idea. Dividing the algorithm into tasks is inherently inefficient: efficient division into tasks means there would be no overlap in the work performed by each task. When dividing this algorithm, the amount of overlap is between 50% and 90% by all tasks: at least 50% of the work done by every task must be done by _all_ tasks. If you improve the computation time, it will only change by -10 to -50% depending on (at the very least) the ratio of the map dimensions and the number of leaves. If an undivided computation would take 10 hours on your computer, for example, the computation will still take at least 5 hours but you might reduce the time to 9 hours. Most of the time, however, you will increase the computation time. If logicalCores >= leavesTotal, it will probably be faster. If logicalCores <= 2 * leavesTotal, it will almost certainly be slower for all map dimensions.
 """
 mapShape: tuple[int, ...] = validateListDimensions(listDimensions)
 concurrencyLimit: int = setProcessorLimit(CPUlimit, The.concurrencyPackage)

@@ -63,9 +80,7 @@ def countFolds(listDimensions: Sequence[int]
 else:
 pathFilenameFoldsTotal = None

-
-computationStateComplete: ComputationState = dispatcherCallableProxy(computationStateInitialized)
-# computationStateComplete: ComputationState = The.dispatcher(computationStateInitialized)
+computationStateComplete: ComputationState = The.dispatcher(computationStateInitialized)

 computationStateComplete.getFoldsTotal()
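
To make the documented parameters concrete, a hedged sketch of calling countFolds as described above. The keyword name `pathLikeWriteFoldsTotal` is taken from the docstring; the full signature is not visible in this hunk, so treat the keyword usage as an assumption.

    # Illustrative calls mirroring the countFolds docstring; values are arbitrary.
    from pathlib import Path
    from mapFolding import countFolds

    foldsTotal = countFolds([3, 4])  # no task division, no CPU limit: the recommended default

    # Persist the result; a directory gets a default filename derived from the map dimensions.
    # (Keyword name per the docstring above -- an assumption, since the signature is not shown here.)
    foldsTotal = countFolds([3, 4], pathLikeWriteFoldsTotal=Path("jobs"))
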
mapFolding/beDRY.py
CHANGED
@@ -19,7 +19,7 @@ These utilities form a stable internal API that other modules depend on, particu
 produce optimized implementations.
 """
 from collections.abc import Sequence
-from mapFolding
+from mapFolding import ComputationState, NumPyIntegerType
 from numpy import dtype as numpy_dtype, int64 as numpy_int64, ndarray
 from sys import maxsize as sysMaxsize
 from typing import Any

@@ -165,7 +165,7 @@ def _makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int) -> ndarray
 connectionGraph[indexDimension, activeLeaf1ndex, connectee1ndex] = connectee1ndex + cumulativeProduct[indexDimension]
 return connectionGraph

-def getConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int, datatype: type[
+def getConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int, datatype: type[NumPyIntegerType]) -> ndarray[tuple[int, int, int], numpy_dtype[NumPyIntegerType]]:
 """
 Create a properly typed connection graph for the map folding algorithm.

@@ -193,7 +193,7 @@ def getConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int, datatype: ty
 connectionGraph = connectionGraph.astype(datatype)
 return connectionGraph

-def makeDataContainer(shape: int | tuple[int, ...], datatype: type[
+def makeDataContainer(shape: int | tuple[int, ...], datatype: type[NumPyIntegerType]) -> ndarray[Any, numpy_dtype[NumPyIntegerType]]:
 """
 Create a typed NumPy array container with initialized values.

@@ -331,8 +331,7 @@ def validateListDimensions(listDimensions: Sequence[int]) -> tuple[int, ...]:
 ValueError
 If the input is empty or contains negative values.
 NotImplementedError
-If fewer than two positive dimensions are provided
-represent a valid map folding problem.
+If fewer than two positive dimensions are provided.
 """
 if not listDimensions:
 raise ValueError("`listDimensions` is a required parameter.")
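
The typing changes above parameterize both helpers on a NumPy integer type. A hedged sketch of the resulting call shapes, with numpy.uint8 standing in for any NumPyIntegerType:

    # Sketch of the newly typed beDRY helpers; numpy.uint8 is just one admissible NumPyIntegerType.
    import numpy
    from mapFolding.beDRY import getConnectionGraph, makeDataContainer, validateListDimensions

    mapShape = validateListDimensions([2, 3])     # validated tuple of dimensions, e.g. (2, 3)
    leavesTotal = 6                               # product of the dimensions of a 2x3 map
    container = makeDataContainer(leavesTotal, numpy.uint8)                  # 1-D initialized array, dtype uint8
    graph = getConnectionGraph(mapShape, leavesTotal, datatype=numpy.uint8)  # 3-D connection graph, dtype uint8
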
mapFolding/oeis.py
CHANGED
@@ -22,8 +22,7 @@ implementation in the package.
 from collections.abc import Callable
 from datetime import datetime, timedelta
 from functools import cache
-from mapFolding
-from mapFolding.toolboxFilesystem import writeStringToHere
+from mapFolding import writeStringToHere, The
 from pathlib import Path
 from typing import Any, Final, TYPE_CHECKING
 import argparse

@@ -60,14 +59,20 @@ class SettingsOEIShardcodedValues(TypedDict):
 valuesTestValidation: list[int]

 settingsOEIShardcodedValues: dict[str, SettingsOEIShardcodedValues] = {
+    'A000136': {
+        'getMapShape': lambda n: tuple(sorted([1, n])),
+        'valuesBenchmark': [14],
+        'valuesTestParallelization': [*range(3, 7)],
+        'valuesTestValidation': [random.randint(2, 9)],
+    },
     'A001415': {
-        'getMapShape': lambda n: (2, n)
+        'getMapShape': lambda n: tuple(sorted([2, n])),
         'valuesBenchmark': [14],
         'valuesTestParallelization': [*range(3, 7)],
         'valuesTestValidation': [random.randint(2, 9)],
     },
     'A001416': {
-        'getMapShape': lambda n: (3, n)
+        'getMapShape': lambda n: tuple(sorted([3, n])),
         'valuesBenchmark': [9],
         'valuesTestParallelization': [*range(3, 5)],
         'valuesTestValidation': [random.randint(2, 6)],

@@ -149,9 +154,9 @@ def _parseBFileOEIS(OEISbFile: str, oeisID: str) -> dict[int, int]:
 sequence ID or if the content format is invalid.
 """
 bFileLines: list[str] = OEISbFile.strip().splitlines()
-if not bFileLines.pop(0).startswith(f"# {oeisID}"):
-
-
+# if not bFileLines.pop(0).startswith(f"# {oeisID}"):
+# warnings.warn(f"Content does not match sequence {oeisID}")
+# return {-1: -1}

 OEISsequence: dict[int, int] = {}
 for line in bFileLines:

@@ -162,6 +167,27 @@ def _parseBFileOEIS(OEISbFile: str, oeisID: str) -> dict[int, int]:
 return OEISsequence

 def getOEISofficial(pathFilenameCache: Path, url: str) -> None | str:
+"""
+Retrieve OEIS sequence data from cache or online source.
+
+This function implements a caching strategy for OEIS sequence data, first checking
+if a local cached copy exists and is not expired. If a valid cache exists, it returns
+the cached content; otherwise, it fetches the data from the OEIS website and
+writes it to the cache for future use.
+
+Parameters:
+pathFilenameCache: Path to the local cache file.
+url: URL to retrieve the OEIS sequence data from if cache is invalid or missing.
+
+Returns:
+oeisInformation: The retrieved OEIS sequence information as a string, or None if
+the information could not be retrieved.
+
+Notes:
+The cache expiration period is controlled by the global `cacheDays` variable.
+If the function fails to retrieve data from both cache and online source,
+it will return None and issue a warning.
+"""
 tryCache: bool = False
 if pathFilenameCache.exists():
 fileAge: timedelta = datetime.now() - datetime.fromtimestamp(pathFilenameCache.stat().st_mtime)

@@ -175,6 +201,7 @@ def getOEISofficial(pathFilenameCache: Path, url: str) -> None | str:
 tryCache = False

 if not tryCache:
+# Change http handling #13
 httpResponse: urllib.response.addinfourl = urllib.request.urlopen(url)
 oeisInformation = httpResponse.read().decode('utf-8')
 writeStringToHere(oeisInformation, pathFilenameCache)

@@ -214,6 +241,27 @@ def getOEISidValues(oeisID: str) -> dict[int, int]:
 return {-1: -1}

 def getOEISidInformation(oeisID: str) -> tuple[str, int]:
+"""
+Retrieve the description and offset for an OEIS sequence.
+
+This function fetches the metadata for a given OEIS sequence ID, including
+its textual description and index offset value. It uses a caching mechanism
+to avoid redundant network requests while ensuring data freshness.
+
+Parameters:
+oeisID: The OEIS sequence identifier to retrieve information for.
+
+Returns:
+A tuple containing:
+- description: A string describing the sequence's mathematical meaning.
+- offset: An integer representing the starting index of the sequence.
+Usually 0 or 1, depending on the mathematical context.
+
+Notes:
+Sequence descriptions are parsed from the machine-readable format of OEIS.
+If information cannot be retrieved, warning messages are issued and
+fallback values are returned.
+"""
 oeisID = validateOEISid(oeisID)
 pathFilenameCache: Path = pathCache / f"{oeisID}.txt"
 url: str = f"https://oeis.org/search?q=id:{oeisID}&fmt=text"

@@ -243,6 +291,26 @@ def getOEISidInformation(oeisID: str) -> tuple[str, int]:
 return description, offset

 def makeSettingsOEIS() -> dict[str, SettingsOEIS]:
+"""
+Construct the comprehensive settings dictionary for all implemented OEIS sequences.
+
+This function builds a complete configuration dictionary for all supported OEIS
+sequences by retrieving and combining:
+1. Sequence values from OEIS b-files
+2. Sequence metadata (descriptions and offsets)
+3. Hardcoded mapping functions and test values
+
+The resulting dictionary provides a single authoritative source for all OEIS-related
+configurations used throughout the package, including:
+- Mathematical descriptions of each sequence
+- Functions to convert between sequence indices and map dimensions
+- Known sequence values retrieved from OEIS
+- Testing and benchmarking reference values
+
+Returns:
+A dictionary mapping OEIS sequence IDs to their complete settings objects,
+containing all metadata and known values needed for computation and validation.
+"""
 settingsTarget: dict[str, SettingsOEIS] = {}
 for oeisID in oeisIDsImplemented:
 valuesKnownSherpa: dict[int, int] = getOEISidValues(oeisID)

@@ -277,6 +345,25 @@ def makeDictionaryFoldsTotalKnown() -> dict[tuple[int, ...], int]:
 return dictionaryMapDimensionsToFoldsTotalKnown

 def getFoldsTotalKnown(mapShape: tuple[int, ...]) -> int:
+"""
+Retrieve the known total number of foldings for a given map shape.
+
+This function looks up precalculated folding totals for specific map dimensions
+from OEIS sequences. It serves as a rapid reference for known values without
+requiring computation, and can be used to validate algorithm results.
+
+Parameters:
+mapShape: A tuple of integers representing the dimensions of the map.
+
+Returns:
+foldingsTotal: The known total number of foldings for the given map shape,
+or -1 if the map shape doesn't match any known values in the OEIS sequences.
+
+Notes:
+The function uses a cached dictionary (via makeDictionaryFoldsTotalKnown) to
+efficiently retrieve values without repeatedly parsing OEIS data. Map shape
+tuples are sorted internally to ensure consistent lookup regardless of dimension order.
+"""
 lookupFoldsTotal = makeDictionaryFoldsTotalKnown()
 return lookupFoldsTotal.get(tuple(mapShape), -1)
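
Two small illustrations of the oeis.py changes above. First, the `getMapShape` lambdas now sort the dimensions, so every index maps to one canonical shape (plain Python, no package needed):

    getMapShape_A001415 = lambda n: tuple(sorted([2, n]))
    print(getMapShape_A001415(5))  # (2, 5)
    print(getMapShape_A001415(1))  # (1, 2) -- previously (2, 1); sorting yields a single canonical key

Second, a hedged sketch of the age-based cache check that the getOEISofficial docstring describes; `cacheDays = 7` is an assumed value, and the real function also fetches from oeis.org and rewrites the cache when this check fails:

    from datetime import datetime, timedelta
    from pathlib import Path

    cacheDays = 7  # assumption: the actual global and its default are not shown in this diff

    def readCacheIfFresh(pathFilenameCache: Path) -> str | None:
        """Return cached text if the file exists and is younger than `cacheDays`, else None."""
        if pathFilenameCache.exists():
            fileAge = datetime.now() - datetime.fromtimestamp(pathFilenameCache.stat().st_mtime)
            if fileAge < timedelta(days=cacheDays):
                return pathFilenameCache.read_text()
        return None
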
mapFolding/someAssemblyRequired/RecipeJob.py
ADDED

@@ -0,0 +1,103 @@
+from mapFolding.someAssemblyRequired import ShatteredDataclass, ast_Identifier, parsePathFilename2astModule, str_nameDOTname
+from mapFolding.someAssemblyRequired.toolboxNumba import theNumbaFlow
+from mapFolding.someAssemblyRequired.transformationTools import shatter_dataclassesDOTdataclass
+from mapFolding.theSSOT import ComputationState, DatatypeElephino as TheDatatypeElephino, DatatypeFoldsTotal as TheDatatypeFoldsTotal, DatatypeLeavesTotal as TheDatatypeLeavesTotal
+from mapFolding.toolboxFilesystem import getPathFilenameFoldsTotal, getPathRootJobDEFAULT
+
+import dataclasses
+from pathlib import Path, PurePosixPath
+from typing import TypeAlias
+
+@dataclasses.dataclass
+class RecipeJob:
+    state: ComputationState
+    # TODO create function to calculate `foldsTotalEstimated`
+    foldsTotalEstimated: int = 0
+    shatteredDataclass: ShatteredDataclass = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType]
+
+    # ========================================
+    # Source
+    source_astModule = parsePathFilename2astModule(theNumbaFlow.pathFilenameSequential)
+    sourceCountCallable: ast_Identifier = theNumbaFlow.callableSequential
+
+    sourceLogicalPathModuleDataclass: str_nameDOTname = theNumbaFlow.logicalPathModuleDataclass
+    sourceDataclassIdentifier: ast_Identifier = theNumbaFlow.dataclassIdentifier
+    sourceDataclassInstance: ast_Identifier = theNumbaFlow.dataclassInstance
+
+    sourcePathPackage: PurePosixPath | None = theNumbaFlow.pathPackage
+    sourcePackageIdentifier: ast_Identifier | None = theNumbaFlow.packageIdentifier
+
+    # ========================================
+    # Filesystem (names of physical objects)
+    pathPackage: PurePosixPath | None = None
+    pathModule: PurePosixPath | None = PurePosixPath(getPathRootJobDEFAULT())
+    """ `pathModule` will override `pathPackage` and `logicalPathRoot`."""
+    fileExtension: str = theNumbaFlow.fileExtension
+    pathFilenameFoldsTotal: PurePosixPath = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType]
+
+    # ========================================
+    # Logical identifiers (as opposed to physical identifiers)
+    packageIdentifier: ast_Identifier | None = None
+    logicalPathRoot: str_nameDOTname | None = None
+    """ `logicalPathRoot` likely corresponds to a physical filesystem directory."""
+    moduleIdentifier: ast_Identifier = dataclasses.field(default=None, init=True) # pyright: ignore[reportAssignmentType]
+    countCallable: ast_Identifier = sourceCountCallable
+    dataclassIdentifier: ast_Identifier | None = sourceDataclassIdentifier
+    dataclassInstance: ast_Identifier | None = sourceDataclassInstance
+    logicalPathModuleDataclass: str_nameDOTname | None = sourceLogicalPathModuleDataclass
+
+    # ========================================
+    # Datatypes
+    DatatypeFoldsTotal: TypeAlias = TheDatatypeFoldsTotal
+    DatatypeElephino: TypeAlias = TheDatatypeElephino
+    DatatypeLeavesTotal: TypeAlias = TheDatatypeLeavesTotal
+
+    def _makePathFilename(self,
+            pathRoot: PurePosixPath | None = None,
+            logicalPathINFIX: str_nameDOTname | None = None,
+            filenameStem: str | None = None,
+            fileExtension: str | None = None,
+            ) -> PurePosixPath:
+        if pathRoot is None:
+            pathRoot = self.pathPackage or PurePosixPath(Path.cwd())
+        if logicalPathINFIX:
+            whyIsThisStillAThing: list[str] = logicalPathINFIX.split('.')
+            pathRoot = pathRoot.joinpath(*whyIsThisStillAThing)
+        if filenameStem is None:
+            filenameStem = self.moduleIdentifier
+        if fileExtension is None:
+            fileExtension = self.fileExtension
+        filename: str = filenameStem + fileExtension
+        return pathRoot.joinpath(filename)
+
+    @property
+    def pathFilenameModule(self) -> PurePosixPath:
+        if self.pathModule is None:
+            return self._makePathFilename()
+        else:
+            return self._makePathFilename(pathRoot=self.pathModule, logicalPathINFIX=None)
+
+    def __post_init__(self):
+        pathFilenameFoldsTotal = PurePosixPath(getPathFilenameFoldsTotal(self.state.mapShape))
+
+        if self.moduleIdentifier is None: # pyright: ignore[reportUnnecessaryComparison]
+            self.moduleIdentifier = pathFilenameFoldsTotal.stem
+
+        if self.pathFilenameFoldsTotal is None: # pyright: ignore[reportUnnecessaryComparison]
+            self.pathFilenameFoldsTotal = pathFilenameFoldsTotal
+
+        if self.shatteredDataclass is None and self.logicalPathModuleDataclass and self.dataclassIdentifier and self.dataclassInstance: # pyright: ignore[reportUnnecessaryComparison]
+            self.shatteredDataclass = shatter_dataclassesDOTdataclass(self.logicalPathModuleDataclass, self.dataclassIdentifier, self.dataclassInstance)
+
+    # ========================================
+    # Fields you probably don't need =================================
+    # Dispatcher =================================
+    sourceDispatcherCallable: ast_Identifier = theNumbaFlow.callableDispatcher
+    dispatcherCallable: ast_Identifier = sourceDispatcherCallable
+    # Parallel counting =================================
+    sourceDataclassInstanceTaskDistribution: ast_Identifier = theNumbaFlow.dataclassInstanceTaskDistribution
+    sourceConcurrencyManagerNamespace: ast_Identifier = theNumbaFlow.concurrencyManagerNamespace
+    sourceConcurrencyManagerIdentifier: ast_Identifier = theNumbaFlow.concurrencyManagerIdentifier
+    dataclassInstanceTaskDistribution: ast_Identifier = sourceDataclassInstanceTaskDistribution
+    concurrencyManagerNamespace: ast_Identifier = sourceConcurrencyManagerNamespace
+    concurrencyManagerIdentifier: ast_Identifier = sourceConcurrencyManagerIdentifier
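
A hedged sketch of instantiating the new RecipeJob dataclass above. Only `state` is required; how the ComputationState is produced is an assumption here (outfitCountFolds is re-exported by mapFolding, but its exact signature does not appear in this diff):

    from mapFolding import outfitCountFolds
    from mapFolding.someAssemblyRequired.RecipeJob import RecipeJob

    state = outfitCountFolds((2, 10))   # assumed call: an initialized ComputationState for a 2x10 map
    job = RecipeJob(state=state)

    print(job.moduleIdentifier)         # __post_init__ defaults this to the foldsTotal filename stem
    print(job.pathFilenameModule)       # resolved under pathModule, i.e. getPathRootJobDEFAULT()
    print(job.pathFilenameFoldsTotal)   # defaults to getPathFilenameFoldsTotal(state.mapShape)
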
mapFolding/someAssemblyRequired/__init__.py
CHANGED

@@ -1,66 +1,87 @@
 """
-Code
+Code Transformation Framework for Algorithm Optimization and Testing

 This package implements a comprehensive framework for programmatically analyzing,
-transforming, and generating Python code. It
-
-
-different execution environments or specific computational tasks.
+transforming, and generating optimized Python code. It serves as the algorithmic
+optimization engine for the mapFolding package, enabling the conversion of readable,
+functional implementations into highly-optimized variants with verified correctness.

-Core
-1. AST Pattern Recognition - Precisely identify and match code patterns using composable predicates
-2. Algorithm Transformation - Convert functional state-based implementations to primitive operations
-3. Dataclass "Shattering" - Decompose complex state objects into primitive components
-4. Performance Optimization - Apply domain-specific optimizations for numerical computation
-5. Code Generation - Generate specialized implementations with appropriate imports and syntax
+## Core Architecture Components

-
-
-
-
-
+1. **AST Manipulation Tools**
+   - Pattern recognition with composable predicates (ifThis)
+   - Node access with consistent interfaces (DOT)
+   - AST traversal and transformation (NodeChanger, NodeTourist)
+   - AST construction with sane defaults (Make)
+   - Node transformation operations (grab, Then)

-
-
-
+2. **Container and Organization**
+   - Import tracking and management (LedgerOfImports)
+   - Function packaging with dependencies (IngredientsFunction)
+   - Module assembly with structured components (IngredientsModule)
+   - Recipe configuration for generating optimized code (RecipeSynthesizeFlow)
+   - Dataclass decomposition for compatibility (ShatteredDataclass)
+
+3. **Optimization Pipelines**
+   - General-purpose Numba acceleration (makeNumbaFlow)
+   - Job-specific optimization for concrete parameters (makeJobNumba)
+   - Specialized component transformation (decorateCallableWithNumba)
+
+## Integration with Testing Framework
+
+The transformation components are extensively tested through the package's test suite,
+which provides specialized fixtures and utilities for validating both the transformation
+process and the resulting optimized code:
+
+- **syntheticDispatcherFixture**: Creates and tests a complete Numba-optimized module
+  using RecipeSynthesizeFlow configuration
+
+- **test_writeJobNumba**: Tests the job-specific optimization process with RecipeJob
+
+These fixtures enable users to test their own custom recipes and job configurations
+with minimal effort. See the documentation in tests/__init__.py for details on
+extending the test suite for custom implementations.
+
+The framework balances multiple optimization levels - from general algorithmic
+improvements to parameter-specific optimizations - while maintaining the ability
+to verify correctness at each transformation stage through the integrated test suite.
 """
+
 from mapFolding.someAssemblyRequired._theTypes import (
-    ast_expr_Slice,
-    ast_Identifier,
-    astClassHasDOTnameNotName,
-    astClassHasDOTtarget,
-
-
-
-
-
-
-
-
-
-
-
-    TypeCertified,
-    个,
+    ast_expr_Slice as ast_expr_Slice,
+    ast_Identifier as ast_Identifier,
+    astClassHasDOTnameNotName as astClassHasDOTnameNotName,
+    astClassHasDOTtarget as astClassHasDOTtarget,
+    astClassHasDOTvalue_expr as astClassHasDOTvalue_expr,
+    astClassHasDOTvalue_exprNone as astClassHasDOTvalue_exprNone,
+    astClassHasDOTtargetAttributeNameSubscript as astClassHasDOTtargetAttributeNameSubscript,
+    astClassHasDOTtarget_expr as astClassHasDOTtarget_expr,
+    astClassHasDOTvalue as astClassHasDOTvalue,
+    astClassOptionallyHasDOTnameNotName as astClassOptionallyHasDOTnameNotName,
+    intORlist_ast_type_paramORstr_orNone as intORlist_ast_type_paramORstr_orNone,
+    intORstr_orNone as intORstr_orNone,
+    list_ast_type_paramORstr_orNone as list_ast_type_paramORstr_orNone,
+    str_nameDOTname as str_nameDOTname,
+    个 as 个,
 )

 from mapFolding.someAssemblyRequired._toolboxPython import (
-    importLogicalPath2Callable,
-    importPathFilename2Callable,
-    NodeChanger,
-    NodeTourist,
-    parseLogicalPath2astModule,
-    parsePathFilename2astModule,
+    importLogicalPath2Callable as importLogicalPath2Callable,
+    importPathFilename2Callable as importPathFilename2Callable,
+    NodeChanger as NodeChanger,
+    NodeTourist as NodeTourist,
+    parseLogicalPath2astModule as parseLogicalPath2astModule,
+    parsePathFilename2astModule as parsePathFilename2astModule,
 )

-from mapFolding.someAssemblyRequired._toolboxAntecedents import be, DOT, ifThis
-from mapFolding.someAssemblyRequired._tool_Make import Make
-from mapFolding.someAssemblyRequired._tool_Then import Then
+from mapFolding.someAssemblyRequired._toolboxAntecedents import be as be, DOT as DOT, ifThis as ifThis
+from mapFolding.someAssemblyRequired._tool_Make import Make as Make
+from mapFolding.someAssemblyRequired._tool_Then import grab as grab, Then as Then

 from mapFolding.someAssemblyRequired._toolboxContainers import (
-    IngredientsFunction,
-    IngredientsModule,
-    LedgerOfImports,
-    RecipeSynthesizeFlow,
-    ShatteredDataclass,
+    IngredientsFunction as IngredientsFunction,
+    IngredientsModule as IngredientsModule,
+    LedgerOfImports as LedgerOfImports,
+    RecipeSynthesizeFlow as RecipeSynthesizeFlow,
+    ShatteredDataclass as ShatteredDataclass,
 )