mapFolding 0.7.1-py3-none-any.whl → 0.8.0-py3-none-any.whl
This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- mapFolding/beDRY.py +77 -81
- mapFolding/noHomeYet.py +2 -2
- mapFolding/oeis.py +2 -2
- mapFolding/someAssemblyRequired/Z0Z_workbench.py +347 -30
- mapFolding/someAssemblyRequired/__init__.py +4 -3
- mapFolding/someAssemblyRequired/getLLVMforNoReason.py +0 -1
- mapFolding/someAssemblyRequired/ingredientsNumba.py +87 -2
- mapFolding/someAssemblyRequired/synthesizeDataConverters.py +34 -52
- mapFolding/someAssemblyRequired/{synthesizeNumbaJob.py → synthesizeNumbaJobVESTIGIAL.py} +18 -21
- mapFolding/someAssemblyRequired/transformationTools.py +546 -208
- mapFolding/syntheticModules/numbaCount_doTheNeedful.py +197 -12
- mapFolding/theDao.py +23 -16
- mapFolding/theSSOT.py +28 -43
- {mapfolding-0.7.1.dist-info → mapfolding-0.8.0.dist-info}/METADATA +6 -7
- mapfolding-0.8.0.dist-info/RECORD +41 -0
- {mapfolding-0.7.1.dist-info → mapfolding-0.8.0.dist-info}/WHEEL +1 -1
- tests/conftest.py +2 -3
- tests/test_filesystem.py +0 -2
- tests/test_other.py +2 -3
- tests/test_tasks.py +0 -4
- mapFolding/someAssemblyRequired/synthesizeCountingFunctions.py +0 -7
- mapFolding/someAssemblyRequired/synthesizeNumba.py +0 -91
- mapFolding/someAssemblyRequired/synthesizeNumbaModules.py +0 -91
- mapFolding/someAssemblyRequired/whatWillBe.py +0 -357
- mapFolding/syntheticModules/__init__.py +0 -0
- mapFolding/syntheticModules/dataNamespaceFlattened.py +0 -30
- mapFolding/syntheticModules/multiprocessingCount_doTheNeedful.py +0 -216
- mapFolding/syntheticModules/numbaCount.py +0 -90
- mapFolding/syntheticModules/numbaCountSequential.py +0 -111
- mapFolding/syntheticModules/numba_doTheNeedful.py +0 -12
- mapfolding-0.7.1.dist-info/RECORD +0 -51
- /mapFolding/syntheticModules/{numbaCountExample.py → numbaCountHistoricalExample.py} +0 -0
- /mapFolding/syntheticModules/{numba_doTheNeedfulExample.py → numba_doTheNeedfulHistoricalExample.py} +0 -0
- {mapfolding-0.7.1.dist-info → mapfolding-0.8.0.dist-info}/LICENSE +0 -0
- {mapfolding-0.7.1.dist-info → mapfolding-0.8.0.dist-info}/entry_points.txt +0 -0
- {mapfolding-0.7.1.dist-info → mapfolding-0.8.0.dist-info}/top_level.txt +0 -0
mapFolding/beDRY.py
CHANGED
@@ -1,30 +1,11 @@
 """A relatively stable API for oft-needed functionality."""
-from mapFolding.theSSOT import (
-    Array3D,
-    ComputationState,
-    getDatatypePackage,
-    getNumpyDtypeDefault,
-)
 from collections.abc import Sequence
+from mapFolding.theSSOT import Array3D, ComputationState, getDatatypePackage, getNumpyDtypeDefault
 from sys import maxsize as sysMaxsize
 from typing import Any
 from Z0Z_tools import defineConcurrencyLimit, intInnit, oopsieKwargsie
 import numpy
 
-def validateListDimensions(listDimensions: Sequence[int]) -> tuple[int, ...]:
-    if not listDimensions:
-        raise ValueError("listDimensions is a required parameter.")
-    listValidated: list[int] = intInnit(listDimensions, 'listDimensions')
-    listNonNegative: list[int] = []
-    for dimension in listValidated:
-        if dimension < 0:
-            raise ValueError(f"Dimension {dimension} must be non-negative")
-        listNonNegative.append(dimension)
-    dimensionsValid = [dimension for dimension in listNonNegative if dimension > 0]
-    if len(dimensionsValid) < 2:
-        raise NotImplementedError(f"This function requires listDimensions, {listDimensions}, to have at least two dimensions greater than 0. You may want to look at https://oeis.org/.")
-    return tuple(sorted(dimensionsValid))
-
 def getLeavesTotal(mapShape: tuple[int, ...]) -> int:
     productDimensions = 1
     for dimension in mapShape:
@@ -33,7 +14,59 @@ def getLeavesTotal(mapShape: tuple[int, ...]) -> int:
         productDimensions *= dimension
     return productDimensions
 
-def
+def getTaskDivisions(computationDivisions: int | str | None, concurrencyLimit: int, leavesTotal: int) -> int:
+    """
+    Determines whether to divide the computation into tasks and how many divisions.
+
+    Parameters
+    ----------
+    computationDivisions (None)
+        Specifies how to divide computations:
+        - `None`: no division of the computation into tasks; sets task divisions to 0.
+        - int: direct set the number of task divisions; cannot exceed the map's total leaves.
+        - `'maximum'`: divides into `leavesTotal`-many `taskDivisions`.
+        - `'cpu'`: limits the divisions to the number of available CPUs, i.e. `concurrencyLimit`.
+    concurrencyLimit
+        Maximum number of concurrent tasks allowed.
+    CPUlimit
+        for error reporting.
+    listDimensions
+        for error reporting.
+
+    Returns
+    -------
+    taskDivisions
+        How many tasks must finish before the job can compute the total number of folds; `0` means no tasks, only job.
+
+    Raises
+    ------
+    ValueError
+        If computationDivisions is an unsupported type or if resulting task divisions exceed total leaves.
+
+    Notes
+    -----
+    Task divisions should not exceed total leaves or the folds will be over-counted.
+    """
+    taskDivisions = 0
+    if not computationDivisions:
+        pass
+    elif isinstance(computationDivisions, int):
+        taskDivisions = computationDivisions
+    elif isinstance(computationDivisions, str): # type: ignore
+        # 'Unnecessary isinstance call; "str" is always an instance of "str", so sayeth Pylance'. Yeah, well "User is not always an instance of "correct input" so sayeth the programmer.
+        computationDivisions = computationDivisions.lower()
+        if computationDivisions == 'maximum':
+            taskDivisions = leavesTotal
+        elif computationDivisions == 'cpu':
+            taskDivisions = min(concurrencyLimit, leavesTotal)
+    else:
+        raise ValueError(f"I received {computationDivisions} for the parameter, `computationDivisions`, but the so-called programmer didn't implement code for that.")
+
+    if taskDivisions > leavesTotal:
+        raise ValueError(f"Problem: `taskDivisions`, ({taskDivisions}), is greater than `leavesTotal`, ({leavesTotal}), which will cause duplicate counting of the folds.\n\nChallenge: you cannot directly set `taskDivisions` or `leavesTotal`. They are derived from parameters that may or may not still be named `computationDivisions`, `CPUlimit` , and `listDimensions` and from dubious-quality Python code.")
+    return int(max(0, taskDivisions))
+
+def interpretParameter_datatype(datatype: type[numpy.signedinteger[Any]] | None = None) -> type[numpy.signedinteger[Any]]:
     """An imperfect way to reduce code duplication."""
     if 'numpy' == getDatatypePackage():
         numpyDtype = datatype or getNumpyDtypeDefault()
@@ -42,7 +75,7 @@ def getNumpyDtype(datatype: type[numpy.signedinteger[Any]] | None = None) -> typ
     return numpyDtype
 
 def makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int, datatype: type[numpy.signedinteger[Any]] | None = None) -> Array3D:
-    numpyDtype =
+    numpyDtype = interpretParameter_datatype(datatype)
     dimensionsTotal = len(mapShape)
     cumulativeProduct = numpy.multiply.accumulate([1] + list(mapShape), dtype=numpyDtype)
     arrayDimensions = numpy.array(mapShape, dtype=numpyDtype)
@@ -69,9 +102,15 @@ def makeConnectionGraph(mapShape: tuple[int, ...], leavesTotal: int, datatype: t
     return connectionGraph
 
 def makeDataContainer(shape: int | tuple[int, ...], datatype: type[numpy.signedinteger[Any]] | None = None) -> numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]:
-    numpyDtype =
+    numpyDtype = interpretParameter_datatype(datatype)
     return numpy.zeros(shape, dtype=numpyDtype)
 
+def outfitCountFolds(mapShape: tuple[int, ...], computationDivisions: int | str | None = None, concurrencyLimit: int = 1) -> ComputationState:
+    leavesTotal = getLeavesTotal(mapShape)
+    taskDivisions = getTaskDivisions(computationDivisions, concurrencyLimit, leavesTotal)
+    computationStateInitialized = ComputationState(mapShape, leavesTotal, taskDivisions, concurrencyLimit)
+    return computationStateInitialized
+
 def setCPUlimit(CPUlimit: Any | None) -> int:
     """Sets CPU limit for concurrent operations.
 
@@ -106,66 +145,23 @@ def setCPUlimit(CPUlimit: Any | None) -> int:
         concurrencyLimit: int = defineConcurrencyLimit(CPUlimit, get_num_threads())
         set_num_threads(concurrencyLimit)
         concurrencyLimit = get_num_threads()
-    elif concurrencyPackage == '
+    elif concurrencyPackage == 'multiprocessing':
        # When to use multiprocessing.set_start_method https://github.com/hunterhogan/mapFolding/issues/6
-        concurrencyLimit
+        concurrencyLimit = defineConcurrencyLimit(CPUlimit)
    else:
        raise NotImplementedError(f"I received {concurrencyPackage=} but I don't know what to do with that.")
    return concurrencyLimit
 
-def
-    """
-    Determines whether to divide the computation into tasks and how many divisions.
-
-    Parameters
-    ----------
-    computationDivisions (None)
-        Specifies how to divide computations:
-        - `None`: no division of the computation into tasks; sets task divisions to 0.
-        - int: direct set the number of task divisions; cannot exceed the map's total leaves.
-        - `'maximum'`: divides into `leavesTotal`-many `taskDivisions`.
-        - `'cpu'`: limits the divisions to the number of available CPUs, i.e. `concurrencyLimit`.
-    concurrencyLimit
-        Maximum number of concurrent tasks allowed.
-    CPUlimit
-        for error reporting.
-    listDimensions
-        for error reporting.
-
-    Returns
-    -------
-    taskDivisions
-        How many tasks must finish before the job can compute the total number of folds; `0` means no tasks, only job.
-
-    Raises
-    ------
-    ValueError
-        If computationDivisions is an unsupported type or if resulting task divisions exceed total leaves.
-
-    Notes
-    -----
-    Task divisions should not exceed total leaves or the folds will be over-counted.
-    """
-    taskDivisions = 0
-    if not computationDivisions:
-        pass
-    elif isinstance(computationDivisions, int):
-        taskDivisions = computationDivisions
-    elif isinstance(computationDivisions, str): # type: ignore 'Unnecessary isinstance call; "str" is always an instance of "str", so sayeth Pylance'. Yeah, well "User is not always an instance of "correct input" so sayeth the programmer.
-        computationDivisions = computationDivisions.lower()
-        if computationDivisions == 'maximum':
-            taskDivisions = leavesTotal
-        elif computationDivisions == 'cpu':
-            taskDivisions = min(concurrencyLimit, leavesTotal)
-    else:
-        raise ValueError(f"I received {computationDivisions} for the parameter, `computationDivisions`, but the so-called programmer didn't implement code for that.")
-
-    if taskDivisions > leavesTotal:
-        raise ValueError(f"Problem: `taskDivisions`, ({taskDivisions}), is greater than `leavesTotal`, ({leavesTotal}), which will cause duplicate counting of the folds.\n\nChallenge: you cannot directly set `taskDivisions` or `leavesTotal`. They are derived from parameters that may or may not still be named `computationDivisions`, `CPUlimit` , and `listDimensions` and from dubious-quality Python code.")
-    return int(max(0, taskDivisions))
-
-def outfitCountFolds(mapShape: tuple[int, ...], computationDivisions: int | str | None = None, concurrencyLimit: int = 1) -> ComputationState:
-    leavesTotal = getLeavesTotal(mapShape)
-    taskDivisions = getTaskDivisions(computationDivisions, concurrencyLimit, leavesTotal)
-    computationStateInitialized = ComputationState(mapShape, leavesTotal, taskDivisions, concurrencyLimit)
-    return computationStateInitialized
+def validateListDimensions(listDimensions: Sequence[int]) -> tuple[int, ...]:
+    if not listDimensions:
+        raise ValueError("listDimensions is a required parameter.")
+    listValidated: list[int] = intInnit(listDimensions, 'listDimensions')
+    listNonNegative: list[int] = []
+    for dimension in listValidated:
+        if dimension < 0:
+            raise ValueError(f"Dimension {dimension} must be non-negative")
+        listNonNegative.append(dimension)
+    dimensionsValid = [dimension for dimension in listNonNegative if dimension > 0]
+    if len(dimensionsValid) < 2:
+        raise NotImplementedError(f"This function requires listDimensions, {listDimensions}, to have at least two dimensions greater than 0. You may want to look at https://oeis.org/.")
+    return tuple(sorted(dimensionsValid))
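
Note: the `getTaskDivisions` and `outfitCountFolds` helpers shown above derive the task count from `computationDivisions`. A minimal usage sketch, assuming only the 0.8.0 signatures visible in this diff (the commented results follow directly from the documented rules):

    from mapFolding.beDRY import getLeavesTotal, getTaskDivisions, outfitCountFolds

    mapShape = (2, 3)
    leavesTotal = getLeavesTotal(mapShape)  # 2 * 3 = 6

    # None -> 0 (no division); an int passes through; 'maximum' -> leavesTotal; 'cpu' -> min(concurrencyLimit, leavesTotal)
    getTaskDivisions(None, concurrencyLimit=4, leavesTotal=leavesTotal)       # 0
    getTaskDivisions('maximum', concurrencyLimit=4, leavesTotal=leavesTotal)  # 6
    getTaskDivisions('cpu', concurrencyLimit=4, leavesTotal=leavesTotal)      # 4

    # outfitCountFolds bundles the same derivation into a ComputationState
    state = outfitCountFolds(mapShape, computationDivisions='cpu', concurrencyLimit=4)
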
mapFolding/noHomeYet.py
CHANGED
@@ -11,8 +11,8 @@ def makeDictionaryFoldsTotalKnown() -> dict[tuple[int, ...], int]:
 
     for n, foldingsTotal in sequence.items():
         mapShape = settings['getMapShape'](n)
-        mapShape = sorted(mapShape)
-        dictionaryMapDimensionsToFoldsTotalKnown[
+        mapShape = tuple(sorted(mapShape))
+        dictionaryMapDimensionsToFoldsTotalKnown[mapShape] = foldingsTotal
     return dictionaryMapDimensionsToFoldsTotalKnown
 
 def getFoldsTotalKnown(mapShape: tuple[int, ...]) -> int:
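
Note: the change above converts the sorted shape to a `tuple` before using it as a dictionary key. A short illustration of why, using only the standard library (the folds value here is illustrative, not a real entry):

    foldsTotalKnown: dict[tuple[int, ...], int] = {}
    mapShape = sorted((3, 2))                # sorted() returns a list: [2, 3]
    # foldsTotalKnown[mapShape] = 1234       # TypeError: unhashable type: 'list'
    foldsTotalKnown[tuple(mapShape)] = 1234  # tuples are hashable and match the annotated key type
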
mapFolding/oeis.py
CHANGED
@@ -144,10 +144,10 @@ def _parseBFileOEIS(OEISbFile: str, oeisID: str) -> dict[int, int]:
     return OEISsequence
 
 def getOEISofficial(pathFilenameCache: pathlib.Path, url: str) -> None | str:
-    tryCache = False
+    tryCache: bool = False
     if pathFilenameCache.exists():
         fileAge: timedelta = datetime.now() - datetime.fromtimestamp(pathFilenameCache.stat().st_mtime)
-        tryCache
+        tryCache = fileAge < timedelta(days=cacheDays)
 
     oeisInformation: str | None = None
     if tryCache:
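
Note: the updated `getOEISofficial` computes `tryCache` from the cache file's age, reusing the cache only while the file is younger than the freshness window. A standalone sketch of the same pattern; `cacheDays` is an assumed module-level setting, not spelled out in this hunk:

    from datetime import datetime, timedelta
    import pathlib

    cacheDays = 7  # assumed freshness window

    def isCacheFresh(pathFilenameCache: pathlib.Path) -> bool:
        tryCache: bool = False
        if pathFilenameCache.exists():
            fileAge: timedelta = datetime.now() - datetime.fromtimestamp(pathFilenameCache.stat().st_mtime)
            tryCache = fileAge < timedelta(days=cacheDays)
        return tryCache
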
mapFolding/someAssemblyRequired/Z0Z_workbench.py
CHANGED
@@ -1,33 +1,350 @@
-from 
-from mapFolding.
-from mapFolding.someAssemblyRequired
+from autoflake import fix_code as autoflake_fix_code
+from mapFolding.filesystem import writeStringToHere
+from mapFolding.someAssemblyRequired import (
+    ast_Identifier,
+    extractFunctionDef,
+    ifThis,
+    IngredientsFunction,
+    IngredientsModule,
+    LedgerOfImports,
+    Make,
+    makeDictionaryReplacementStatements,
+    NodeCollector,
+    NodeReplacer,
+    RecipeSynthesizeFlow,
+    strDotStrCuzPyStoopid,
+    Then,
+)
+from mapFolding.someAssemblyRequired.ingredientsNumba import decorateCallableWithNumba
+from mapFolding.someAssemblyRequired.synthesizeDataConverters import shatter_dataclassesDOTdataclass
+from mapFolding.theSSOT import raiseIfNoneGitHubIssueNumber3
+from pathlib import Path
 import ast
 
+# Would `LibCST` be better than `ast` in some cases? https://github.com/hunterhogan/mapFolding/issues/7
+
+def Z0Z_alphaTest_putModuleOnDisk(ingredients: IngredientsModule, recipeFlow: RecipeSynthesizeFlow):
+    # Physical namespace
+    filenameStem: str = recipeFlow.moduleDispatcher
+    fileExtension: str = recipeFlow.fileExtension
+    pathPackage: Path = Path(recipeFlow.pathPackage)
+
+    # Physical and logical namespace
+    packageName: ast_Identifier | None = recipeFlow.packageName # module name of the package, if any
+    logicalPathINFIX: ast_Identifier | strDotStrCuzPyStoopid | None = recipeFlow.Z0Z_flowLogicalPathRoot
+
+    def _getLogicalPathParent() -> str | None:
+        listModules: list[ast_Identifier] = []
+        if packageName:
+            listModules.append(packageName)
+        if logicalPathINFIX:
+            listModules.append(logicalPathINFIX)
+        if listModules:
+            return '.'.join(listModules)
+        return None
+
+    def _getLogicalPathAbsolute() -> str:
+        listModules: list[ast_Identifier] = []
+        logicalPathParent: str | None = _getLogicalPathParent()
+        if logicalPathParent:
+            listModules.append(logicalPathParent)
+        listModules.append(filenameStem)
+        return '.'.join(listModules)
+
+    def getPathFilename():
+        pathRoot: Path = pathPackage
+        filename: str = filenameStem + fileExtension
+        if logicalPathINFIX:
+            whyIsThisStillAThing: list[str] = logicalPathINFIX.split('.')
+            pathRoot = pathRoot.joinpath(*whyIsThisStillAThing)
+        return pathRoot.joinpath(filename)
+
+    def absoluteImport() -> ast.Import:
+        return Make.astImport(_getLogicalPathAbsolute())
+
+    def absoluteImportFrom() -> ast.ImportFrom:
+        """ `from . import theModule` """
+        logicalPathParent: str = _getLogicalPathParent() or '.'
+        return Make.astImportFrom(logicalPathParent, [Make.astAlias(filenameStem)])
+
+    def writeModule() -> None:
+        astModule = ingredients.export()
+        ast.fix_missing_locations(astModule)
+        pythonSource: str = ast.unparse(astModule)
+        if not pythonSource: raise raiseIfNoneGitHubIssueNumber3
+        autoflake_additional_imports: list[str] = ingredients.imports.exportListModuleNames()
+        if packageName:
+            autoflake_additional_imports.append(packageName)
+        pythonSource = autoflake_fix_code(pythonSource, autoflake_additional_imports, expand_star_imports=False, remove_all_unused_imports=False, remove_duplicate_keys = False, remove_unused_variables = False,)
+        pathFilename = getPathFilename()
+        writeStringToHere(pythonSource, pathFilename)
+
+    writeModule()
+
+def inlineThisFunctionWithTheseValues(astFunctionDef: ast.FunctionDef, dictionaryReplacementStatements: dict[str, ast.stmt | list[ast.stmt]]) -> ast.FunctionDef:
+    class FunctionInliner(ast.NodeTransformer):
+        def __init__(self, dictionaryReplacementStatements: dict[str, ast.stmt | list[ast.stmt]]) -> None:
+            self.dictionaryReplacementStatements = dictionaryReplacementStatements
+
+        def generic_visit(self, node: ast.AST) -> ast.AST:
+            """Visit all nodes and replace them if necessary."""
+            return super().generic_visit(node)
+
+        def visit_Expr(self, node: ast.Expr) -> ast.AST | list[ast.stmt]:
+            """Visit Expr nodes and replace value if it's a function call in our dictionary."""
+            if ifThis.CallDoesNotCallItselfAndNameDOTidIsIn(self.dictionaryReplacementStatements)(node.value):
+                return self.dictionaryReplacementStatements[node.value.func.id] # type: ignore[attr-defined]
+            return node
+
+        def visit_Assign(self, node: ast.Assign) -> ast.AST | list[ast.stmt]:
+            """Visit Assign nodes and replace value if it's a function call in our dictionary."""
+            if ifThis.CallDoesNotCallItselfAndNameDOTidIsIn(self.dictionaryReplacementStatements)(node.value):
+                return self.dictionaryReplacementStatements[node.value.func.id] # type: ignore[attr-defined]
+            return node
+
+        def visit_Call(self, node: ast.Call) -> ast.AST | list[ast.stmt]:
+            """Replace call nodes with their replacement statements if they're in the dictionary."""
+            if ifThis.CallDoesNotCallItselfAndNameDOTidIsIn(self.dictionaryReplacementStatements)(node):
+                replacement = self.dictionaryReplacementStatements[node.func.id] # type: ignore[attr-defined]
+                if not isinstance(replacement, list):
+                    return replacement
+            return node
+
+    import copy
+    keepGoing = True
+    ImaInlineFunction = copy.deepcopy(astFunctionDef)
+    while keepGoing:
+        ImaInlineFunction = copy.deepcopy(astFunctionDef)
+        FunctionInliner(copy.deepcopy(dictionaryReplacementStatements)).visit(ImaInlineFunction)
+        if ast.unparse(ImaInlineFunction) == ast.unparse(astFunctionDef):
+            keepGoing = False
+        else:
+            astFunctionDef = copy.deepcopy(ImaInlineFunction)
+    return ImaInlineFunction
+
+def replaceMatchingASTnodes(astTree: ast.AST, replacementMap: list[tuple[ast.AST, ast.AST]]) -> ast.AST:
+    """Replace matching AST nodes using type-specific visitors.
+
+    Parameters:
+        astTree: The AST to transform
+        replacementMap: List of (find, replace) node pairs
+
+    Returns:
+        The transformed AST
+    """
+    class TargetedNodeReplacer(ast.NodeTransformer):
+        def __init__(self, replacementMap: list[tuple[ast.AST, ast.AST]]) -> None:
+            # Group replacements by node type for more efficient lookups
+            self.replacementByType: dict[type[ast.AST], list[tuple[ast.AST, ast.AST]]] = {}
+            for findNode, replaceNode in replacementMap:
+                nodeType = type(findNode)
+                if nodeType not in self.replacementByType:
+                    self.replacementByType[nodeType] = []
+                self.replacementByType[nodeType].append((findNode, replaceNode))
+
+        def visit(self, node: ast.AST) -> ast.AST:
+            """Check if this node should be replaced before continuing traversal."""
+            nodeType = type(node)
+            if nodeType in self.replacementByType:
+                for findNode, replaceNode in self.replacementByType[nodeType]:
+                    if self.nodesMatchStructurally(node, findNode):
+                        return replaceNode
+            return super().visit(node)
+
+        def nodesMatchStructurally(self, node1: ast.AST | list, node2: ast.AST | list) -> bool:
+            """Compare two AST nodes structurally, ignoring position information."""
+            # Different types can't be equal
+            if type(node1) != type(node2):
+                return False
+
+            if isinstance(node1, ast.AST):
+                # Compare fields that matter for structural equality
+                fields = [f for f in node1._fields
+                    if f not in ('lineno', 'col_offset', 'end_lineno', 'end_col_offset', 'ctx')]
+
+                for field in fields:
+                    smurf1 = getattr(node1, field, None)
+                    smurf2 = getattr(node2, field, None)
+
+                    if isinstance(smurf1, (ast.AST, list)) and isinstance(smurf2, (ast.AST, list)):
+                        if not self.nodesMatchStructurally(smurf1, smurf2):
+                            return False
+                    elif smurf1 != smurf2:
+                        return False
+                return True
+
+            elif isinstance(node1, list) and isinstance(node2, list):
+                if len(node1) != len(node2):
+                    return False
+                return all(self.nodesMatchStructurally(x, y) for x, y in zip(node1, node2))
+
+            else:
+                # Direct comparison for non-AST objects (strings, numbers, etc.)
+                return node1 == node2
+
+    import copy
+    keepGoing = True
+    astResult = copy.deepcopy(astTree)
+
+    while keepGoing:
+        astBeforeChange = copy.deepcopy(astResult)
+        TargetedNodeReplacer(copy.deepcopy(replacementMap)).visit(astResult)
+
+        # Check if we've reached a fixed point (no more changes)
+        if ast.unparse(astResult) == ast.unparse(astBeforeChange):
+            keepGoing = False
+
+    return astResult
+
+def Z0Z_main() -> None:
+    numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()
+    dictionaryReplacementStatements = makeDictionaryReplacementStatements(numbaFlow.source_astModule)
+    # TODO remove hardcoding
+    theCountingIdentifierHARDCODED = 'groupsOfFolds'
+    theCountingIdentifier = theCountingIdentifierHARDCODED
+
+    # TODO remember that `sequentialCallable` and `sourceSequentialCallable` are two different values.
+    # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
+
+    # ===========================================================
+    sourcePython = numbaFlow.sourceDispatcherCallable
+    astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
+    if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
+    ingredientsDispatcher = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
+
+    # sourceParallelCallable
+    (astName_dataclassesDOTdataclass, ledgerDataclassANDFragments, listAnnAssign4DataclassUnpack,
+    astTuple4AssignTargetsToFragments, listNameDataclassFragments4Parameters, list_ast_argAnnotated4ArgumentsSpecification,
+    astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns, astAssignDataclassRepack, list_keyword4DataclassInitialization) = shatter_dataclassesDOTdataclass(
+        numbaFlow.logicalPathModuleDataclass, numbaFlow.sourceDataclassIdentifier, numbaFlow.sourceDataclassInstanceTaskDistribution)
+    ingredientsDispatcher.imports.update(ledgerDataclassANDFragments)
+
+    # TODO remove hardcoding
+    namespaceHARDCODED = 'concurrencyManager'
+    identifierHARDCODED = 'submit'
+    namespace = namespaceHARDCODED
+    identifier = identifierHARDCODED
+    NodeReplacer(
+        findThis = ifThis.isAssignAndValueIsCallNamespace_Identifier(namespace, identifier)
+        , doThat = Then.insertThisAbove(listAnnAssign4DataclassUnpack)
+        ).visit(ingredientsDispatcher.astFunctionDef)
+    NodeReplacer(
+        findThis = ifThis.isCallNamespace_Identifier(namespace, identifier)
+        , doThat = Then.replaceWith(Make.astCall(Make.astAttribute(Make.astName(namespace), identifier)
+            , listArguments=[Make.astName(numbaFlow.parallelCallable)] + listNameDataclassFragments4Parameters))
+        ).visit(ingredientsDispatcher.astFunctionDef)
+
+    CapturedAssign: list[ast.AST] = []
+    CapturedCall: list[ast.Call] = []
+    findThis = ifThis.isCall
+    doThat = [Then.appendTo(CapturedCall)]
+    capture = NodeCollector(findThis, doThat)
+
+    NodeCollector(
+        findThis = ifThis.isAssignAndTargets0Is(ifThis.isSubscript_Identifier(numbaFlow.sourceDataclassInstance))
+        , doThat = [Then.appendTo(CapturedAssign)
+            , lambda node: capture.visit(node)]
+        ).visit(ingredientsDispatcher.astFunctionDef)
+
+    newAssign = CapturedAssign[0]
+    NodeReplacer(
+        findThis = lambda node: ifThis.isSubscript(node) and ifThis.isAttribute(node.value) and ifThis.isCall(node.value.value)
+        , doThat = Then.replaceWith(CapturedCall[0])
+        ).visit(newAssign)
+
+    NodeReplacer(
+        findThis = ifThis.isAssignAndTargets0Is(ifThis.isSubscript_Identifier(numbaFlow.sourceDataclassInstance))
+        , doThat = Then.replaceWith(newAssign)
+        ).visit(ingredientsDispatcher.astFunctionDef)
+
+    # sourceSequentialCallable
+    (astName_dataclassesDOTdataclass, ledgerDataclassANDFragments, listAnnAssign4DataclassUnpack,
+    astTuple4AssignTargetsToFragments, listNameDataclassFragments4Parameters, list_ast_argAnnotated4ArgumentsSpecification,
+    astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns, astAssignDataclassRepack, list_keyword4DataclassInitialization) = shatter_dataclassesDOTdataclass(
+        numbaFlow.logicalPathModuleDataclass, numbaFlow.sourceDataclassIdentifier, numbaFlow.sourceDataclassInstance)
+    ingredientsDispatcher.imports.update(ledgerDataclassANDFragments)
+
+    NodeReplacer(
+        findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
+        , doThat = Then.insertThisAbove(listAnnAssign4DataclassUnpack)
+        ).visit(ingredientsDispatcher.astFunctionDef)
+    NodeReplacer(
+        findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
+        # findThis = ifThis.isReturn
+        , doThat = Then.insertThisBelow([astAssignDataclassRepack])
+        ).visit(ingredientsDispatcher.astFunctionDef)
+    # TODO reconsider: This calls a function, but I don't inspect the function for its parameters or return.
+    NodeReplacer(
+        findThis = ifThis.isAssignAndValueIsCall_Identifier(numbaFlow.sourceSequentialCallable)
+        , doThat = Then.replaceWith(Make.astAssign(listTargets=[astTuple4AssignTargetsToFragments], value=Make.astCall(Make.astName(numbaFlow.sequentialCallable), listNameDataclassFragments4Parameters)))
+        ).visit(ingredientsDispatcher.astFunctionDef)
+
+    # ===========================================================
+    sourcePython = numbaFlow.sourceInitializeCallable
+    astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
+    if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
+    astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
+    ingredientsInitialize = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
+
+    # ===========================================================
+    sourcePython = numbaFlow.sourceParallelCallable
+    astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
+    if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
+    astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
+    ingredientsParallel = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
+    ingredientsParallel.astFunctionDef.name = numbaFlow.parallelCallable
+    ingredientsParallel.astFunctionDef.args = Make.astArgumentsSpecification(args=list_ast_argAnnotated4ArgumentsSpecification)
+    NodeReplacer(
+        findThis = ifThis.isReturn
+        , doThat = Then.replaceWith(Make.astReturn(astTuple4AssignTargetsToFragments))
+        ).visit(ingredientsParallel.astFunctionDef)
+    NodeReplacer(
+        findThis = ifThis.isReturn
+        # , doThat = Then.replaceWith(Make.astReturn(astTuple4AssignTargetsToFragments))
+        , doThat = Then.replaceWith(Make.astReturn(Make.astName(theCountingIdentifier)))
+        ).visit(ingredientsParallel.astFunctionDef)
+    theCountingIdentifierAnnotation = next(
+        ast_arg.annotation for ast_arg in list_ast_argAnnotated4ArgumentsSpecification if ast_arg.arg == theCountingIdentifier)
+    ingredientsParallel.astFunctionDef.returns = theCountingIdentifierAnnotation
+    # ingredientsParallel.astFunctionDef.returns = astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns
+    replacementMap = [(statement.value, statement.target) for statement in listAnnAssign4DataclassUnpack]
+    ingredientsParallel.astFunctionDef = replaceMatchingASTnodes(
+        ingredientsParallel.astFunctionDef, replacementMap) # type: ignore
+    # TODO a tool to automatically remove unused variables from the ArgumentsSpecification (return, and returns) _might_ be nice.
+    # But, I would need to update the calling function, too.
+    ingredientsParallel = decorateCallableWithNumba(ingredientsParallel) # parametersNumbaParallelDEFAULT
+
+    # ===========================================================
+    sourcePython = numbaFlow.sourceSequentialCallable
+    astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
+    if not astFunctionDef: raise raiseIfNoneGitHubIssueNumber3
+    astFunctionDef = inlineThisFunctionWithTheseValues(astFunctionDef, dictionaryReplacementStatements)
+    ingredientsSequential = IngredientsFunction(astFunctionDef, LedgerOfImports(numbaFlow.source_astModule))
+    ingredientsSequential.astFunctionDef.name = numbaFlow.sequentialCallable
+    ingredientsSequential.astFunctionDef.args = Make.astArgumentsSpecification(args=list_ast_argAnnotated4ArgumentsSpecification)
+    NodeReplacer(
+        findThis = ifThis.isReturn
+        , doThat = Then.replaceWith(Make.astReturn(astTuple4AssignTargetsToFragments))
+        ).visit(ingredientsSequential.astFunctionDef)
+    NodeReplacer(
+        findThis = ifThis.isReturn
+        , doThat = Then.replaceWith(Make.astReturn(astTuple4AssignTargetsToFragments))
+        ).visit(ingredientsSequential.astFunctionDef)
+    ingredientsSequential.astFunctionDef.returns = astSubscriptPrimitiveTupleAnnotations4FunctionDef_returns
+    replacementMap = [(statement.value, statement.target) for statement in listAnnAssign4DataclassUnpack]
+    ingredientsSequential.astFunctionDef = replaceMatchingASTnodes(
+        ingredientsSequential.astFunctionDef, replacementMap) # type: ignore
+    # TODO a tool to automatically remove unused variables from the ArgumentsSpecification (return, and returns) _might_ be nice.
+    # But, I would need to update the calling function, too.
+    ingredientsSequential = decorateCallableWithNumba(ingredientsSequential)
+
+    ingredientsModuleNumbaUnified = IngredientsModule(
+        ingredientsFunction=[ingredientsInitialize,
+            ingredientsParallel,
+            ingredientsSequential,
+            ingredientsDispatcher], imports=LedgerOfImports(numbaFlow.source_astModule))
+
+    Z0Z_alphaTest_putModuleOnDisk(ingredientsModuleNumbaUnified, numbaFlow)
+
 if __name__ == '__main__':
-
-    dataclassIdentifier=numbaFlow.sourceDataclassIdentifier
-    , logicalPathModuleDataclass=numbaFlow.logicalPathModuleDataclass
-    , dataclassInstance=numbaFlow.dataclassInstance
-
-    , dispatcherCallable=numbaFlow.dispatcherCallable
-    , logicalPathModuleDispatcher=numbaFlow.logicalPathModuleDispatcher
-    , dataConverterCallable=numbaFlow.dataConverterCallable
-    )
-
-    # initialize with theDao
-    dataInitializationHack = "state=makeStateJob(state.mapShape,writeJob=False)"
-    ingredientsFunctionDataConverter.FunctionDef.body.insert(0, ast.parse(dataInitializationHack).body[0])
-    ingredientsFunctionDataConverter.imports.addImportFromStr('mapFolding.someAssemblyRequired', 'makeStateJob')
-
-    ingredientsSequential = Z0Z_makeCountingFunction(numbaFlow.sequentialCallable
-    , numbaFlow.sourceAlgorithm
-    , inline=True
-    , dataclass=False)
-
-    ingredientsModuleDataConverter = IngredientsModule(
-        name=numbaFlow.dataConverterModule,
-        ingredientsFunction=ingredientsFunctionDataConverter,
-        logicalPathINFIX=numbaFlow.Z0Z_flowLogicalPathRoot,
-    )
-
-    ingredientsModuleDataConverter.writeModule()
+    Z0Z_main()
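
Note: both `inlineThisFunctionWithTheseValues` and `replaceMatchingASTnodes` in the new `Z0Z_workbench.py` drive an `ast.NodeTransformer` to a fixed point, re-running it until the `ast.unparse` output stops changing. A toy, self-contained illustration of that loop; the helper and transformer names here are hypothetical, not part of mapFolding:

    import ast, copy

    def rewriteUntilStable(astTree: ast.AST, transformer: ast.NodeTransformer) -> ast.AST:
        # Apply the transformer repeatedly until the unparsed source stops changing.
        astResult = copy.deepcopy(astTree)
        while True:
            sourceBefore = ast.unparse(astResult)
            astResult = ast.fix_missing_locations(transformer.visit(astResult))
            if ast.unparse(astResult) == sourceBefore:
                return astResult

    class RenameX(ast.NodeTransformer):
        # Toy transformer: rename every Name node `x` to `leavesTotal`.
        def visit_Name(self, node: ast.Name) -> ast.AST:
            if node.id == 'x':
                node.id = 'leavesTotal'
            return node

    module = ast.parse("total = x * x")
    print(ast.unparse(rewriteUntilStable(module, RenameX())))  # total = leavesTotal * leavesTotal
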
mapFolding/someAssemblyRequired/__init__.py
CHANGED
@@ -1,16 +1,17 @@
-from mapFolding.someAssemblyRequired.
+from mapFolding.someAssemblyRequired.transformationTools import (
     ast_Identifier as ast_Identifier,
     extractClassDef as extractClassDef,
     extractFunctionDef as extractFunctionDef,
-    executeActionUnlessDescendantMatches as executeActionUnlessDescendantMatches,
     ifThis as ifThis,
     IngredientsFunction as IngredientsFunction,
     IngredientsModule as IngredientsModule,
     LedgerOfImports as LedgerOfImports,
-    listNumbaCallableDispatchees as listNumbaCallableDispatchees,
     Make as Make,
+    makeDictionaryReplacementStatements as makeDictionaryReplacementStatements,
     NodeCollector as NodeCollector,
     NodeReplacer as NodeReplacer,
+    RecipeSynthesizeFlow as RecipeSynthesizeFlow,
     strDotStrCuzPyStoopid as strDotStrCuzPyStoopid,
     Then as Then,
+    Z0Z_executeActionUnlessDescendantMatches as Z0Z_executeActionUnlessDescendantMatches,
 )