mapFolding 0.4.0-py3-none-any.whl → 0.4.2-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.
mapFolding/__init__.py CHANGED
@@ -22,7 +22,7 @@ from mapFolding.theSSOT import (
22
22
  # Synthesize modules
23
23
  from mapFolding.theSSOT import (
24
24
  formatModuleNameDEFAULT,
25
- getAlgorithmCallable,
25
+ getAlgorithmDispatcher,
26
26
  getAlgorithmSource,
27
27
  getPathJobRootDEFAULT,
28
28
  getPathSyntheticModules,
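
The only change in `__init__.py` is the renamed re-export. A minimal usage sketch (hypothetical call site; the accessor still returns `doTheNeedful` from the pure-Python algorithm source):

```python
# Hypothetical call site, a sketch only: getAlgorithmCallable was renamed
# to getAlgorithmDispatcher; it still returns the `doTheNeedful` dispatcher
# from the pure-Python algorithm module (mapFolding.theDao).
from mapFolding import getAlgorithmDispatcher

dispatcher = getAlgorithmDispatcher()  # Callable[..., None]
assert dispatcher.__name__ == 'doTheNeedful'
```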
mapFolding/beDRY.py CHANGED
@@ -2,6 +2,7 @@
2
2
  from mapFolding import (
3
3
  computationState,
4
4
  getPathJobRootDEFAULT,
5
+ hackSSOTdatatype,
5
6
  hackSSOTdtype,
6
7
  indexMy,
7
8
  indexTrack,
@@ -9,9 +10,9 @@ from mapFolding import (
9
10
  setDatatypeFoldsTotal,
10
11
  setDatatypeLeavesTotal,
11
12
  )
12
- from numpy import integer
13
+ from numpy import dtype, integer, ndarray
13
14
  from numpy.typing import DTypeLike, NDArray
14
- from typing import Any, List, Optional, Sequence, Tuple, Type, Union
15
+ from typing import Any, List, Optional, Sequence, Tuple, Union
15
16
  from Z0Z_tools import defineConcurrencyLimit, intInnit, oopsieKwargsie
16
17
  import numba
17
18
  import numpy
@@ -19,7 +20,7 @@ import os
19
20
  import pathlib
20
21
  import sys
21
22
 
22
- def getFilenameFoldsTotal(mapShape: Union[Sequence[int], numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]]) -> str:
23
+ def getFilenameFoldsTotal(mapShape: Union[Sequence[int], ndarray[Tuple[int], dtype[integer[Any]]]]) -> str:
23
24
  """Make a standardized filename for the computed value `foldsTotal`.
24
25
 
25
26
  The filename takes into account
@@ -67,30 +68,29 @@ def getLeavesTotal(listDimensions: Sequence[int]) -> int:
67
68
 
68
69
  return productDimensions
69
70
 
70
- def getPathFilenameFoldsTotal(mapShape: Union[Sequence[int], numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]], pathLikeWriteFoldsTotal: Optional[Union[str, os.PathLike[str]]] = None) -> pathlib.Path:
71
- """Get path for folds total file.
71
+ def getPathFilenameFoldsTotal(mapShape: Union[Sequence[int], ndarray[Tuple[int], dtype[integer[Any]]]], pathLikeWriteFoldsTotal: Optional[Union[str, os.PathLike[str]]] = None) -> pathlib.Path:
72
+ """Get a standardized path and filename for the computed value `foldsTotal`.
72
73
 
73
- This function determines the file path for storing fold totals. If a path is provided,
74
- it will use that path. If the path is a directory, it will append a default filename.
75
- The function ensures the parent directory exists by creating it if necessary.
74
+ If you provide a directory, the function will append a standardized filename. If you provide a filename
75
+ or a relative path and filename, the function will prepend the default path.
76
76
 
77
77
  Parameters:
78
- mapShape (Sequence[int]): List of dimensions for the map folding problem.
79
- pathLikeWriteFoldsTotal (Union[str, os.PathLike[str]], optional): Path where to save
80
- the folds total. Can be a file path or directory path. If None, uses default path.
78
+ mapShape: List of dimensions for the map folding problem.
79
+ pathLikeWriteFoldsTotal (pathJobRootDEFAULT): Path, filename, or relative path and filename. If None, uses default path.
81
80
  Defaults to None.
82
81
 
83
82
  Returns:
84
- pathlib.Path: Complete path to the folds total file.
83
+ pathFilenameFoldsTotal: Absolute path and filename.
85
84
  """
86
- pathFilenameFoldsTotal = pathlib.Path(pathLikeWriteFoldsTotal) if pathLikeWriteFoldsTotal is not None else getPathJobRootDEFAULT()
87
- if pathFilenameFoldsTotal.is_dir():
88
- filenameFoldsTotalDEFAULT = getFilenameFoldsTotal(mapShape)
89
- pathFilenameFoldsTotal = pathFilenameFoldsTotal / filenameFoldsTotalDEFAULT
90
- elif pathlib.Path(pathLikeWriteFoldsTotal).is_absolute(): # type: ignore
91
- pathFilenameFoldsTotal = pathlib.Path(pathLikeWriteFoldsTotal) # type: ignore
85
+ pathLikeSherpa = pathlib.Path(pathLikeWriteFoldsTotal) if pathLikeWriteFoldsTotal is not None else None
86
+ if not pathLikeSherpa:
87
+ pathLikeSherpa = getPathJobRootDEFAULT()
88
+ if pathLikeSherpa.is_dir():
89
+ pathFilenameFoldsTotal = pathLikeSherpa / getFilenameFoldsTotal(mapShape)
90
+ elif pathLikeSherpa.is_absolute():
91
+ pathFilenameFoldsTotal = pathLikeSherpa
92
92
  else:
93
- pathFilenameFoldsTotal = pathlib.Path(getPathJobRootDEFAULT(), pathLikeWriteFoldsTotal) # type: ignore
93
+ pathFilenameFoldsTotal = getPathJobRootDEFAULT() / pathLikeSherpa
94
94
 
95
95
  pathFilenameFoldsTotal.parent.mkdir(parents=True, exist_ok=True)
96
96
  return pathFilenameFoldsTotal
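
A short sketch of the three cases the rewritten path logic handles (paths are illustrative, not real output):

```python
# Illustrative calls, a sketch of the three branches; each call also
# creates the parent directory of the returned path.
from mapFolding import getPathFilenameFoldsTotal
import pathlib

# No path given: default job root plus the standardized filename.
getPathFilenameFoldsTotal([2, 5])
# An existing directory: the standardized filename is appended to it.
getPathFilenameFoldsTotal([2, 5], pathlib.Path.home())
# A relative path or bare filename: the default job root is prepended.
getPathFilenameFoldsTotal([2, 5], 'p2x5/foldsTotal.txt')
```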
@@ -107,23 +107,23 @@ def getTaskDivisions(computationDivisions: Optional[Union[int, str]], concurrenc
107
107
  - int: direct set the number of task divisions; cannot exceed the map's total leaves
108
108
  - "maximum": divides into `leavesTotal`-many `taskDivisions`
109
109
  - "cpu": limits the divisions to the number of available CPUs, i.e. `concurrencyLimit`
110
- concurrencyLimit:
111
- Maximum number of concurrent tasks allowed
112
- CPUlimit: for error reporting
113
- listDimensions: for error reporting
110
+ concurrencyLimit:
111
+ Maximum number of concurrent tasks allowed
112
+ CPUlimit: for error reporting
113
+ listDimensions: for error reporting
114
114
 
115
115
  Returns
116
116
  -------
117
- taskDivisions:
117
+ taskDivisions:
118
118
 
119
119
  Raises
120
120
  ------
121
- ValueError
122
- If computationDivisions is an unsupported type or if resulting task divisions exceed total leaves
121
+ ValueError
122
+ If computationDivisions is an unsupported type or if resulting task divisions exceed total leaves
123
123
 
124
124
  Notes
125
125
  -----
126
- Task divisions cannot exceed total leaves to prevent duplicate counting of folds.
126
+ Task divisions should not exceed total leaves to prevent duplicate counting of folds.
127
127
  """
128
128
  taskDivisions = 0
129
129
  leavesTotal = getLeavesTotal(listDimensions)
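
The docstring edits above are cosmetic; the option semantics are unchanged. A sketch (not the library function) of the rules the docstring lists, with `leavesTotal` and `concurrencyLimit` as assumed inputs:

```python
# A sketch of the documented rules, not the library implementation.
def taskDivisionsSketch(computationDivisions, leavesTotal, concurrencyLimit):
    if computationDivisions is None:
        return 0                            # no division into tasks
    if computationDivisions == "maximum":
        return leavesTotal                  # leavesTotal-many task divisions
    if computationDivisions == "cpu":
        return min(concurrencyLimit, leavesTotal)   # capped by available CPUs
    if isinstance(computationDivisions, int):
        if computationDivisions > leavesTotal:
            raise ValueError("task divisions cannot exceed total leaves")
        return computationDivisions
    raise ValueError(f"unsupported computationDivisions: {computationDivisions!r}")
```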
@@ -145,7 +145,7 @@ def getTaskDivisions(computationDivisions: Optional[Union[int, str]], concurrenc
145
145
 
146
146
  return taskDivisions
147
147
 
148
- def makeConnectionGraph(listDimensions: Sequence[int], **keywordArguments: Optional[Type]) -> numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]]:
148
+ def makeConnectionGraph(listDimensions: Sequence[int], **keywordArguments: Optional[str]) -> ndarray[Tuple[int, int, int], dtype[integer[Any]]]:
149
149
  """
150
150
  Constructs a multi-dimensional connection graph representing the connections between the leaves of a map with the given dimensions.
151
151
  Also called a Cartesian product decomposition or dimensional product mapping.
@@ -157,21 +157,22 @@ def makeConnectionGraph(listDimensions: Sequence[int], **keywordArguments: Optio
157
157
  Returns
158
158
  connectionGraph: A 3D numpy array with shape of (dimensionsTotal, leavesTotal + 1, leavesTotal + 1).
159
159
  """
160
- if keywordArguments.get('datatype', None):
161
- setDatatypeLeavesTotal(keywordArguments['datatype']) # type: ignore
162
- datatype = hackSSOTdtype('connectionGraph')
160
+ ImaSetTheDatatype = keywordArguments.get('datatype', None)
161
+ if ImaSetTheDatatype:
162
+ setDatatypeLeavesTotal(ImaSetTheDatatype)
163
+ dtype = hackSSOTdtype('connectionGraph')
163
164
  mapShape = validateListDimensions(listDimensions)
164
165
  leavesTotal = getLeavesTotal(mapShape)
165
- arrayDimensions = numpy.array(mapShape, dtype=datatype)
166
+ arrayDimensions = numpy.array(mapShape, dtype=dtype)
166
167
  dimensionsTotal = len(arrayDimensions)
167
168
 
168
- cumulativeProduct = numpy.multiply.accumulate([1] + mapShape, dtype=datatype)
169
- coordinateSystem = numpy.zeros((dimensionsTotal, leavesTotal + 1), dtype=datatype)
169
+ cumulativeProduct = numpy.multiply.accumulate([1] + mapShape, dtype=dtype)
170
+ coordinateSystem = numpy.zeros((dimensionsTotal, leavesTotal + 1), dtype=dtype)
170
171
  for indexDimension in range(dimensionsTotal):
171
172
  for leaf1ndex in range(1, leavesTotal + 1):
172
173
  coordinateSystem[indexDimension, leaf1ndex] = ( ((leaf1ndex - 1) // cumulativeProduct[indexDimension]) % arrayDimensions[indexDimension] + 1 )
173
174
 
174
- connectionGraph = numpy.zeros((dimensionsTotal, leavesTotal + 1, leavesTotal + 1), dtype=datatype)
175
+ connectionGraph = numpy.zeros((dimensionsTotal, leavesTotal + 1, leavesTotal + 1), dtype=dtype)
175
176
  for indexDimension in range(dimensionsTotal):
176
177
  for activeLeaf1ndex in range(1, leavesTotal + 1):
177
178
  for connectee1ndex in range(1, activeLeaf1ndex + 1):
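
The keyword annotation changed from `Optional[Type]` to `Optional[str]`, matching how the value is handed to `setDatatypeLeavesTotal`. An illustrative call, a sketch (assuming `'uint8'` is an accepted datatype name; passing `datatype` also updates the shared SSOT setting):

```python
# Illustrative call, a sketch; 'uint8' is an assumed datatype name.
from mapFolding.beDRY import getLeavesTotal, makeConnectionGraph

mapShape = [2, 3]
connectionGraph = makeConnectionGraph(mapShape, datatype='uint8')
leavesTotal = getLeavesTotal(mapShape)
# Shape is documented as (dimensionsTotal, leavesTotal + 1, leavesTotal + 1).
assert connectionGraph.shape == (len(mapShape), leavesTotal + 1, leavesTotal + 1)
```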
@@ -190,70 +191,54 @@ def makeConnectionGraph(listDimensions: Sequence[int], **keywordArguments: Optio
190
191
  return connectionGraph
191
192
 
192
193
  def makeDataContainer(shape: Union[int, Tuple[int, ...]], datatype: Optional[DTypeLike] = None) -> NDArray[integer[Any]]:
193
- """Create a zeroed-out `numpy.ndarray` with the given shape and datatype.
194
+ """Create a zeroed-out `ndarray` with the given shape and datatype.
194
195
 
195
196
  Parameters:
196
- shape (Union[int, Tuple[int, ...]]): The shape of the array. Can be an integer for 1D arrays
197
+ shape: The shape of the array. Can be an integer for 1D arrays
197
198
  or a tuple of integers for multi-dimensional arrays.
198
- datatype (Optional[DTypeLike], optional): The desired data type for the array.
199
+ datatype: The desired data type for the array.
199
200
  If None, defaults to dtypeLargeDEFAULT. Defaults to None.
200
201
 
201
202
  Returns:
202
- numpy.ndarray: A new array of given shape and type, filled with zeros.
203
+ dataContainer: A new array of given shape and type, filled with zeros.
203
204
  """
204
205
  if datatype is None:
205
206
  datatype = hackSSOTdtype('dtypeFoldsTotal')
206
207
  return numpy.zeros(shape, dtype=datatype)
207
208
 
208
- def outfitCountFolds(listDimensions: Sequence[int]
209
- , computationDivisions: Optional[Union[int, str]] = None
210
- , CPUlimit: Optional[Union[bool, float, int]] = None
211
- , **keywordArguments: Optional[Union[str, bool]]) -> computationState:
209
+ def outfitCountFolds(listDimensions: Sequence[int], computationDivisions: Optional[Union[int, str]] = None, CPUlimit: Optional[Union[bool, float, int]] = None, **keywordArguments: Optional[Union[str, bool]]) -> computationState:
212
210
  """
213
211
  Initializes and configures the computation state for map folding computations.
214
212
 
215
- Parameters
216
- ----------
217
- listDimensions:
218
- The dimensions of the map to be folded
219
- computationDivisions (None):
220
- Specifies how to divide computations:
221
- - None: no division of the computation into tasks; sets task divisions to 0
222
- - int: direct set the number of task divisions; cannot exceed the map's total leaves
223
- - "maximum": divides into `leavesTotal`-many `taskDivisions`
224
- - "cpu": limits the divisions to the number of available CPUs, i.e. `concurrencyLimit`
225
- CPUlimit (None):
226
- Whether and how to limit the CPU usage. See notes for details.
227
- **keywordArguments:
228
- Datatype management.
229
-
230
- Returns
231
- -------
232
- computationState
233
- An initialized computation state containing:
234
- - connectionGraph: Graph representing connections in the map
235
- - foldsSubTotals: Array tracking total folds
236
- - mapShape: Validated and sorted dimensions of the map
237
- - my: Array for internal state tracking
238
- - gapsWhere: Array tracking gap positions
239
- - the: Static settings and metadata
240
- - track: Array for tracking computation progress
213
+ Parameters:
214
+ listDimensions: The dimensions of the map to be folded
215
+ computationDivisions (None): see `getTaskDivisions`
216
+ CPUlimit (None): see `setCPUlimit`
217
+ **keywordArguments: Datatype management.
241
218
 
242
- Limits on CPU usage `CPUlimit`:
243
- - `False`, `None`, or `0`: No limits on CPU usage; uses all available CPUs. All other values will potentially limit CPU usage.
244
- - `True`: Yes, limit the CPU usage; limits to 1 CPU.
245
- - Integer `>= 1`: Limits usage to the specified number of CPUs.
246
- - Decimal value (`float`) between 0 and 1: Fraction of total CPUs to use.
247
- - Decimal value (`float`) between -1 and 0: Fraction of CPUs to *not* use.
248
- - Integer `<= -1`: Subtract the absolute value from total CPUs.
219
+ Returns:
220
+ stateInitialized: The initialized computation state
249
221
  """
250
- kwourGrapes = keywordArguments.get('sourGrapes', False)
251
- kwatatype = keywordArguments.get('datatypeElephino', None)
252
- if kwatatype: setDatatypeElephino(kwatatype, sourGrapes=kwourGrapes) # type: ignore
253
- kwatatype = keywordArguments.get('datatypeFoldsTotal', None)
254
- if kwatatype: setDatatypeFoldsTotal(kwatatype, sourGrapes=kwourGrapes) # type: ignore
255
- kwatatype = keywordArguments.get('datatypeLeavesTotal', None)
256
- if kwatatype: setDatatypeLeavesTotal(kwatatype, sourGrapes=kwourGrapes) # type: ignore
222
+ kwourGrapes = keywordArguments.get('sourGrapes', None)
223
+ if kwourGrapes:
224
+ sourGrapes = True
225
+ else:
226
+ sourGrapes = False
227
+
228
+ ImaSetTheDatatype = keywordArguments.get('datatypeElephino', None)
229
+ if ImaSetTheDatatype:
230
+ ImaSetTheDatatype = str(ImaSetTheDatatype)
231
+ setDatatypeElephino(ImaSetTheDatatype, sourGrapes)
232
+
233
+ ImaSetTheDatatype = keywordArguments.get('datatypeFoldsTotal', None)
234
+ if ImaSetTheDatatype:
235
+ ImaSetTheDatatype = str(ImaSetTheDatatype)
236
+ setDatatypeFoldsTotal(ImaSetTheDatatype, sourGrapes)
237
+
238
+ ImaSetTheDatatype = keywordArguments.get('datatypeLeavesTotal', None)
239
+ if ImaSetTheDatatype:
240
+ ImaSetTheDatatype = str(ImaSetTheDatatype)
241
+ setDatatypeLeavesTotal(ImaSetTheDatatype, sourGrapes)
257
242
 
258
243
  my = makeDataContainer(len(indexMy), hackSSOTdtype('my'))
259
244
 
@@ -268,7 +253,7 @@ def outfitCountFolds(listDimensions: Sequence[int]
268
253
  my[indexMy.dimensionsTotal] = len(mapShape)
269
254
  my[indexMy.leaf1ndex] = 1
270
255
  stateInitialized = computationState(
271
- connectionGraph = makeConnectionGraph(mapShape, datatype=hackSSOTdtype('connectionGraph')),
256
+ connectionGraph = makeConnectionGraph(mapShape, datatype=hackSSOTdatatype('connectionGraph')),
272
257
  foldGroups = foldGroups,
273
258
  mapShape = numpy.array(mapShape, dtype=hackSSOTdtype('mapShape')),
274
259
  my = my,
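
The keyword handling in `outfitCountFolds` now coerces each datatype override to `str` and resolves `sourGrapes` to a plain boolean before forwarding. A hypothetical call, a sketch only:

```python
# Hypothetical call, a sketch: the datatype overrides are coerced to str
# and forwarded together with the boolean sourGrapes flag.
from mapFolding.beDRY import outfitCountFolds

stateInitialized = outfitCountFolds(
    [3, 3],
    computationDivisions=None,   # see getTaskDivisions
    CPUlimit=0.5,                # see setCPUlimit; 0.5 = half the CPUs
    datatypeFoldsTotal='int64',  # forwarded to setDatatypeFoldsTotal
    sourGrapes=True,
)
```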
mapFolding/oeis.py CHANGED
@@ -37,31 +37,31 @@ settingsOEIShardcodedValues: Dict[str, Dict[str, Any]] = {
37
37
  'getMapShape': lambda n: sorted([2, n]),
38
38
  'valuesBenchmark': [14],
39
39
  'valuesTestParallelization': [*range(3, 7)],
40
- 'valuesTestValidation': [0, 1, random.randint(2, 9)],
40
+ 'valuesTestValidation': [random.randint(2, 9)],
41
41
  },
42
42
  'A001416': {
43
43
  'getMapShape': lambda n: sorted([3, n]),
44
44
  'valuesBenchmark': [9],
45
45
  'valuesTestParallelization': [*range(3, 5)],
46
- 'valuesTestValidation': [0, 1, random.randint(2, 6)],
46
+ 'valuesTestValidation': [random.randint(2, 6)],
47
47
  },
48
48
  'A001417': {
49
49
  'getMapShape': lambda n: [2] * n,
50
50
  'valuesBenchmark': [6],
51
51
  'valuesTestParallelization': [*range(2, 4)],
52
- 'valuesTestValidation': [0, 1, random.randint(2, 4)],
52
+ 'valuesTestValidation': [random.randint(2, 4)],
53
53
  },
54
54
  'A195646': {
55
55
  'getMapShape': lambda n: [3] * n,
56
56
  'valuesBenchmark': [3],
57
57
  'valuesTestParallelization': [*range(2, 3)],
58
- 'valuesTestValidation': [0, 1, 2],
58
+ 'valuesTestValidation': [2],
59
59
  },
60
60
  'A001418': {
61
61
  'getMapShape': lambda n: [n, n],
62
62
  'valuesBenchmark': [5],
63
63
  'valuesTestParallelization': [*range(2, 4)],
64
- 'valuesTestValidation': [1, random.randint(2, 4)],
64
+ 'valuesTestValidation': [random.randint(2, 4)],
65
65
  },
66
66
  }
67
67
 
@@ -229,7 +229,7 @@ def makeSettingsOEIS() -> Dict[str, SettingsOEIS]:
229
229
  getMapShape=settingsOEIShardcodedValues[oeisID]['getMapShape'],
230
230
  valuesBenchmark=settingsOEIShardcodedValues[oeisID]['valuesBenchmark'],
231
231
  valuesTestParallelization=settingsOEIShardcodedValues[oeisID]['valuesTestParallelization'],
232
- valuesTestValidation=settingsOEIShardcodedValues[oeisID]['valuesTestValidation'],
232
+ valuesTestValidation=settingsOEIShardcodedValues[oeisID]['valuesTestValidation'] + list(range(offsetSherpa, 2)),
233
233
  valuesKnown=valuesKnownSherpa,
234
234
  valueUnknown=max(valuesKnownSherpa.keys(), default=0) + 1
235
235
  )
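
The hardcoded `0` and `1` entries were dropped from `valuesTestValidation` because `makeSettingsOEIS` now appends `range(offsetSherpa, 2)`. A sketch of the resulting values, assuming `offsetSherpa` is the sequence's OEIS offset:

```python
# A sketch, assuming offsetSherpa is the sequence's OEIS offset.
offsetSherpa = 1                 # hypothetical offset
hardcodedValues = [2]            # e.g. A195646 above
valuesTestValidation = hardcodedValues + list(range(offsetSherpa, 2))
assert valuesTestValidation == [2, 1]   # an offset of 0 would give [2, 0, 1]
```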
@@ -1,2 +1,5 @@
1
1
  from mapFolding.someAssemblyRequired.getLLVMforNoReason import writeModuleLLVM
2
2
  from mapFolding.someAssemblyRequired.makeJob import makeStateJob
3
+ from mapFolding.someAssemblyRequired.synthesizeNumbaGeneralized import youOughtaKnow
4
+ from mapFolding.someAssemblyRequired.synthesizeNumbaJob import writeJobNumba
5
+ from mapFolding.someAssemblyRequired.synthesizeNumbaModules import makeFlowNumbaOptimized
@@ -5,15 +5,9 @@ import pathlib
5
5
  import pickle
6
6
 
7
7
  @overload
8
- def makeStateJob(listDimensions: Sequence[int], *, writeJob: Literal[True]
9
- , **keywordArguments: Optional[str]) -> pathlib.Path:
10
- ...
11
-
8
+ def makeStateJob(listDimensions: Sequence[int], *, writeJob: Literal[True] , **keywordArguments: Optional[str]) -> pathlib.Path: ...
12
9
  @overload
13
- def makeStateJob(listDimensions: Sequence[int], *, writeJob: Literal[False]
14
- , **keywordArguments: Optional[str]) -> computationState:
15
- ...
16
-
10
+ def makeStateJob(listDimensions: Sequence[int], *, writeJob: Literal[False] , **keywordArguments: Optional[str]) -> computationState: ...
17
11
  def makeStateJob(listDimensions: Sequence[int], *, writeJob: bool = True, **keywordArguments: Optional[Any]) -> computationState | pathlib.Path:
18
12
  """
19
13
  Creates a computation state job for map folding calculations and optionally saves it to disk.
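
The two overloads were collapsed onto single lines; behavior is unchanged. A usage sketch of the pair:

```python
# Usage sketch of the overloads: writeJob=True writes the job to disk and
# returns its path; writeJob=False returns the in-memory computation state.
from mapFolding.someAssemblyRequired import makeStateJob

pathFilenameJob = makeStateJob([2, 4], writeJob=True)   # -> pathlib.Path
stateJob = makeStateJob([2, 4], writeJob=False)         # -> computationState
```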
@@ -16,11 +16,11 @@ def insertArrayIn_body(FunctionDefTarget: ast.FunctionDef, identifier: str, arra
16
16
 
17
17
  def insertAssign(assignee: str, arraySlice: numpy.ndarray) -> None:
18
18
  nonlocal FunctionDefTarget
19
- onlyDataRLE = makeStrRLEcompacted(arraySlice) #NOTE
19
+ onlyDataRLE = autoDecodingRLE(arraySlice, addSpaces=True)
20
20
  astStatement = cast(ast.Expr, ast.parse(onlyDataRLE).body[0])
21
21
  dataAst = astStatement.value
22
22
 
23
- arrayCall = Then.make_astCall(name=constructorName, args=[dataAst], dictionaryKeywords={'dtype': ast.Name(id=argData_dtypeName, ctx=ast.Load())})
23
+ arrayCall = Then.make_astCall(name=constructorName, args=[dataAst], list_astKeywords=[ast.keyword(arg='dtype', value=ast.Name(id=argData_dtypeName, ctx=ast.Load()))])
24
24
 
25
25
  assignment = ast.Assign(targets=[ast.Name(id=assignee, ctx=ast.Store())], value=arrayCall)#NOTE
26
26
  FunctionDefTarget.body.insert(0, assignment)
@@ -51,15 +51,15 @@ def findAndReplaceArrayIn_body(FunctionDefTarget: ast.FunctionDef, identifier: s
51
51
  if isinstance(astSubscript.value, ast.Name) and astSubscript.value.id == identifier and isinstance(astSubscript.slice, ast.Attribute):
52
52
  indexAs_astAttribute: ast.Attribute = astSubscript.slice
53
53
  indexAsStr = ast.unparse(indexAs_astAttribute)
54
- argDataSlice = arrayTarget[eval(indexAsStr)]
54
+ arraySlice = arrayTarget[eval(indexAsStr)]
55
55
 
56
- onlyDataRLE = makeStrRLEcompacted(argDataSlice)
56
+ onlyDataRLE = autoDecodingRLE(arraySlice, addSpaces=True)
57
57
  astStatement = cast(ast.Expr, ast.parse(onlyDataRLE).body[0])
58
58
  dataAst = astStatement.value
59
59
 
60
- arrayCall = Then.make_astCall(name=constructorName, args=[dataAst], dictionaryKeywords={'dtype': ast.Name(id=argData_dtypeName, ctx=ast.Load())})
60
+ arrayCall = Then.make_astCall(name=constructorName, args=[dataAst], list_astKeywords=[ast.keyword(arg='dtype', value=ast.Name(id=argData_dtypeName, ctx=ast.Load()))])
61
61
 
62
- assignment = ast.Assign( targets=[astAssignee], value=arrayCall )
62
+ assignment = ast.Assign(targets=[astAssignee], value=arrayCall)
63
63
  FunctionDefTarget.body.insert(0, assignment)
64
64
  FunctionDefTarget.body.remove(stmt)
65
65
  return FunctionDefTarget, allImports
@@ -77,7 +77,7 @@ def findAndReplaceArraySubscriptIn_body(FunctionDefTarget: ast.FunctionDef, iden
77
77
  indexAs_astAttribute: ast.Attribute = astSubscript.slice
78
78
  indexAsStr = ast.unparse(indexAs_astAttribute)
79
79
  argDataSlice: int = arrayTarget[eval(indexAsStr)].item()
80
- astCall = ast.Call(func=ast.Name(id=argData_dtypeName, ctx=ast.Load()) , args=[ast.Constant(value=argDataSlice)], keywords=[])
80
+ astCall = ast.Call(func=ast.Name(id=argData_dtypeName, ctx=ast.Load()), args=[ast.Constant(value=argDataSlice)], keywords=[])
81
81
  assignment = ast.Assign(targets=[astAssignee], value=astCall)
82
82
  if astAssignee.id not in Z0Z_listChaff:
83
83
  FunctionDefTarget.body.insert(0, assignment)
@@ -252,6 +252,7 @@ def makeAstModuleForOneCallable(pythonSource: str, callableTarget: str, paramete
252
252
  if inlineCallables:
253
253
  dictionaryFunctionDef = {statement.name: statement for statement in astModule.body if isinstance(statement, ast.FunctionDef)}
254
254
  callableInlinerWorkhorse = RecursiveInliner(dictionaryFunctionDef)
255
+ # NOTE the inliner assumes each function is not called more than once
255
256
  FunctionDefTarget = callableInlinerWorkhorse.inlineFunctionBody(callableTarget)
256
257
  else:
257
258
  FunctionDefTarget = next((node for node in astModule.body if isinstance(node, ast.FunctionDef) and node.name == callableTarget), None)
@@ -3,6 +3,7 @@ from mapFolding import (
3
3
  EnumIndices,
4
4
  formatModuleNameDEFAULT,
5
5
  FREAKOUT,
6
+ getAlgorithmDispatcher,
6
7
  getAlgorithmSource,
7
8
  getFilenameFoldsTotal,
8
9
  getPathFilenameFoldsTotal,
@@ -35,6 +36,7 @@ from numpy import integer
35
36
  from numpy.typing import NDArray
36
37
  from types import ModuleType
37
38
  from typing import Any, Callable, cast, Dict, List, Optional, Sequence, Set, Tuple, Type, Union
39
+ from Z0Z_tools import autoDecodingRLE
38
40
  import ast
39
41
  import autoflake
40
42
  import collections
@@ -50,54 +52,6 @@ import python_minifier
50
52
 
51
53
  youOughtaKnow = collections.namedtuple('youOughtaKnow', ['callableSynthesized', 'pathFilenameForMe', 'astForCompetentProgrammers'])
52
54
 
53
- # TODO move to Z0Z_tools
54
- def makeStrRLEcompacted(arrayTarget: NDArray[integer[Any]]) -> str:
55
- """Converts a NumPy array into a compressed string representation using run-length encoding (RLE).
56
-
57
- This function takes a NumPy array and converts it into an optimized string representation by:
58
- 1. Compressing consecutive sequences of numbers into range objects
59
- 2. Minimizing repeated zeros using array multiplication syntax
60
-
61
- Parameters:
62
- arrayTarget (numpy.ndarray): The input NumPy array to be converted
63
-
64
- Returns:
65
- str: A string containing Python code that recreates the input array in compressed form.
66
- """
67
-
68
- def compressRangesNDArrayNoFlatten(arraySlice: NDArray[integer[Any]]) -> List[List[Any] | Any | NDArray[integer[Any]]] | List[Any] | Any | NDArray[integer[Any]]:
69
- if isinstance(arraySlice, numpy.ndarray) and arraySlice.ndim > 1:
70
- return [compressRangesNDArrayNoFlatten(arraySlice[index]) for index in range(arraySlice.shape[0])]
71
- elif isinstance(arraySlice, numpy.ndarray) and arraySlice.ndim == 1:
72
- listWithRanges = []
73
- for group in more_itertools.consecutive_groups(arraySlice.tolist()):
74
- ImaSerious = list(group)
75
- ImaRange = [range(ImaSerious[0], ImaSerious[-1] + 1)]
76
- spaces = True #NOTE
77
- lengthAsList = spaces*(len(ImaSerious)-1) + len(python_minifier.minify(str(ImaSerious))) # brackets are proxies for commas
78
- lengthAsRange = spaces*1 + len(str('*')) + len(python_minifier.minify(str(ImaRange))) # brackets are proxies for commas
79
- if lengthAsRange < lengthAsList:
80
- listWithRanges += ImaRange
81
- else:
82
- listWithRanges += ImaSerious
83
- return listWithRanges
84
- return arraySlice
85
-
86
- arrayAsNestedLists = compressRangesNDArrayNoFlatten(arrayTarget)
87
-
88
- arrayAsStr = python_minifier.minify(str(arrayAsNestedLists))
89
-
90
- commaIntMaximum = arrayTarget.shape[-1] - 1
91
-
92
- for X in range(1):
93
- arrayAsStr = arrayAsStr.replace(f'[{X}' + f',{X}'*commaIntMaximum + ']', f'[{X}]*'+str(commaIntMaximum+1))
94
- for countInt in range(commaIntMaximum, 2, -1):
95
- arrayAsStr = arrayAsStr.replace(f',{X}'*countInt + ']', f']+[{X}]*'+str(countInt))
96
-
97
- arrayAsStr = arrayAsStr.replace('range', '*range')
98
-
99
- return arrayAsStr
100
-
101
55
  # Generic
102
56
  class ifThis:
103
57
  """Generic AST node predicate builder."""
@@ -28,9 +28,9 @@ def doUnrollCountGaps(FunctionDefTarget: ast.FunctionDef, stateJob: computationS
28
28
  return FunctionDefTarget, allImports
29
29
 
30
30
  def writeJobNumba(mapShape: Sequence[int]
31
- , callableTarget: str
32
31
  , algorithmSource: ModuleType
33
- , parametersNumba: Optional[ParametersNumba]=None
32
+ , callableTarget: Optional[str] = None
33
+ , parametersNumba: Optional[ParametersNumba] = None
34
34
  , pathFilenameWriteJob: Optional[Union[str, os.PathLike[str]]] = None
35
35
  , unrollCountGaps: Optional[bool] = False
36
36
  , **keywordArguments: Optional[Any]
@@ -63,7 +63,15 @@ def writeJobNumba(mapShape: Sequence[int]
63
63
  stateJob = makeStateJob(mapShape, writeJob=False, **keywordArguments)
64
64
  pythonSource = inspect.getsource(algorithmSource)
65
65
  astModule = ast.parse(pythonSource)
66
- FunctionDefTarget = next((node for node in astModule.body if isinstance(node, ast.FunctionDef) and node.name == callableTarget), None)
66
+ setFunctionDef = {statement for statement in astModule.body if isinstance(statement, ast.FunctionDef)}
67
+ if not callableTarget:
68
+ if len(setFunctionDef) == 1:
69
+ FunctionDefTarget = setFunctionDef.pop()
70
+ callableTarget = FunctionDefTarget.name
71
+ else:
72
+ raise ValueError(f"I did not receive a `callableTarget` and {algorithmSource.__name__=} has more than one callable: {setFunctionDef}. Please select one.")
73
+ else:
74
+ FunctionDefTarget = setFunctionDef.pop() if callableTarget in {statement.name for statement in setFunctionDef} else None
67
75
  if not FunctionDefTarget: raise ValueError(f"I received `{callableTarget=}` and {algorithmSource.__name__=}, but I could not find that function in that source.")
68
76
 
69
77
  # NOTE `allImports` is a complementary container to `FunctionDefTarget`; the `FunctionDefTarget` cannot track its own imports very well.
@@ -116,6 +124,21 @@ def writeJobNumba(mapShape: Sequence[int]
116
124
  ast.fix_missing_locations(astModule)
117
125
  pythonSource = ast.unparse(astModule)
118
126
  pythonSource = autoflake.fix_code(pythonSource, ['mapFolding', 'numba', 'numpy'])
127
+ pythonSource = python_minifier.minify(pythonSource, remove_annotations = False,
128
+ remove_pass = False,
129
+ remove_literal_statements = False,
130
+ combine_imports = True,
131
+ hoist_literals = False,
132
+ rename_locals = False,
133
+ rename_globals = False,
134
+ remove_object_base = False,
135
+ convert_posargs_to_args = False,
136
+ preserve_shebang = True,
137
+ remove_asserts = False,
138
+ remove_debug = False,
139
+ remove_explicit_return_none = False,
140
+ remove_builtin_exception_brackets = False,
141
+ constant_folding = False)
119
142
 
120
143
  # NOTE put on disk
121
144
  if pathFilenameWriteJob is None:
@@ -132,11 +155,11 @@ def writeJobNumba(mapShape: Sequence[int]
132
155
 
133
156
  if __name__ == '__main__':
134
157
  mapShape = [5,5]
135
- callableTarget = 'countSequential'
136
-
137
158
  from mapFolding.syntheticModules import numba_countSequential
138
159
  algorithmSource: ModuleType = numba_countSequential
139
160
 
161
+ callableTarget = None
162
+
140
163
  parametersNumba = parametersNumbaDEFAULT
141
164
 
142
165
  pathFilenameWriteJob = None
@@ -147,4 +170,4 @@ if __name__ == '__main__':
147
170
  Z0Z_setDatatypeModuleScalar('numba')
148
171
  Z0Z_setDecoratorCallable('jit')
149
172
 
150
- writeJobNumba(mapShape, callableTarget, algorithmSource, parametersNumba, pathFilenameWriteJob)
173
+ writeJobNumba(mapShape, algorithmSource, callableTarget, parametersNumba, pathFilenameWriteJob)
@@ -4,7 +4,7 @@ everything I am doing. I would rather benefit from humanity's
4
4
  collective wisdom."""
5
5
  from mapFolding.someAssemblyRequired.synthesizeNumba import *
6
6
 
7
- def makeFlowNumbaOptimized(listCallablesInline: List[str], callableDispatcher: Optional[str] = None, algorithmSource: Optional[ModuleType] = None, relativePathWrite: Optional[pathlib.Path] = None, formatFilenameWrite: Optional[str] = None) -> None:
7
+ def makeFlowNumbaOptimized(listCallablesInline: List[str], callableDispatcher: Optional[bool] = False, algorithmSource: Optional[ModuleType] = None, relativePathWrite: Optional[pathlib.Path] = None, formatFilenameWrite: Optional[str] = None) -> List[youOughtaKnow]:
8
8
  if relativePathWrite and relativePathWrite.is_absolute():
9
9
  raise ValueError("The path to write the module must be relative to the root of the package.")
10
10
  if not algorithmSource:
@@ -51,7 +51,7 @@ def makeFlowNumbaOptimized(listCallablesInline: List[str], callableDispatcher: O
51
51
  listStuffYouOughtaKnow.append(doThisStuff(callableTarget, parametersNumba, inlineCallables, unpackArrays, allImports, relativePathWrite, formatFilenameWrite))
52
52
 
53
53
  if callableDispatcher:
54
- callableTarget = callableDispatcher
54
+ callableTarget = getAlgorithmDispatcher().__name__
55
55
  parametersNumba = None
56
56
  inlineCallables = False
57
57
  unpackArrays = False
@@ -63,6 +63,8 @@ def makeFlowNumbaOptimized(listCallablesInline: List[str], callableDispatcher: O
63
63
 
64
64
  listStuffYouOughtaKnow.append(doThisStuff(callableTarget, parametersNumba, inlineCallables, unpackArrays, allImports, relativePathWrite, formatFilenameWrite))
65
65
 
66
+ return listStuffYouOughtaKnow
67
+
66
68
  if __name__ == '__main__':
67
69
  setDatatypeModule('numpy', sourGrapes=True)
68
70
  setDatatypeFoldsTotal('int64', sourGrapes=True)
@@ -71,5 +73,5 @@ if __name__ == '__main__':
71
73
  Z0Z_setDatatypeModuleScalar('numba')
72
74
  Z0Z_setDecoratorCallable('jit')
73
75
  listCallablesInline: List[str] = ['countInitialize', 'countParallel', 'countSequential']
74
- callableDispatcher = 'doTheNeedful'
76
+ callableDispatcher = True
75
77
  makeFlowNumbaOptimized(listCallablesInline, callableDispatcher)
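
With `callableDispatcher` now a boolean, the dispatcher name comes from `getAlgorithmDispatcher()` instead of a caller-supplied string, and the function returns its `youOughtaKnow` records. A hypothetical end-to-end sketch:

```python
# Hypothetical end-to-end flow, a sketch: regenerate the synthetic Numba
# modules (this overwrites mapFolding/syntheticModules by default), then
# synthesize a mapShape-specific job from one of them.
from mapFolding.someAssemblyRequired import makeFlowNumbaOptimized, writeJobNumba
from mapFolding.syntheticModules import numba_countSequential

listCallablesInline = ['countInitialize', 'countParallel', 'countSequential']
listStuffYouOughtaKnow = makeFlowNumbaOptimized(listCallablesInline, callableDispatcher=True)

# With the 0.4.2 signature, callableTarget may be omitted when the source
# module defines exactly one function, as numba_countSequential does.
writeJobNumba([5, 5], numba_countSequential)
```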
@@ -1,12 +1,12 @@
1
1
  from mapFolding import indexMy
2
2
  from mapFolding import indexTrack
3
- from numba import uint8
4
3
  from numba import jit
5
- from numpy import integer
4
+ from numba import uint8
6
5
  from numpy import dtype
7
6
  from numpy import ndarray
8
- from typing import Any
7
+ from numpy import integer
9
8
  from typing import Tuple
9
+ from typing import Any
10
10
 
11
11
  @jit((uint8[:, :, ::1], uint8[::1], uint8[::1], uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=False, no_cpython_wrapper=False, nopython=True, parallel=False)
12
12
  def countInitialize(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]], gapsWhere: ndarray[Tuple[int], dtype[integer[Any]]], my: ndarray[Tuple[int], dtype[integer[Any]]], track: ndarray[Tuple[int, int], dtype[integer[Any]]]) -> None:
@@ -1,14 +1,14 @@
1
1
  from mapFolding import indexMy
2
2
  from mapFolding import indexTrack
3
- from numba import uint8
4
- from numba import jit
5
- from numba import int64
6
3
  from numba import prange
7
- from numpy import integer
4
+ from numba import int64
5
+ from numba import jit
6
+ from numba import uint8
8
7
  from numpy import dtype
9
8
  from numpy import ndarray
10
- from typing import Any
9
+ from numpy import integer
11
10
  from typing import Tuple
11
+ from typing import Any
12
12
 
13
13
  @jit((uint8[:, :, ::1], int64[::1], uint8[::1], uint8[::1], uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True, parallel=True)
14
14
  def countParallel(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]], foldGroups: ndarray[Tuple[int], dtype[integer[Any]]], gapsWhere: ndarray[Tuple[int], dtype[integer[Any]]], my: ndarray[Tuple[int], dtype[integer[Any]]], track: ndarray[Tuple[int, int], dtype[integer[Any]]]) -> None:
@@ -1,13 +1,13 @@
1
1
  from mapFolding import indexMy
2
2
  from mapFolding import indexTrack
3
- from numba import uint8
4
- from numba import jit
5
3
  from numba import int64
6
- from numpy import integer
4
+ from numba import jit
5
+ from numba import uint8
7
6
  from numpy import dtype
8
7
  from numpy import ndarray
9
- from typing import Any
8
+ from numpy import integer
10
9
  from typing import Tuple
10
+ from typing import Any
11
11
 
12
12
  @jit((uint8[:, :, ::1], int64[::1], uint8[::1], uint8[::1], uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True, parallel=False)
13
13
  def countSequential(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]], foldGroups: ndarray[Tuple[int], dtype[integer[Any]]], gapsWhere: ndarray[Tuple[int], dtype[integer[Any]]], my: ndarray[Tuple[int], dtype[integer[Any]]], track: ndarray[Tuple[int, int], dtype[integer[Any]]]) -> None:
@@ -34,10 +34,10 @@ def countSequential(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer
34
34
  gap1ndexCeiling = gapRangeStart[leaf1ndex - 1]
35
35
  indexDimension = 0
36
36
  while indexDimension < dimensionsTotal:
37
- if connectionGraph[indexDimension, leaf1ndex, leaf1ndex] == leaf1ndex:
37
+ leafConnectee = connectionGraph[indexDimension, leaf1ndex, leaf1ndex]
38
+ if leafConnectee == leaf1ndex:
38
39
  dimensionsUnconstrained -= 1
39
40
  else:
40
- leafConnectee = connectionGraph[indexDimension, leaf1ndex, leaf1ndex]
41
41
  while leafConnectee != leaf1ndex:
42
42
  gapsWhere[gap1ndexCeiling] = leafConnectee
43
43
  if countDimensionsGapped[leafConnectee] == 0:
@@ -2,14 +2,14 @@ from mapFolding.syntheticModules.numba_countInitialize import countInitialize
2
2
  from mapFolding.syntheticModules.numba_countParallel import countParallel
3
3
  from mapFolding.syntheticModules.numba_countSequential import countSequential
4
4
  from mapFolding import indexMy
5
- from numba import uint8
6
- from numba import jit
7
5
  from numba import int64
8
- from numpy import integer
6
+ from numba import jit
7
+ from numba import uint8
9
8
  from numpy import dtype
10
9
  from numpy import ndarray
11
- from typing import Any
10
+ from numpy import integer
12
11
  from typing import Tuple
12
+ from typing import Any
13
13
 
14
14
  @jit((uint8[:, :, ::1], int64[::1], uint8[::1], uint8[::1], uint8[::1], uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=True, inline='always', looplift=False, no_cfunc_wrapper=False, no_cpython_wrapper=False, nopython=True, parallel=False)
15
15
  def doTheNeedful(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]], foldGroups: ndarray[Tuple[int], dtype[integer[Any]]], gapsWhere: ndarray[Tuple[int], dtype[integer[Any]]], mapShape: ndarray[Tuple[int], dtype[integer[Any]]], my: ndarray[Tuple[int], dtype[integer[Any]]], track: ndarray[Tuple[int, int], dtype[integer[Any]]]) -> None:
mapFolding/theDao.py CHANGED
@@ -78,6 +78,9 @@ def leafConnecteeInitialization(connectionGraph: ndarray[Tuple[int, int, int], d
78
78
  def leafConnecteeUpdate(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]], my: ndarray[Tuple[int], dtype[integer[Any]]], track: ndarray[Tuple[int, int], dtype[integer[Any]]]) -> None:
79
79
  my[indexMy.leafConnectee.value] = connectionGraph[my[indexMy.indexDimension.value], my[indexMy.leaf1ndex.value], track[indexTrack.leafBelow.value, my[indexMy.leafConnectee.value]]]
80
80
 
81
+ def activeLeafConnectedToItself(my: ndarray[Tuple[int], dtype[integer[Any]]]) -> Any:
82
+ return my[indexMy.leafConnectee.value] == my[indexMy.leaf1ndex.value]
83
+
81
84
  def loopingLeavesConnectedToActiveLeaf(my: ndarray[Tuple[int], dtype[integer[Any]]]) -> Any:
82
85
  return my[indexMy.leafConnectee.value] != my[indexMy.leaf1ndex.value]
83
86
 
@@ -103,9 +106,9 @@ def thereAreComputationDivisionsYouMightSkip(my: ndarray[Tuple[int], dtype[integ
103
106
  return my[indexMy.leaf1ndex.value] != my[indexMy.taskDivisions.value] or my[indexMy.leafConnectee.value] % my[indexMy.taskDivisions.value] == my[indexMy.taskIndex.value]
104
107
 
105
108
  def countInitialize(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]]
106
- , gapsWhere: ndarray[Tuple[int] , dtype[integer[Any]]]
107
- , my: ndarray[Tuple[int] , dtype[integer[Any]]]
108
- , track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
109
+ , gapsWhere: ndarray[Tuple[int] , dtype[integer[Any]]]
110
+ , my: ndarray[Tuple[int] , dtype[integer[Any]]]
111
+ , track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
109
112
  ) -> None:
110
113
 
111
114
  while activeLeafGreaterThan0Condition(my=my):
@@ -132,10 +135,10 @@ def countInitialize(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer
132
135
  return
133
136
 
134
137
  def countParallel(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]]
135
- , foldGroups: ndarray[Tuple[int] , dtype[integer[Any]]]
136
- , gapsWhere: ndarray[Tuple[int] , dtype[integer[Any]]]
137
- , my: ndarray[Tuple[int] , dtype[integer[Any]]]
138
- , track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
138
+ , foldGroups: ndarray[Tuple[int] , dtype[integer[Any]]]
139
+ , gapsWhere: ndarray[Tuple[int] , dtype[integer[Any]]]
140
+ , my: ndarray[Tuple[int] , dtype[integer[Any]]]
141
+ , track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
139
142
  ) -> None:
140
143
 
141
144
  gapsWherePARALLEL = gapsWhere.copy()
@@ -182,8 +185,8 @@ def countParallel(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[A
182
185
  def countSequential( connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[Any]]]
183
186
  , foldGroups: ndarray[Tuple[int] , dtype[integer[Any]]]
184
187
  , gapsWhere: ndarray[Tuple[int] , dtype[integer[Any]]]
185
- , my: ndarray[Tuple[int] , dtype[integer[Any]]]
186
- , track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
188
+ , my: ndarray[Tuple[int] , dtype[integer[Any]]]
189
+ , track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
187
190
  ) -> None:
188
191
 
189
192
  groupsOfFolds: int = 0
@@ -195,10 +198,10 @@ def countSequential( connectionGraph: ndarray[Tuple[int, int, int], dtype[intege
195
198
  else:
196
199
  findGapsInitializeVariables(my=my, track=track)
197
200
  while loopUpToDimensionsTotal(my=my):
198
- if dimensionsUnconstrainedCondition(connectionGraph=connectionGraph, my=my):
201
+ leafConnecteeInitialization(connectionGraph=connectionGraph, my=my)
202
+ if activeLeafConnectedToItself(my=my):
199
203
  dimensionsUnconstrainedDecrement(my=my)
200
204
  else:
201
- leafConnecteeInitialization(connectionGraph=connectionGraph, my=my)
202
205
  while loopingLeavesConnectedToActiveLeaf(my=my):
203
206
  countGaps(gapsWhere=gapsWhere, my=my, track=track)
204
207
  leafConnecteeUpdate(connectionGraph=connectionGraph, my=my, track=track)
@@ -217,8 +220,8 @@ def doTheNeedful(connectionGraph: ndarray[Tuple[int, int, int], dtype[integer[An
217
220
  , foldGroups: ndarray[Tuple[int] , dtype[integer[Any]]]
218
221
  , gapsWhere: ndarray[Tuple[int] , dtype[integer[Any]]]
219
222
  , mapShape: ndarray[Tuple[int] , dtype[integer[Any]]]
220
- , my: ndarray[Tuple[int] , dtype[integer[Any]]]
221
- , track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
223
+ , my: ndarray[Tuple[int] , dtype[integer[Any]]]
224
+ , track: ndarray[Tuple[int, int] , dtype[integer[Any]]]
222
225
  ) -> None:
223
226
 
224
227
  countInitialize(connectionGraph, gapsWhere, my, track)
mapFolding/theSSOT.py CHANGED
@@ -82,9 +82,9 @@ def getAlgorithmSource() -> ModuleType:
82
82
  from mapFolding import theDao
83
83
  return theDao
84
84
 
85
- def getAlgorithmCallable() -> Callable[..., None]:
85
+ def getAlgorithmDispatcher() -> Callable[..., None]:
86
86
  algorithmSource = getAlgorithmSource()
87
- return cast(Callable[..., None], algorithmSource.doTheNeedful)
87
+ return cast(Callable[..., None], algorithmSource.doTheNeedful) # 'doTheNeedful' is duplicated and there is not a SSOT for it
88
88
 
89
89
  def getDispatcherCallable() -> Callable[..., None]:
90
90
  from mapFolding.syntheticModules import numba_doTheNeedful
@@ -18,23 +18,16 @@ Old notes that are not entirely accurate.
18
18
 
19
19
  | **Option** | **Description** | **Why** | **Size** | **But** |
20
20
  | ----------------------- | --------------------------------------------------- | --------------------- | --------------- | ------------------------ |
21
- | `_dbg_extend_lifetimes` | Debug option to extend object lifetimes | Debugging | | |
22
- | `_dbg_optnone` | Disable optimization for debugging | Debugging | | |
23
- | `debug` | Enable debug mode with additional checks | Debugging | | |
24
21
  | `no_rewrites` | Disable AST rewrites optimization | Debugging | | |
25
- | `boundscheck` | Enable array bounds checking (slows execution) | Error checking | Larger | Slower |
26
22
  | `error_model` | Divide by zero: kill or chill? | Error checking | ? | |
27
23
  | `_nrt` | Enable No Runtime type checking | Startup speed | Smaller | No type protection |
28
- | `fastmath` | Reduce float potential precision | Float speed | Smaller | Discriminatory, untested |
29
24
  | `forceinline` | Force function inlining | Reduce function calls | Likely larger | |
30
- | `forceobj` | Force object mode compilation | Inclusiveness | Larger | Slower execution |
31
25
  | `inline` | Algorithmically choose inlining | Speed | Slightly larger | |
32
26
  | `looplift` | Enable loop lifting optimization | Speed (if applicable) | Larger | Exclusionary |
33
27
  | `no_cfunc_wrapper` | Disable C function wrapper generation | Size | Smaller | Exclusionary |
34
28
  | `no_cpython_wrapper` | Disable Python C-API wrapper generation | Size | Smallest | Exclusionary |
35
29
 
36
30
  """
37
- # NOTE Deepseek removed forceinline=True, inline='always'
38
31
  # TODO try to implement all possible parameters, but use `NotRequired` for the more esoteric ones
39
32
  class ParametersNumba(TypedDict):
40
33
  _dbg_extend_lifetimes: NotRequired[bool]
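
For reference, a hedged sketch of a parameters mapping mirroring the `@jit` flags the synthesized modules in this diff pass (every `ParametersNumba` key is `NotRequired`, so any subset is valid):

```python
# Hedged sketch: a mapping mirroring the @jit flags used by the synthesized
# modules in this diff; every ParametersNumba key is NotRequired.
parametersNumba = dict(
    _nrt=True, boundscheck=False, cache=True, error_model='numpy',
    fastmath=True, forceinline=True, inline='always', looplift=False,
    no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True,
    parallel=False)
```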
@@ -1,13 +1,13 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: mapFolding
3
- Version: 0.4.0
3
+ Version: 0.4.2
4
4
  Summary: Count distinct ways to fold a map (or a strip of stamps)
5
5
  Author-email: Hunter Hogan <HunterHogan@pm.me>
6
6
  License: CC-BY-NC-4.0
7
7
  Project-URL: Donate, https://www.patreon.com/integrated
8
8
  Project-URL: Homepage, https://github.com/hunterhogan/mapFolding
9
9
  Project-URL: Repository, https://github.com/hunterhogan/mapFolding.git
10
- Keywords: A001415,A001416,A001417,A001418,A195646,folding,map folding,OEIS,stamp folding
10
+ Keywords: A001415,A001416,A001417,A001418,A195646,combinatorics,folding,map folding,OEIS,optimization,stamp folding
11
11
  Classifier: Development Status :: 5 - Production/Stable
12
12
  Classifier: Environment :: Console
13
13
  Classifier: Intended Audience :: Education
@@ -16,8 +16,14 @@ Classifier: Intended Audience :: Other Audience
16
16
  Classifier: Intended Audience :: Science/Research
17
17
  Classifier: Natural Language :: English
18
18
  Classifier: Operating System :: OS Independent
19
+ Classifier: Programming Language :: Python :: 3.10
20
+ Classifier: Programming Language :: Python :: 3.11
21
+ Classifier: Programming Language :: Python :: 3.12
22
+ Classifier: Programming Language :: Python :: 3.13
23
+ Classifier: Programming Language :: Python :: 3
19
24
  Classifier: Programming Language :: Python
20
25
  Classifier: Topic :: Scientific/Engineering :: Mathematics
26
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
21
27
  Classifier: Typing :: Typed
22
28
  Requires-Python: >=3.10
23
29
  Description-Content-Type: text/markdown
@@ -25,17 +31,14 @@ License-File: LICENSE
25
31
  Requires-Dist: numba
26
32
  Requires-Dist: numpy
27
33
  Requires-Dist: Z0Z_tools
28
- Provides-Extra: synthesizemodules
29
- Requires-Dist: autoflake; extra == "synthesizemodules"
30
- Requires-Dist: more_itertools; extra == "synthesizemodules"
31
- Requires-Dist: python_minifier; extra == "synthesizemodules"
32
34
  Provides-Extra: testing
33
- Requires-Dist: mypy; extra == "testing"
35
+ Requires-Dist: autoflake; extra == "testing"
36
+ Requires-Dist: more_itertools; extra == "testing"
34
37
  Requires-Dist: pytest-cov; extra == "testing"
35
38
  Requires-Dist: pytest-env; extra == "testing"
36
- Requires-Dist: pytest-mypy; extra == "testing"
37
39
  Requires-Dist: pytest-xdist; extra == "testing"
38
40
  Requires-Dist: pytest; extra == "testing"
41
+ Requires-Dist: python_minifier; extra == "testing"
39
42
  Requires-Dist: updateCitation; extra == "testing"
40
43
 
41
44
  # mapFolding: Algorithms for enumerating distinct map/stamp folding patterns 🗺️
@@ -104,6 +107,35 @@ Available OEIS sequences:
104
107
  - Transform the algorithm using AST
105
108
  - Create hyper-optimized modules to compute a specific map.
106
109
 
110
+ ### 4. **Customizing your algorithm**
111
+
112
+ - mapFolding\someAssemblyRequired\synthesizeNumbaJob.py (and/or synthesizeNumba____.py, as applicable)
113
+ - Synthesize a Numba-optimized module for a specific mapShape
114
+ - Synthesize _from_ a module in mapFolding\syntheticModules or from any source you select
115
+ - Use the existing transformation options
116
+ - Or create new ways of transforming the algorithm from its source to a specific job
117
+ - mapFolding\someAssemblyRequired\makeJob.py
118
+ - Initialize data for a specific mapShape
119
+ - mapFolding\someAssemblyRequired\synthesizeNumbaModules.py (and/or synthesizeNumba____.py, as applicable)
120
+ - Synthesize one or more Numba-optimized modules for parallel or sequential computation
121
+ - Overwrite the modules in mapFolding\syntheticModules or save the module(s) to a custom path
122
+ - Synthesize _from_ the algorithm(s) in mapFolding\theDao.py or from any source you select
123
+ - Use the existing transformation options
124
+ - Or create new ways of transforming the algorithm from its source to a new module
125
+ - Use your new module in synthesizeNumbaJob.py, above, as the source to create a mapShape-specific job module
126
+ - mapFolding\theDao.py
127
+ - Modify the algorithms for initializing values, parallel computation, and/or sequential computation
128
+ - Use the modified algorithm(s) in synthesizeNumbaModules.py, above, to create Numba-optimized version(s)
129
+ - Then use a Numba-optimized version in synthesizeNumbaJob.py, above, to create a hyper-optimized version for a specific mapShape
130
+ - mapFolding\theSSOT.py (and/or theSSOTnumba.py and/ or theSSOT____.py, if they exist)
131
+ - Modify broad settings or find functions to modify broad settings, such as data structures and their data types
132
+ - Create new settings or groups of settings
133
+ - mapFolding\beDRY.py
134
+ - Functions to handle common tasks, such as parsing parameters or creating the `connectionGraph` for a mapShape (a Cartesian product decomposition)
135
+ - mapFolding\someAssemblyRequired
136
+ - Create new transformations to optimize the algorithm, such as for JAX, CuPy, or CUDA
137
+ - (mapFolding\reference\jax.py has a once-functional JAX implementation, and synthesizeModuleJAX.py might be a useful starting point)
138
+
107
139
  ## Map-folding Video
108
140
 
109
141
  ~~This caused my neurosis:~~ I enjoyed the following video, which is what introduced me to map folding.
@@ -0,0 +1,42 @@
1
+ mapFolding/__init__.py,sha256=_YjoypHXmWLmEWwhFVgKO83Uf28ksesT9F73oJoAIPE,1323
2
+ mapFolding/basecamp.py,sha256=v0VCF_Zgm_XBHcz4bqblsxfHwAxZKgenW6um77quWLk,3751
3
+ mapFolding/beDRY.py,sha256=ejl-eIZTqAHCBNIND0pP_F9BxkPEWIlVXbCTk2Ki8x8,15751
4
+ mapFolding/oeis.py,sha256=3hv71o8bhckjY0nsSY5JTJ2LrpJcuhZ9j3mP6LWLIQc,11124
5
+ mapFolding/theDao.py,sha256=vyln6gXdRp87Xhtuy31bjCpMv_8vL4KH6s1fH40V9oA,12832
6
+ mapFolding/theSSOT.py,sha256=QrEMPREjEbt1H8HcrM2Nm_hv7JsFWRG3lHdUU0Jrv-w,10238
7
+ mapFolding/theSSOTnumba.py,sha256=5jTbJyu9XASYC4eIoUj98eVKWdWyjZw-XF3V2GK8cLg,4445
8
+ mapFolding/reference/flattened.py,sha256=S6D9wiFTlbeoetEqaMLOcA-R22BHOzjqPRujffNxxUM,14875
9
+ mapFolding/reference/hunterNumba.py,sha256=jDS0ORHkIhcJ1rzA5hT49sZHKf3rgJOoGesUCcbKFFY,6054
10
+ mapFolding/reference/irvineJavaPort.py,sha256=7GvBU0tnS6wpFgkYad3465do9jBQW-2bYvbCYyABPHM,3341
11
+ mapFolding/reference/jax.py,sha256=7ji9YWia6Kof0cjcNdiS1GG1rMbC5SBjcyVr_07AeUk,13845
12
+ mapFolding/reference/lunnan.py,sha256=iAbJELfW6RKNMdPcBY9b6rGQ-z1zoRf-1XCurCRMOo8,3951
13
+ mapFolding/reference/lunnanNumpy.py,sha256=rwVP3WIDXimpAuaxhRIuBYU56nVDTKlfGiclw_FkgUU,3765
14
+ mapFolding/reference/lunnanWhile.py,sha256=uRrMT23jTJvoQDlD_FzeIQe_pfMXJG6_bRvs7uhC8z0,3271
15
+ mapFolding/reference/rotatedEntryPoint.py,sha256=USZY3n3zwhSE68ATscUuN66t1qShuEbMI790Gz9JFTw,9352
16
+ mapFolding/reference/total_countPlus1vsPlusN.py,sha256=wpgay-uqPOBd64Z4Pg6tg40j7-4pzWHGMM6v0bnmjhE,6288
17
+ mapFolding/someAssemblyRequired/__init__.py,sha256=wtec_hIz-AKz0_hGdXsWnCKTcCxdMV9-WK6SiIriAeU,396
18
+ mapFolding/someAssemblyRequired/getLLVMforNoReason.py,sha256=nX8tghZClYt7zJd6RpZBXhE_h-CGRHOS17biqiEdf-o,855
19
+ mapFolding/someAssemblyRequired/makeJob.py,sha256=c9sTRUK90snTCcXCvs86VKBH6z_nt3OVFjNs_WgCoIg,2422
20
+ mapFolding/someAssemblyRequired/synthesizeModuleJAX.py,sha256=jatvtYhK5ZJK-YmCKATt7w3icFXXO79cZDAYVrU9bgA,1258
21
+ mapFolding/someAssemblyRequired/synthesizeNumba.py,sha256=3JF1FZwSB-dhLUM_pMy11dI4x1dnWOFQSfJytZHYl0M,17351
22
+ mapFolding/someAssemblyRequired/synthesizeNumbaGeneralized.py,sha256=k8IaCT74ZPhHyra0MbCRdt_5k0Ov3vJgXlN5tbLVnf4,13998
23
+ mapFolding/someAssemblyRequired/synthesizeNumbaJob.py,sha256=2sKZgc5kyyz2KaoApcazj_37UgBqAkxORFeROWWU5tk,9038
24
+ mapFolding/someAssemblyRequired/synthesizeNumbaModules.py,sha256=_iRXjMASB_BnYJeH8Rt7FlC-GE7lkZ1Hy292XTaUCu4,3785
25
+ mapFolding/syntheticModules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
26
+ mapFolding/syntheticModules/numba_countInitialize.py,sha256=9u-Skda9ntDxo_CiD5T4lUU1-Ts5GDZ33zs_Ybf77-Q,4274
27
+ mapFolding/syntheticModules/numba_countParallel.py,sha256=96-TDwLJgZhR9tFm2bqVDpk8TuWPx_2qFH7oNV78yUI,5517
28
+ mapFolding/syntheticModules/numba_countSequential.py,sha256=WCcPYnkyNpZS7SRMvx1A-IMIAFP5iHL9u9sj9gR8Yhw,3688
29
+ mapFolding/syntheticModules/numba_doTheNeedful.py,sha256=zBXDEN4DJTpCu_ho314-MWdS_08kP_vAgV-8sJDVgvo,1368
30
+ tests/__init__.py,sha256=eg9smg-6VblOr0kisM40CpGnuDtU2JgEEWGDTFVOlW8,57
31
+ tests/conftest.py,sha256=vH1BSOs8G6nIPWKFkSQFmX031IZqNFC4oiEia74mkHU,10554
32
+ tests/test_computations.py,sha256=qBha4IggMfr6ZH06W3M66enTA6PWsx8vkDp5eqYFM9M,4765
33
+ tests/test_oeis.py,sha256=31kdO1vnu2Lon43vM-YJVS4g40Ic03DWNER-cJcpxX4,4916
34
+ tests/test_other.py,sha256=u0vINT5EyVsXTNTR2DZIMpWCg4FH471jjHLRzC2JX7U,8351
35
+ tests/test_tasks.py,sha256=iq6_dh43JQkC2vAWXua0Xe915BKFGbvRJAkmbco854A,2389
36
+ tests/test_types.py,sha256=58tmPG9WOeGGAQbdQK_h_7t4SnENnZugH4WXlI8-L-M,171
37
+ mapFolding-0.4.2.dist-info/LICENSE,sha256=NxH5Y8BdC-gNU-WSMwim3uMbID2iNDXJz7fHtuTdXhk,19346
38
+ mapFolding-0.4.2.dist-info/METADATA,sha256=EPivaT4-_Ygz9_DobQKBIigNnDkWtUq7vm6hJtYiDeQ,7633
39
+ mapFolding-0.4.2.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
40
+ mapFolding-0.4.2.dist-info/entry_points.txt,sha256=F3OUeZR1XDTpoH7k3wXuRb3KF_kXTTeYhu5AGK1SiOQ,146
41
+ mapFolding-0.4.2.dist-info/top_level.txt,sha256=1gP2vFaqPwHujGwb3UjtMlLEGN-943VSYFR7V4gDqW8,17
42
+ mapFolding-0.4.2.dist-info/RECORD,,
tests/conftest.py CHANGED
@@ -3,11 +3,13 @@
3
3
  # TODO learn how to run tests and coverage analysis without `env = ["NUMBA_DISABLE_JIT=1"]`
4
4
 
5
5
  from mapFolding import *
6
- from mapFolding import basecamp, getAlgorithmCallable, getDispatcherCallable
6
+ from mapFolding import basecamp, getAlgorithmDispatcher, getDispatcherCallable
7
7
  from mapFolding.beDRY import *
8
+ from mapFolding.someAssemblyRequired import *
8
9
  from mapFolding.oeis import _getFilenameOEISbFile, _getOEISidInformation, _getOEISidValues
9
10
  from mapFolding.oeis import *
10
- from typing import Any, Callable, ContextManager, Dict, Generator, List, Optional, Sequence, Set, Tuple, Type, Union
11
+ from types import ModuleType
12
+ from typing import Any, Callable, ContextManager, Dict, Generator, List, Literal, NoReturn, Optional, Sequence, Set, Tuple, Type, Union
11
13
  from Z0Z_tools.pytestForYourUse import PytestFor_defineConcurrencyLimit, PytestFor_intInnit, PytestFor_oopsieKwargsie
12
14
  import pathlib
13
15
  import pytest
@@ -50,7 +52,8 @@ def setupTeardownTmpObjects() -> Generator[None, None, None]:
50
52
 
51
53
  @pytest.fixture
52
54
  def pathTmpTesting(request: pytest.FixtureRequest) -> pathlib.Path:
53
- pathTmp = pathTmpRoot / str(uuid.uuid4().hex)
55
+ # "Z0Z_" ensures the directory name does not start with a number, which would make it an invalid Python identifier
56
+ pathTmp = pathTmpRoot / ("Z0Z_" + str(uuid.uuid4().hex))
54
57
  pathTmp.mkdir(parents=True, exist_ok=False)
55
58
 
56
59
  registrarRecordsTmpObject(pathTmp)
@@ -63,9 +66,10 @@ def pathFilenameTmpTesting(request: pytest.FixtureRequest) -> pathlib.Path:
63
66
  except AttributeError:
64
67
  extension = ".txt"
65
68
 
69
+ # "Z0Z_" ensures the name does not start with a number, which would make it an invalid Python identifier
66
70
  uuidHex = uuid.uuid4().hex
67
- subpath = uuidHex[0:-8]
68
- filenameStem = uuidHex[-8:None]
71
+ subpath = "Z0Z_" + uuidHex[0:-8]
72
+ filenameStem = "Z0Z_" + uuidHex[-8:None]
69
73
 
70
74
  pathFilenameTmp = pathlib.Path(pathTmpRoot, subpath, filenameStem + extension)
71
75
  pathFilenameTmp.parent.mkdir(parents=True, exist_ok=False)
@@ -97,36 +101,6 @@ def makeDictionaryFoldsTotalKnown() -> Dict[Tuple[int,...], int]:
97
101
  dimensions = settings['getMapShape'](n)
98
102
  dimensions.sort()
99
103
  dictionaryMapDimensionsToFoldsTotalKnown[tuple(dimensions)] = foldingsTotal
100
-
101
- # Are we in a place that has jobs?
102
- pathJobDEFAULT = getPathJobRootDEFAULT()
103
- if pathJobDEFAULT.exists():
104
- # Are there foldsTotal files?
105
- for pathFilenameFoldsTotal in pathJobDEFAULT.rglob('*.foldsTotal'):
106
- if pathFilenameFoldsTotal.is_file():
107
- try:
108
- listDimensions = eval(pathFilenameFoldsTotal.stem)
109
- except Exception:
110
- continue
111
- # Are the dimensions in the dictionary?
112
- if isinstance(listDimensions, list) and all(isinstance(dimension, int) for dimension in listDimensions):
113
- listDimensions.sort()
114
- if tuple(listDimensions) in dictionaryMapDimensionsToFoldsTotalKnown:
115
- continue
116
- # Are the contents a reasonably large integer?
117
- try:
118
- foldsTotal = pathFilenameFoldsTotal.read_text()
119
- except Exception:
120
- continue
121
- # Why did I sincerely believe this would only be three lines of code?
122
- if foldsTotal.isdigit():
123
- foldsTotalInteger = int(foldsTotal)
124
- if foldsTotalInteger > 85109616 * 10**3:
125
- # You made it this far, so fuck it: put it in the dictionary
126
- dictionaryMapDimensionsToFoldsTotalKnown[tuple(listDimensions)] = foldsTotalInteger
127
- dictionaryMapDimensionsToFoldsTotalKnown[tuple(listDimensions)] = foldsTotalInteger
128
- # The sunk-costs fallacy claims another victim!
129
-
130
104
  return dictionaryMapDimensionsToFoldsTotalKnown
131
105
 
132
106
  """
@@ -229,17 +203,27 @@ def oeisID_1random() -> str:
229
203
  return random.choice(oeisIDsImplemented)
230
204
 
231
205
  @pytest.fixture
232
- def useAlgorithmDirectly() -> Generator[None, Any, None]:
233
- """Temporarily patches getDispatcherCallable to return the algorithm source directly."""
234
- original_dispatcher = basecamp.getDispatcherCallable
206
+ def useThisDispatcher():
207
+ """A fixture providing a context manager for temporarily replacing the dispatcher.
235
208
 
236
- # Patch the function at module level
237
- basecamp.getDispatcherCallable = getAlgorithmCallable
209
+ Returns
210
+ A context manager for patching the dispatcher
211
+ """
212
+ dispatcherOriginal = basecamp.getDispatcherCallable
238
213
 
239
- yield
214
+ def patchDispatcher(callableTarget: Callable) -> None:
215
+ def callableParameterized(*arguments: Any, **keywordArguments: Any) -> Callable:
216
+ return callableTarget
217
+ basecamp.getDispatcherCallable = callableParameterized
240
218
 
241
- # Restore original function
242
- basecamp.getDispatcherCallable = original_dispatcher
219
+ yield patchDispatcher
220
+ basecamp.getDispatcherCallable = dispatcherOriginal
221
+
222
+ @pytest.fixture
223
+ def useAlgorithmSourceDispatcher(useThisDispatcher: Callable) -> Generator[None, None, None]:
224
+ """Temporarily patches getDispatcherCallable to return the algorithm dispatcher."""
225
+ useThisDispatcher(getAlgorithmDispatcher())
226
+ yield
243
227
 
244
228
  def uniformTestMessage(expected: Any, actual: Any, functionName: str, *arguments: Any) -> str:
245
229
  """Format assertion message for any test comparison."""
@@ -0,0 +1,79 @@
1
+ from pathlib import Path
2
+ from tests.conftest import *
3
+ import importlib.util
4
+ import pytest
5
+
6
+ def test_algorithmSourceParallel(listDimensionsTestParallelization: List[int], foldsTotalKnown: Dict[Tuple[int, ...], int], useAlgorithmSourceDispatcher: None) -> None:
7
+ standardizedEqualTo(foldsTotalKnown[tuple(listDimensionsTestParallelization)], countFolds, listDimensionsTestParallelization, None, 'maximum')
8
+
9
+ def test_algorithmSourceSequential(listDimensionsTestCountFolds: List[int], foldsTotalKnown: Dict[Tuple[int, ...], int], useAlgorithmSourceDispatcher: None) -> None:
10
+ standardizedEqualTo(foldsTotalKnown[tuple(listDimensionsTestCountFolds)], countFolds, listDimensionsTestCountFolds)
11
+
12
+ def test_aOFn_calculate_value(oeisID: str) -> None:
13
+ for n in settingsOEIS[oeisID]['valuesTestValidation']:
14
+ standardizedEqualTo(settingsOEIS[oeisID]['valuesKnown'][n], oeisIDfor_n, oeisID, n)
15
+
16
+ # Python doesn't want me to test this
17
+ # @pytest.mark.parametrize('pathFilenameTmpTesting', ['.py'], indirect=True)
18
+ # def test_writeJobNumba(listDimensionsTestCountFolds: List[int], foldsTotalKnown: Dict[Tuple[int, ...], int], pathFilenameTmpTesting: Path) -> None:
19
+ # from mapFolding.syntheticModules import numba_countSequential
20
+ # algorithmSourceHARDCODED: ModuleType = numba_countSequential
21
+ # algorithmSource = algorithmSourceHARDCODED
22
+ # pathFilenameModule = writeJobNumba(listDimensionsTestCountFolds, algorithmSource, pathFilenameWriteJob=pathFilenameTmpTesting.absolute())
23
+
24
+ # Don_Lapre_Road_to_Self_Improvement = importlib.util.spec_from_file_location("__main__", pathFilenameModule)
25
+ # if Don_Lapre_Road_to_Self_Improvement is None:
26
+ # raise ImportError(f"Failed to create module specification from {pathFilenameModule}")
27
+ # if Don_Lapre_Road_to_Self_Improvement.loader is None:
28
+ # raise ImportError(f"Failed to get loader for module {pathFilenameModule}")
29
+ # module = importlib.util.module_from_spec(Don_Lapre_Road_to_Self_Improvement)
30
+
31
+ # module.__name__ = "__main__"
32
+ # Don_Lapre_Road_to_Self_Improvement.loader.exec_module(module)
33
+
34
+ # pathFilenameFoldsTotal = getPathFilenameFoldsTotal(listDimensionsTestCountFolds)
35
+ # standardizedEqualTo(str(foldsTotalKnown[tuple(listDimensionsTestCountFolds)]), pathFilenameFoldsTotal.read_text().strip)
36
+
37
+ # def test_makeFlowNumbaOptimized(pathTmpTesting: Path, useThisDispatcher):
38
+ # def test_makeFlowNumbaOptimized(useThisDispatcher):
39
+ # """To get this to work:
40
+ # walk_up=True doesn't work on 3.10, so that has to go
41
+ # the _logical_ import must be in the logical path of the package
42
+ # fuck python
43
+ # """
44
+ # listCallablesInlineHARDCODED: List[str] = ['countInitialize', 'countParallel', 'countSequential']
45
+ # listCallablesInline = listCallablesInlineHARDCODED
46
+ # callableDispatcher = True
47
+ # algorithmSource = None
48
+ # relativePathWrite = None
49
+ # # relativePathWrite = pathTmpTesting.absolute().relative_to(getPathPackage(), walk_up=True)
50
+ # formatFilenameWrite = "pytest_{callableTarget}"
51
+ # listSynthesizedModules: List[youOughtaKnow] = makeFlowNumbaOptimized(listCallablesInline, callableDispatcher, algorithmSource, relativePathWrite, formatFilenameWrite)
52
+ # for stuff in listSynthesizedModules:
53
+ # registrarRecordsTmpObject(stuff.pathFilenameForMe)
54
+ # if stuff.callableSynthesized not in listCallablesInline:
55
+ # dispatcherSynthetic: youOughtaKnow = stuff
56
+ # if not dispatcherSynthetic: raise FREAKOUT
57
+ # # dispatcherSynthetic: youOughtaKnow = next(filter(lambda x: x.callableSynthesized not in listCallablesInline, listSynthesizedModules))
58
+
59
+ # # Import the synthetic dispatcher module to get the callable
60
+ # dispatcherSpec = importlib.util.spec_from_file_location(
61
+ # dispatcherSynthetic.callableSynthesized,
62
+ # dispatcherSynthetic.pathFilenameForMe
63
+ # )
64
+ # if dispatcherSpec is None:
65
+ # raise ImportError(f"Failed to create module specification from {dispatcherSynthetic.pathFilenameForMe}")
66
+ # if dispatcherSpec.loader is None:
67
+ # raise ImportError(f"Failed to get loader for module {dispatcherSynthetic.pathFilenameForMe}")
68
+
69
+ # dispatcherModule = importlib.util.module_from_spec(dispatcherSpec)
70
+ # dispatcherSpec.loader.exec_module(dispatcherModule)
71
+ # callableDispatcherSynthetic = getattr(dispatcherModule, dispatcherSynthetic.callableSynthesized)
72
+
73
+ # useThisDispatcher(callableDispatcherSynthetic)
74
+
75
+ # def test_syntheticSequential(listDimensionsTestCountFolds: List[int], foldsTotalKnown: Dict[Tuple[int, ...], int]):
76
+ # standardizedEqualTo(foldsTotalKnown[tuple(listDimensionsTestCountFolds)], countFolds, listDimensionsTestCountFolds)
77
+
78
+ # def test_syntheticParallel(listDimensionsTestParallelization: List[int], foldsTotalKnown: Dict[Tuple[int, ...], int]):
79
+ # standardizedEqualTo(foldsTotalKnown[tuple(listDimensionsTestParallelization)], countFolds, listDimensionsTestParallelization, None, 'maximum')
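
The commented-out tests in the new file above lean on the standard importlib recipe for executing a synthesized module from an arbitrary file path. The recipe itself is ordinary standard-library usage; a self-contained sketch with an illustrative helper name:

import importlib.util
import pathlib
from types import ModuleType

def loadModuleFromPath(pathFilenameModule: pathlib.Path, moduleName: str = "__main__") -> ModuleType:
    """Load and execute a module from a file path, returning the module object."""
    specification = importlib.util.spec_from_file_location(moduleName, pathFilenameModule)
    if specification is None or specification.loader is None:
        raise ImportError(f"Failed to create a module specification from {pathFilenameModule}")
    module = importlib.util.module_from_spec(specification)
    module.__name__ = moduleName  # naming it "__main__" triggers any `if __name__ == "__main__":` block
    specification.loader.exec_module(module)
    return module
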
tests/test_oeis.py CHANGED
@@ -2,7 +2,6 @@ from contextlib import redirect_stdout
2
2
  from datetime import datetime, timedelta
3
3
  from mapFolding.oeis import _getFilenameOEISbFile, _getOEISidValues, _parseBFileOEIS, _validateOEISid, _getOEISidInformation
4
4
  from tests.conftest import *
5
- from typing import NoReturn
6
5
  from urllib.error import URLError
7
6
  import io
8
7
  import os
@@ -15,14 +14,6 @@ import unittest.mock
15
14
  import urllib
16
15
  import urllib.request
17
16
 
18
- def test_algorithmSourceSequential(oeisID: str, useAlgorithmDirectly: None) -> None:
19
- for n in settingsOEIS[oeisID]['valuesTestValidation']:
20
- standardizedEqualTo(settingsOEIS[oeisID]['valuesKnown'][n], oeisIDfor_n, oeisID, n)
21
-
22
- def test_aOFn_calculate_value(oeisID: str) -> None:
23
- for n in settingsOEIS[oeisID]['valuesTestValidation']:
24
- standardizedEqualTo(settingsOEIS[oeisID]['valuesKnown'][n], oeisIDfor_n, oeisID, n)
25
-
26
17
  @pytest.mark.parametrize("badID", ["A999999", " A999999 ", "A999999extra"])
27
18
  def test__validateOEISid_invalid_id(badID: str) -> None:
28
19
  standardizedEqualTo(KeyError, _validateOEISid, badID)
tests/test_other.py CHANGED
@@ -1,6 +1,5 @@
1
1
  from contextlib import redirect_stdout
2
2
  from tests.conftest import *
3
- from typing import Dict, List, Any, Literal, Callable, Generator, Optional, Union
4
3
  from Z0Z_tools import intInnit
5
4
  import io
6
5
  import itertools
tests/test_tasks.py CHANGED
@@ -1,13 +1,9 @@
1
1
  from tests.conftest import *
2
- from typing import List, Dict, Literal, Tuple
3
2
  import pytest
4
3
 
5
4
  # TODO add a test. `C` = number of logical cores available. `n = C + 1`. Ensure that `[2,n]` is computed correctly.
6
5
  # Or, probably smarter: limit the number of cores, then run a test with C+1.
7
6
 
8
- def test_algorithmSourceParallel(listDimensionsTestParallelization: List[int], foldsTotalKnown: Dict[Tuple[int, ...], int], useAlgorithmDirectly: None) -> None:
9
- standardizedEqualTo(foldsTotalKnown[tuple(listDimensionsTestParallelization)], countFolds, listDimensionsTestParallelization, None, 'maximum')
10
-
11
7
  def test_countFoldsComputationDivisionsInvalid(listDimensionsTestFunctionality: List[int]) -> None:
12
8
  standardizedEqualTo(ValueError, countFolds, listDimensionsTestFunctionality, None, {"wrong": "value"})
13
9
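
One way to realize the TODO above is to fold a [2, C+1] map, where C is the number of logical cores, and compare the 'maximum'-division parallel run against the sequential run for the same map. A hedged sketch: the positional arguments follow the existing tests in this file, the sequential-versus-parallel comparison is an assumed validation strategy rather than anything the package specifies, and the run may be slow on machines with many cores:

import os
from mapFolding import countFolds

def test_taskDivisionsExceedCores() -> None:
    coresLogical = os.cpu_count() or 1
    listDimensions = [2, coresLogical + 1]
    # Sequential and 'maximum'-division parallel runs should agree on foldsTotal.
    foldsSequential = countFolds(listDimensions)
    foldsParallel = countFolds(listDimensions, None, 'maximum')
    assert foldsSequential == foldsParallel
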
 
@@ -1,41 +0,0 @@
1
- mapFolding/__init__.py,sha256=6HgDJ2_-lnTMVwXDhTs36MgQT6Ph0kYVvuvkeUP2u9w,1321
2
- mapFolding/basecamp.py,sha256=v0VCF_Zgm_XBHcz4bqblsxfHwAxZKgenW6um77quWLk,3751
3
- mapFolding/beDRY.py,sha256=XVtLraG9VnC4yG2HkaFwZRh2td4ZHMjTQvnbcD_W130,17133
4
- mapFolding/oeis.py,sha256=rTMK4aQXxudmUGS-RkzikmSIPLdottVgJHKwY0atZqg,11120
5
- mapFolding/theDao.py,sha256=ktYQSsF6pQrWA0bj6UVbvTsWgZP8mlEj1JKGQeqeFuI,12701
6
- mapFolding/theSSOT.py,sha256=anbFOL2daNLcy9SuO_4EyFFlPsnKSpkvyO11Zf4QCoU,10174
7
- mapFolding/theSSOTnumba.py,sha256=zGq2zlZZeuxiNSO2Fs_AqV6UhybJAJuDw-2lMVvDS2w,5133
8
- mapFolding/reference/flattened.py,sha256=S6D9wiFTlbeoetEqaMLOcA-R22BHOzjqPRujffNxxUM,14875
9
- mapFolding/reference/hunterNumba.py,sha256=jDS0ORHkIhcJ1rzA5hT49sZHKf3rgJOoGesUCcbKFFY,6054
10
- mapFolding/reference/irvineJavaPort.py,sha256=7GvBU0tnS6wpFgkYad3465do9jBQW-2bYvbCYyABPHM,3341
11
- mapFolding/reference/jax.py,sha256=7ji9YWia6Kof0cjcNdiS1GG1rMbC5SBjcyVr_07AeUk,13845
12
- mapFolding/reference/lunnan.py,sha256=iAbJELfW6RKNMdPcBY9b6rGQ-z1zoRf-1XCurCRMOo8,3951
13
- mapFolding/reference/lunnanNumpy.py,sha256=rwVP3WIDXimpAuaxhRIuBYU56nVDTKlfGiclw_FkgUU,3765
14
- mapFolding/reference/lunnanWhile.py,sha256=uRrMT23jTJvoQDlD_FzeIQe_pfMXJG6_bRvs7uhC8z0,3271
15
- mapFolding/reference/rotatedEntryPoint.py,sha256=USZY3n3zwhSE68ATscUuN66t1qShuEbMI790Gz9JFTw,9352
16
- mapFolding/reference/total_countPlus1vsPlusN.py,sha256=wpgay-uqPOBd64Z4Pg6tg40j7-4pzWHGMM6v0bnmjhE,6288
17
- mapFolding/someAssemblyRequired/__init__.py,sha256=nqW7FcV65IS0tctqzzJ0MDex_z24es8OGYApzKcx1Ys,144
18
- mapFolding/someAssemblyRequired/getLLVMforNoReason.py,sha256=nX8tghZClYt7zJd6RpZBXhE_h-CGRHOS17biqiEdf-o,855
19
- mapFolding/someAssemblyRequired/makeJob.py,sha256=TZBJHgAEJ6c5XU4DHuyQgKOY-nf19Fvvoicyzn2huG0,2436
20
- mapFolding/someAssemblyRequired/synthesizeModuleJAX.py,sha256=jatvtYhK5ZJK-YmCKATt7w3icFXXO79cZDAYVrU9bgA,1258
21
- mapFolding/someAssemblyRequired/synthesizeNumba.py,sha256=949ZrnKy8rFsgZy8I5e82RMoFdCnVM9JML1l7lW-rOQ,17226
22
- mapFolding/someAssemblyRequired/synthesizeNumbaGeneralized.py,sha256=9Xa-ftqDScsvhs-2zq5vsJ6LofCdaz1hMUbKPF49vPE,16080
23
- mapFolding/someAssemblyRequired/synthesizeNumbaJob.py,sha256=5q8ROqNHODy_WacVZq0vu4AHjqfwuS6mOmRZQWQ_XWE,8128
24
- mapFolding/someAssemblyRequired/synthesizeNumbaModules.py,sha256=1V8tF-pjcmpFIywZuC-_i1EN3KsQAvI0SnIlzlXxDCs,3731
25
- mapFolding/syntheticModules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
26
- mapFolding/syntheticModules/numba_countInitialize.py,sha256=6-OlRBCEAKstJm-E0cs4VjPolYxiz9zSNvUGU6BRoTg,4274
27
- mapFolding/syntheticModules/numba_countParallel.py,sha256=64HHCOPOnsw7bSuLClaWpeH6C8D1aksZSDj8b_xQJec,5517
28
- mapFolding/syntheticModules/numba_countSequential.py,sha256=1_V4cIqiqKO2lvvc5IyNEX3ElMJs3ZiWB3HUU3sjdMI,3732
29
- mapFolding/syntheticModules/numba_doTheNeedful.py,sha256=koAKvwn5gUO4gVoeXaA12qguZAYz_Tso42xAH9UVw8o,1368
30
- tests/__init__.py,sha256=eg9smg-6VblOr0kisM40CpGnuDtU2JgEEWGDTFVOlW8,57
31
- tests/conftest.py,sha256=gZTZlqW814yucdRhtSV6IsfAM67QgoW5rkdvlveyEFQ,11095
32
- tests/test_oeis.py,sha256=72mL0MtBEYCh58t_qP8qvt6s9kV1fDk4Z9xVEapaapQ,5367
33
- tests/test_other.py,sha256=fg7t8zW7zG26mw7d06E5cLfirhDrCjelMjwFAZ0C2KM,8433
34
- tests/test_tasks.py,sha256=jjp-PzxrXMG9L7BBP_2NYLel5xlm-I552wLu3c_a4rw,2741
35
- tests/test_types.py,sha256=58tmPG9WOeGGAQbdQK_h_7t4SnENnZugH4WXlI8-L-M,171
36
- mapFolding-0.4.0.dist-info/LICENSE,sha256=NxH5Y8BdC-gNU-WSMwim3uMbID2iNDXJz7fHtuTdXhk,19346
37
- mapFolding-0.4.0.dist-info/METADATA,sha256=H82VPfUNoVNwkEeSSDHSD6JzB05x1vEl7ADd1y-7Ht8,5241
38
- mapFolding-0.4.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
39
- mapFolding-0.4.0.dist-info/entry_points.txt,sha256=F3OUeZR1XDTpoH7k3wXuRb3KF_kXTTeYhu5AGK1SiOQ,146
40
- mapFolding-0.4.0.dist-info/top_level.txt,sha256=1gP2vFaqPwHujGwb3UjtMlLEGN-943VSYFR7V4gDqW8,17
41
- mapFolding-0.4.0.dist-info/RECORD,,