mapFolding-0.7.0-py3-none-any.whl → mapFolding-0.7.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mapFolding/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
- from mapFolding.basecamp import countFolds
1
+ from mapFolding.basecamp import countFolds as countFolds
2
2
  from mapFolding.oeis import clearOEIScache, getOEISids, OEIS_for_n
3
3
 
4
4
  __all__ = [
mapFolding/basecamp.py CHANGED
@@ -43,8 +43,8 @@ def countFolds(listDimensions: Sequence[int]
43
43
  concurrencyLimit: int = setCPUlimit(CPUlimit)
44
44
  computationStateInitialized: ComputationState = outfitCountFolds(mapShape, computationDivisions, concurrencyLimit)
45
45
 
46
- dispatcher = getPackageDispatcher()
47
- computationStateComplete: ComputationState = dispatcher(computationStateInitialized)
46
+ dispatcherCallableProxy = getPackageDispatcher()
47
+ computationStateComplete: ComputationState = dispatcherCallableProxy(computationStateInitialized)
48
48
 
49
49
  computationStateComplete.getFoldsTotal()
50
50
 
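
The call sequence above fixes the dispatcher contract: a dispatcher receives an initialized `ComputationState` and returns a completed one, from which `getFoldsTotal()` derives the answer. A minimal sketch of a custom dispatcher honoring that contract (the body is hypothetical; only the signature is implied by the diff):

    from mapFolding.theSSOT import ComputationState

    def myDispatcher(state: ComputationState) -> ComputationState:
        # Count folds here, writing each task's subtotal into state.foldGroups,
        # so that state.getFoldsTotal() can combine the groups afterward.
        ...
        return state
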
mapFolding/beDRY.py CHANGED
@@ -73,7 +73,14 @@ def makeDataContainer(shape: int | tuple[int, ...], datatype: type[numpy.signedi
73
73
  return numpy.zeros(shape, dtype=numpyDtype)
74
74
 
75
75
  def setCPUlimit(CPUlimit: Any | None) -> int:
76
- """Sets CPU limit for Numba concurrent operations. Note that it can only affect Numba-jitted functions that have not yet been imported.
76
+ """Sets CPU limit for concurrent operations.
77
+
78
+ If the concurrency is managed by `numba`, the maximum number of CPUs is retrieved from `numba.get_num_threads()` and not by polling the hardware. Therefore, if there are
79
+ numba environment variables limiting the number of available CPUs, they will affect this function. That _should_ be a good thing: you control the number of CPUs available
80
+ to numba. But if you're not aware of that, you might be surprised by the results.
81
+
82
+ If you are designing custom modules that use numba, note that you must call this function (which calls `numba.set_num_threads()`) before executing an `import` statement
83
+ on a Numba-jitted function. Otherwise, `numba.set_num_threads()` will have no effect on the already-imported function.
77
84
 
78
85
  Parameters:
79
86
  CPUlimit: whether and how to limit the CPU usage. See notes for details.
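
A minimal sketch of the import-order caveat the docstring warns about; `myNumbaModule` and `countSomething` are hypothetical names:

    from mapFolding.beDRY import setCPUlimit

    setCPUlimit(4)  # configures the numba thread count before any jitted import
    from myNumbaModule import countSomething  # hypothetical Numba-jitted function

    # Per the docstring above, calling setCPUlimit(2) at this point would not
    # affect countSomething, because it has already been imported.
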
@@ -93,17 +100,17 @@ def setCPUlimit(CPUlimit: Any | None) -> int:
93
100
  if not (CPUlimit is None or isinstance(CPUlimit, (bool, int, float))):
94
101
  CPUlimit = oopsieKwargsie(CPUlimit)
95
102
 
96
- concurrencyLimit: int = int(defineConcurrencyLimit(CPUlimit))
97
103
  from mapFolding.theSSOT import concurrencyPackage
98
104
  if concurrencyPackage == 'numba':
99
105
  from numba import get_num_threads, set_num_threads
106
+ concurrencyLimit: int = defineConcurrencyLimit(CPUlimit, get_num_threads())
100
107
  set_num_threads(concurrencyLimit)
101
108
  concurrencyLimit = get_num_threads()
102
109
  elif concurrencyPackage == 'algorithm':
103
- concurrencyLimit = 1
110
+ # When to use multiprocessing.set_start_method https://github.com/hunterhogan/mapFolding/issues/6
111
+ concurrencyLimit: int = defineConcurrencyLimit(CPUlimit)
104
112
  else:
105
- raise NotImplementedError("This function only supports the 'numba' concurrency package.")
106
-
113
+ raise NotImplementedError(f"I received {concurrencyPackage=} but I don't know what to do with that.")
107
114
  return concurrencyLimit
108
115
 
109
116
  def getTaskDivisions(computationDivisions: int | str | None, concurrencyLimit: int, leavesTotal: int) -> int:
@@ -160,5 +167,5 @@ def getTaskDivisions(computationDivisions: int | str | None, concurrencyLimit: i
160
167
  def outfitCountFolds(mapShape: tuple[int, ...], computationDivisions: int | str | None = None, concurrencyLimit: int = 1) -> ComputationState:
161
168
  leavesTotal = getLeavesTotal(mapShape)
162
169
  taskDivisions = getTaskDivisions(computationDivisions, concurrencyLimit, leavesTotal)
163
- computationStateInitialized = ComputationState(mapShape, leavesTotal, taskDivisions)
170
+ computationStateInitialized = ComputationState(mapShape, leavesTotal, taskDivisions, concurrencyLimit)
164
171
  return computationStateInitialized
mapFolding/filesystem.py CHANGED
@@ -1,35 +1,8 @@
1
1
  """Filesystem functions for mapFolding package."""
2
- from pathlib import Path
2
+ from pathlib import Path, PurePath
3
+ from typing import Any
3
4
  import os
4
5
 
5
- def saveFoldsTotal(pathFilename: str | os.PathLike[str], foldsTotal: int) -> None:
6
- """
7
- Save foldsTotal with multiple fallback mechanisms.
8
-
9
- Parameters:
10
- pathFilename: Target save location
11
- foldsTotal: Critical computed value to save
12
- """
13
- try:
14
- pathFilenameFoldsTotal = Path(pathFilename)
15
- pathFilenameFoldsTotal.parent.mkdir(parents=True, exist_ok=True)
16
- pathFilenameFoldsTotal.write_text(str(foldsTotal))
17
- except Exception as ERRORmessage:
18
- try:
19
- print(f"\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n\n{foldsTotal=}\n\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n")
20
- print(ERRORmessage)
21
- print(f"\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n\n{foldsTotal=}\n\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n")
22
- randomnessPlanB = (int(str(foldsTotal).strip()[-1]) + 1) * ['YO_']
23
- filenameInfixUnique = ''.join(randomnessPlanB)
24
- pathFilenamePlanB = os.path.join(os.getcwd(), 'foldsTotal' + filenameInfixUnique + '.txt')
25
- writeStreamFallback = open(pathFilenamePlanB, 'w')
26
- writeStreamFallback.write(str(foldsTotal))
27
- writeStreamFallback.close()
28
- print(str(pathFilenamePlanB))
29
- except Exception:
30
- print(foldsTotal)
31
- return None
32
-
33
6
  def getFilenameFoldsTotal(mapShape: tuple[int, ...]) -> str:
34
7
  """Imagine your computer has been counting folds for 9 days, and when it tries to save your newly discovered value,
35
8
  the filename is invalid. I bet you think this function is more important after that thought experiment.
@@ -85,3 +58,38 @@ def getPathFilenameFoldsTotal(mapShape: tuple[int, ...], pathLikeWriteFoldsTotal
85
58
 
86
59
  pathFilenameFoldsTotal.parent.mkdir(parents=True, exist_ok=True)
87
60
  return pathFilenameFoldsTotal
61
+
62
+ def saveFoldsTotal(pathFilename: str | os.PathLike[str], foldsTotal: int) -> None:
63
+ """
64
+ Save foldsTotal with multiple fallback mechanisms.
65
+
66
+ Parameters:
67
+ pathFilename: Target save location
68
+ foldsTotal: Critical computed value to save
69
+ """
70
+ try:
71
+ pathFilenameFoldsTotal = Path(pathFilename)
72
+ pathFilenameFoldsTotal.parent.mkdir(parents=True, exist_ok=True)
73
+ pathFilenameFoldsTotal.write_text(str(foldsTotal))
74
+ except Exception as ERRORmessage:
75
+ try:
76
+ print(f"\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n\n{foldsTotal=}\n\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n")
77
+ print(ERRORmessage)
78
+ print(f"\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n\n{foldsTotal=}\n\nfoldsTotal foldsTotal foldsTotal foldsTotal foldsTotal\n")
79
+ randomnessPlanB = (int(str(foldsTotal).strip()[-1]) + 1) * ['YO_']
80
+ filenameInfixUnique = ''.join(randomnessPlanB)
81
+ pathFilenamePlanB = os.path.join(os.getcwd(), 'foldsTotal' + filenameInfixUnique + '.txt')
82
+ writeStreamFallback = open(pathFilenamePlanB, 'w')
83
+ writeStreamFallback.write(str(foldsTotal))
84
+ writeStreamFallback.close()
85
+ print(str(pathFilenamePlanB))
86
+ except Exception:
87
+ print(foldsTotal)
88
+ return None
89
+
90
+ def writeStringToHere(this: str, pathFilename: str | os.PathLike[Any] | PurePath) -> None:
91
+ """Write the string `this` to the file at `pathFilename`."""
92
+ pathFilename = Path(pathFilename)
93
+ pathFilename.parent.mkdir(parents=True, exist_ok=True)
94
+ pathFilename.write_text(str(this))
95
+ return None
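
A short usage sketch of the writers above; the paths and the count are illustrative placeholders:

    from mapFolding.filesystem import getFilenameFoldsTotal, saveFoldsTotal, writeStringToHere

    mapShape = (2, 5)  # illustrative map shape
    filename = getFilenameFoldsTotal(mapShape)
    saveFoldsTotal('counts/' + filename, 12345)  # on failure, falls back to cwd, then stdout
    writeStringToHere('12345', 'notes/example.txt')  # creates parent directories as needed
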
mapFolding/someAssemblyRequired/Z0Z_workbench.py CHANGED
@@ -1,4 +1,3 @@
1
- from mapFolding.someAssemblyRequired.synthesizeDataConverters import makeStateJob
2
1
  from mapFolding.someAssemblyRequired.synthesizeDataConverters import makeDataclassConverter
3
2
  from mapFolding.someAssemblyRequired.whatWillBe import IngredientsFunction, IngredientsModule, numbaFlow
4
3
  from mapFolding.someAssemblyRequired.synthesizeCountingFunctions import Z0Z_makeCountingFunction
@@ -6,13 +5,13 @@ import ast
6
5
 
7
6
  if __name__ == '__main__':
8
7
  ingredientsFunctionDataConverter = makeDataclassConverter(
9
- dataclassIdentifierAsStr=numbaFlow.dataclassIdentifierAsStr
8
+ dataclassIdentifier=numbaFlow.sourceDataclassIdentifier
10
9
  , logicalPathModuleDataclass=numbaFlow.logicalPathModuleDataclass
11
- , dataclassInstanceAsStr=numbaFlow.dataclassInstanceAsStr
10
+ , dataclassInstance=numbaFlow.dataclassInstance
12
11
 
13
- , dispatcherCallableAsStr=numbaFlow.dispatcherCallableAsStr
12
+ , dispatcherCallable=numbaFlow.dispatcherCallable
14
13
  , logicalPathModuleDispatcher=numbaFlow.logicalPathModuleDispatcher
15
- , dataConverterCallableAsStr=numbaFlow.dataConverterCallableAsStr
14
+ , dataConverterCallable=numbaFlow.dataConverterCallable
16
15
  )
17
16
 
18
17
  # initialize with theDao
@@ -20,7 +19,7 @@ if __name__ == '__main__':
20
19
  ingredientsFunctionDataConverter.FunctionDef.body.insert(0, ast.parse(dataInitializationHack).body[0])
21
20
  ingredientsFunctionDataConverter.imports.addImportFromStr('mapFolding.someAssemblyRequired', 'makeStateJob')
22
21
 
23
- ingredientsSequential = Z0Z_makeCountingFunction(numbaFlow.sequentialCallableAsStr
22
+ ingredientsSequential = Z0Z_makeCountingFunction(numbaFlow.sequentialCallable
24
23
  , numbaFlow.sourceAlgorithm
25
24
  , inline=True
26
25
  , dataclass=False)
@@ -28,7 +27,7 @@ if __name__ == '__main__':
28
27
  ingredientsModuleDataConverter = IngredientsModule(
29
28
  name=numbaFlow.dataConverterModule,
30
29
  ingredientsFunction=ingredientsFunctionDataConverter,
31
- logicalPathINFIX=numbaFlow.moduleOfSyntheticModules,
30
+ logicalPathINFIX=numbaFlow.Z0Z_flowLogicalPathRoot,
32
31
  )
33
32
 
34
33
  ingredientsModuleDataConverter.writeModule()
mapFolding/someAssemblyRequired/synthesizeDataConverters.py CHANGED
@@ -67,19 +67,19 @@ def shatter_dataclassesDOTdataclass(logicalPathModule: strDotStrCuzPyStoopid, da
67
67
  astTupleForAssignTargetsToFragments: ast.Tuple = Make.astTuple(list_astNameDataclassFragments, ast.Store())
68
68
  return astNameDataclass, ledgerDataclassAndFragments, list_astAnnAssign, list_astNameDataclassFragments, listKeywordForDataclassInitialization, astTupleForAssignTargetsToFragments
69
69
 
70
- def makeDataclassConverter(dataclassIdentifierAsStr: str,
70
+ def makeDataclassConverter(dataclassIdentifier: str,
71
71
  logicalPathModuleDataclass: str,
72
- dataclassInstanceAsStr: str,
73
- dispatcherCallableAsStr: str,
72
+ dataclassInstance: str,
73
+ dispatcherCallable: str,
74
74
  logicalPathModuleDispatcher: str,
75
- dataConverterCallableAsStr: str,
75
+ dataConverterCallable: str,
76
76
  ) -> IngredientsFunction:
77
77
 
78
- astNameDataclass, ledgerDataclassAndFragments, list_astAnnAssign, list_astNameDataclassFragments, list_astKeywordDataclassFragments, astTupleForAssignTargetsToFragments = shatter_dataclassesDOTdataclass(logicalPathModuleDataclass, dataclassIdentifierAsStr, dataclassInstanceAsStr)
78
+ astNameDataclass, ledgerDataclassAndFragments, list_astAnnAssign, list_astNameDataclassFragments, list_astKeywordDataclassFragments, astTupleForAssignTargetsToFragments = shatter_dataclassesDOTdataclass(logicalPathModuleDataclass, dataclassIdentifier, dataclassInstance)
79
79
 
80
80
  ingredientsFunction = IngredientsFunction(
81
- FunctionDef = Make.astFunctionDef(name=dataConverterCallableAsStr
82
- , argumentsSpecification=Make.astArgumentsSpecification(args=[Make.astArg(dataclassInstanceAsStr, astNameDataclass)])
81
+ FunctionDef = Make.astFunctionDef(name=dataConverterCallable
82
+ , argumentsSpecification=Make.astArgumentsSpecification(args=[Make.astArg(dataclassInstance, astNameDataclass)])
83
83
  , body = cast(list[ast.stmt], list_astAnnAssign)
84
84
  , returns = astNameDataclass
85
85
  )
@@ -87,9 +87,9 @@ def makeDataclassConverter(dataclassIdentifierAsStr: str,
87
87
  )
88
88
 
89
89
  callToDispatcher = Make.astAssign(listTargets=[astTupleForAssignTargetsToFragments]
90
- , value=Make.astCall(Make.astName(dispatcherCallableAsStr), args=list_astNameDataclassFragments))
90
+ , value=Make.astCall(Make.astName(dispatcherCallable), args=list_astNameDataclassFragments))
91
91
  ingredientsFunction.FunctionDef.body.append(callToDispatcher)
92
- ingredientsFunction.imports.addImportFromStr(logicalPathModuleDispatcher, dispatcherCallableAsStr)
92
+ ingredientsFunction.imports.addImportFromStr(logicalPathModuleDispatcher, dispatcherCallable)
93
93
 
94
94
  ingredientsFunction.FunctionDef.body.append(Make.astReturn(Make.astCall(astNameDataclass, list_astKeywords=list_astKeywordDataclassFragments)))
95
95
 
mapFolding/someAssemblyRequired/transformationTools.py CHANGED
@@ -316,7 +316,7 @@ class Make:
316
316
 
317
317
  @staticmethod
318
318
  def astModule(body: list[ast.stmt], type_ignores: list[ast.TypeIgnore] = []) -> ast.Module:
319
- return ast.Module(body=body, type_ignores=type_ignores)
319
+ return ast.Module(body, type_ignores)
320
320
 
321
321
  @staticmethod
322
322
  def astName(identifier: ast_Identifier) -> ast.Name:
mapFolding/someAssemblyRequired/whatWillBe.py CHANGED
@@ -4,68 +4,69 @@
4
4
  - Therefore, an abstracted system for creating settings for the package
5
5
  - And with only a little more effort, an abstracted system for creating settings to synthesize arbitrary subsets of modules for arbitrary packages
6
6
  """
7
- from mapFolding.someAssemblyRequired.transformationTools import *
7
+ from mapFolding.someAssemblyRequired.transformationTools import (
8
+ ast_Identifier,
9
+ executeActionUnlessDescendantMatches,
10
+ extractClassDef,
11
+ extractFunctionDef,
12
+ ifThis,
13
+ Make,
14
+ NodeCollector,
15
+ NodeReplacer,
16
+ strDotStrCuzPyStoopid,
17
+ Then,
18
+ )
19
+ from mapFolding.filesystem import writeStringToHere
8
20
  from mapFolding.theSSOT import (
9
21
  FREAKOUT,
10
22
  getDatatypePackage,
11
23
  getSourceAlgorithm,
12
- theDataclassIdentifierAsStr,
13
- theDataclassInstanceAsStr,
14
- theDispatcherCallableAsStr,
24
+ theDataclassIdentifier,
25
+ theDataclassInstance,
26
+ theDispatcherCallable,
15
27
  theFileExtension,
16
28
  theFormatStrModuleForCallableSynthetic,
17
29
  theFormatStrModuleSynthetic,
18
30
  theLogicalPathModuleDataclass,
19
31
  theLogicalPathModuleDispatcherSynthetic,
32
+ theModuleDispatcherSynthetic,
20
33
  theModuleOfSyntheticModules,
21
34
  thePackageName,
22
35
  thePathPackage,
23
- Z0Z_sequentialCallableAsStr,
36
+ theSourceInitializeCallable,
37
+ theSourceParallelCallable,
38
+ theSourceSequentialCallable,
24
39
  )
25
40
  from autoflake import fix_code as autoflake_fix_code
26
41
  from collections import defaultdict
27
42
  from collections.abc import Sequence
28
43
  from inspect import getsource as inspect_getsource
29
44
  from mapFolding.someAssemblyRequired.ingredientsNumba import parametersNumbaDEFAULT, parametersNumbaSuperJit, parametersNumbaSuperJitParallel, ParametersNumba
30
- from pathlib import Path
45
+ from pathlib import Path, PurePosixPath
31
46
  from types import ModuleType
32
47
  from typing import NamedTuple
33
48
  from Z0Z_tools import updateExtendPolishDictionaryLists
34
49
  import ast
35
50
  import dataclasses
36
51
 
37
- """
38
- Start with what is: theDao.py
39
- Create settings that can transform into what I or the user want it to be.
40
-
41
- The simplest flow with numba is:
42
- 1. one module
43
- 2. dispatcher
44
- - initialize data with makeJob
45
- - smash dataclass
46
- - call countSequential
47
- 3. countSequential
48
- - jitted, not super-jitted
49
- - functions inlined (or I'd have to jit them)
50
- - return groupsOfFolds
51
- 4. recycle the dataclass with groupsOfFolds
52
- 5. return the dataclass
53
- """
54
-
55
52
  @dataclasses.dataclass
56
53
  class RecipeSynthesizeFlow:
57
54
  """Settings for synthesizing flow."""
58
55
  # TODO consider `IngredientsFlow` or similar
56
+ # ========================================
57
+ # Source
59
58
  sourceAlgorithm: ModuleType = getSourceAlgorithm()
60
59
  sourcePython: str = inspect_getsource(sourceAlgorithm)
61
- # sourcePython: str = inspect_getsource(self.sourceAlgorithm)
62
- # "self" is not defined
63
- # I still hate the OOP paradigm. But I like this dataclass stuff.
64
60
  source_astModule: ast.Module = ast.parse(sourcePython)
61
+ # https://github.com/hunterhogan/mapFolding/issues/4
62
+ sourceDispatcherCallable: str = theDispatcherCallable
63
+ sourceSequentialCallable: str = theSourceSequentialCallable
64
+ sourceDataclassIdentifier: str = theDataclassIdentifier
65
+ # I still hate the OOP paradigm. But I like this dataclass stuff.
65
66
 
66
67
  # ========================================
67
68
  # Filesystem
68
- pathPackage: Path = thePathPackage
69
+ pathPackage: PurePosixPath = PurePosixPath(thePathPackage)
69
70
  fileExtension: str = theFileExtension
70
71
 
71
72
  # ========================================
@@ -78,19 +79,23 @@ class RecipeSynthesizeFlow:
78
79
  packageName: ast_Identifier = thePackageName
79
80
 
80
81
  # Module
81
- moduleOfSyntheticModules: str = theModuleOfSyntheticModules
82
+ # https://github.com/hunterhogan/mapFolding/issues/4
83
+ Z0Z_flowLogicalPathRoot: str = theModuleOfSyntheticModules
84
+ moduleDispatcher: str = theModuleDispatcherSynthetic
82
85
  logicalPathModuleDataclass: str = theLogicalPathModuleDataclass
86
+ # https://github.com/hunterhogan/mapFolding/issues/4
87
+ # `theLogicalPathModuleDispatcherSynthetic` is a problem. It is defined in theSSOT, but it can also be calculated.
83
88
  logicalPathModuleDispatcher: str = theLogicalPathModuleDispatcherSynthetic
84
89
  dataConverterModule: str = 'dataNamespaceFlattened'
85
90
 
86
91
  # Function
87
- dataclassIdentifierAsStr: str = theDataclassIdentifierAsStr
88
- dispatcherCallableAsStr: str = theDispatcherCallableAsStr
89
- dataConverterCallableAsStr: str = 'flattenData'
90
- sequentialCallableAsStr: str = Z0Z_sequentialCallableAsStr
92
+ sequentialCallable: str = sourceSequentialCallable
93
+ dataclassIdentifier: str = sourceDataclassIdentifier
94
+ dataConverterCallable: str = 'unpackDataclassPackUp'
95
+ dispatcherCallable: str = sourceDispatcherCallable
91
96
 
92
97
  # Variable
93
- dataclassInstanceAsStr: str = theDataclassInstanceAsStr
98
+ dataclassInstance: str = theDataclassInstance
94
99
 
95
100
  class LedgerOfImports:
96
101
  def __init__(self, startWith: ast.AST | None = None) -> None:
@@ -117,6 +122,11 @@ class LedgerOfImports:
117
122
  def addImportFromStr(self, module: str, name: str, asname: str | None = None) -> None:
118
123
  self.dictionaryImportFrom[module].append((name, asname))
119
124
 
125
+ def exportListModuleNames(self) -> list[str]:
126
+ listModuleNames: list[str] = list(self.dictionaryImportFrom.keys())
127
+ listModuleNames.extend(self.listImport)
128
+ return sorted(set(listModuleNames))
129
+
120
130
  def makeListAst(self) -> list[ast.ImportFrom | ast.Import]:
121
131
  listAstImportFrom: list[ast.ImportFrom] = []
122
132
 
@@ -147,6 +157,12 @@ class LedgerOfImports:
147
157
  if isinstance(smurf, (ast.Import, ast.ImportFrom)):
148
158
  self.addAst(smurf)
149
159
 
160
+ @dataclasses.dataclass
161
+ class Z0Z_IngredientsDataStructure:
162
+ """Everything necessary to create a data structure should be here."""
163
+ dataclassDef: ast.ClassDef
164
+ imports: LedgerOfImports = dataclasses.field(default_factory=LedgerOfImports)
165
+
150
166
  @dataclasses.dataclass
151
167
  class IngredientsFunction:
152
168
  """Everything necessary to integrate a function into a module should be here."""
@@ -155,21 +171,89 @@ class IngredientsFunction:
155
171
 
156
172
  @dataclasses.dataclass
157
173
  class IngredientsModule:
158
- """Everything necessary to create a module, including the package context, should be here."""
159
- name: ast_Identifier
174
+ """Everything necessary to create one _logical_ `ast.Module` should be here.
175
+ Extrinsic qualities should be handled externally, such as with `RecipeModule`."""
176
+ # If an `ast.Module` had a logical name, that would be reasonable, but Python is firmly opposed
177
+ # to a reasonable namespace. Therefore, Hunter, you were silly to add a `name` field to this
178
+ # dataclass for building an `ast.Module`.
179
+ # name: ast_Identifier
180
+ # Hey, genius, note that this is dataclasses.InitVar
160
181
  ingredientsFunction: dataclasses.InitVar[Sequence[IngredientsFunction] | IngredientsFunction | None] = None
161
182
 
183
+ # `body` attribute of `ast.Module`
162
184
  imports: LedgerOfImports = dataclasses.field(default_factory=LedgerOfImports)
163
185
  prologue: list[ast.stmt] = dataclasses.field(default_factory=list)
164
186
  functions: list[ast.FunctionDef | ast.stmt] = dataclasses.field(default_factory=list)
165
187
  epilogue: list[ast.stmt] = dataclasses.field(default_factory=list)
166
188
  launcher: list[ast.stmt] = dataclasses.field(default_factory=list)
167
189
 
190
+ # parameter for `ast.Module` constructor
191
+ type_ignores: list[ast.TypeIgnore] = dataclasses.field(default_factory=list)
192
+
193
+ def __post_init__(self, ingredientsFunction: Sequence[IngredientsFunction] | IngredientsFunction | None = None) -> None:
194
+ if ingredientsFunction is not None:
195
+ if isinstance(ingredientsFunction, IngredientsFunction):
196
+ self.addIngredientsFunction(ingredientsFunction)
197
+ else:
198
+ self.addIngredientsFunction(*ingredientsFunction)
199
+
200
+ def addIngredientsFunction(self, *ingredientsFunction: IngredientsFunction) -> None:
201
+ """Add one or more `IngredientsFunction`. """
202
+ listLedgers: list[LedgerOfImports] = []
203
+ for definition in ingredientsFunction:
204
+ self.functions.append(definition.FunctionDef)
205
+ listLedgers.append(definition.imports)
206
+ self.imports.update(*listLedgers)
207
+
208
+ def _makeModuleBody(self) -> list[ast.stmt]:
209
+ body: list[ast.stmt] = []
210
+ body.extend(self.imports.makeListAst())
211
+ body.extend(self.prologue)
212
+ body.extend(self.functions)
213
+ body.extend(self.epilogue)
214
+ body.extend(self.launcher)
215
+ # TODO `launcher`, if it exists, must start with `if __name__ == '__main__':` and be indented
216
+ return body
217
+
218
+ def export(self) -> ast.Module:
219
+ """Create a new `ast.Module` from the ingredients."""
220
+ return Make.astModule(self._makeModuleBody(), self.type_ignores)
221
+
222
+ @dataclasses.dataclass
223
+ class RecipeCountingFunction:
224
+ """Settings for synthesizing counting functions."""
225
+ ingredients: IngredientsFunction
226
+
227
+ @dataclasses.dataclass
228
+ class RecipeDispatchFunction:
229
+ # A "dispatcher" must receive a dataclass instance and return a dataclass instance.
230
+ # computationStateComplete: ComputationState = dispatcher(computationStateInitialized)
231
+ # The most critical values in the returned dataclass are foldGroups[0:-1] and leavesTotal
232
+ # self.foldsTotal = DatatypeFoldsTotal(self.foldGroups[0:-1].sum() * self.leavesTotal)
233
+ # the function name is required by IngredientsFunction
234
+ ingredients: IngredientsFunction
235
+ logicalPathModuleDataclass: str = theLogicalPathModuleDataclass
236
+ dataclassIdentifier: str = theDataclassIdentifier
237
+ dataclassInstance: str = theDataclassInstance
238
+ Z0Z_unpackDataclass: bool = True
239
+ countDispatcher: bool = True
240
+ # Is this the countDispatcher itself, or the information for calling the countDispatcher: import or not? callable identifier? parameters? return type?
241
+ # countDispatcher lives in `theLogicalPathModuleDispatcherSynthetic`
242
+ # countDispatcher is named `theDispatcherCallable`
243
+ # post init
244
+ # addImportFromStr(self, module: str, name: str, asname: str | None = None)
245
+
246
+ @dataclasses.dataclass
247
+ class RecipeModule:
248
+ """How to get one or more logical `ast.Module` on disk as one physical module."""
249
+ # Physical namespace
250
+ filenameStem: str
251
+ fileExtension: str = theFileExtension
252
+ pathPackage: PurePosixPath = PurePosixPath(thePathPackage)
253
+
254
+ # Physical and logical namespace
168
255
  packageName: ast_Identifier | None= thePackageName
169
256
  logicalPathINFIX: ast_Identifier | strDotStrCuzPyStoopid | None = None # module names other than the module itself and the package name
170
- pathPackage: Path = thePathPackage
171
- fileExtension: str = theFileExtension
172
- type_ignores: list[ast.TypeIgnore] = dataclasses.field(default_factory=list)
173
257
 
174
258
  def _getLogicalPathParent(self) -> str | None:
175
259
  listModules: list[ast_Identifier] = []
@@ -185,18 +269,22 @@ class IngredientsModule:
185
269
  logicalPathParent: str | None = self._getLogicalPathParent()
186
270
  if logicalPathParent:
187
271
  listModules.append(logicalPathParent)
188
- listModules.append(self.name)
272
+ listModules.append(self.filenameStem)
189
273
  return '.'.join(listModules)
190
274
 
191
275
  @property
192
- def pathFilename(self) -> Path:
193
- pathRoot: Path = self.pathPackage
194
- filename = self.name + self.fileExtension
276
+ def pathFilename(self):
277
+ """ `PurePosixPath` ensures os-independent formatting of the `dataclass.field` value,
278
+ but you must convert to `Path` to perform filesystem operations."""
279
+ pathRoot: PurePosixPath = self.pathPackage
280
+ filename: str = self.filenameStem + self.fileExtension
195
281
  if self.logicalPathINFIX:
196
- whyIsThisStillAThing = self.logicalPathINFIX.split('.')
282
+ whyIsThisStillAThing: list[str] = self.logicalPathINFIX.split('.')
197
283
  pathRoot = pathRoot.joinpath(*whyIsThisStillAThing)
198
284
  return pathRoot.joinpath(filename)
199
285
 
286
+ ingredients: IngredientsModule = IngredientsModule()
287
+
200
288
  @property
201
289
  def absoluteImport(self) -> ast.Import:
202
290
  return Make.astImport(self._getLogicalPathAbsolute())
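
A tiny sketch of the `PurePosixPath` point made in `pathFilename` above: the stored value stays OS-independent, and conversion to `Path` happens only at the filesystem boundary (the path shown is illustrative):

    from pathlib import Path, PurePosixPath

    pathFilename = PurePosixPath('mapFolding/syntheticModules/example.py')  # stored form, '/'-separated
    Path(pathFilename).parent.mkdir(parents=True, exist_ok=True)  # convert to Path for real I/O
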
@@ -204,76 +292,33 @@ class IngredientsModule:
204
292
  @property
205
293
  def absoluteImportFrom(self) -> ast.ImportFrom:
206
294
  """ `from . import theModule` """
207
- logicalPathParent: str | None = self._getLogicalPathParent()
208
- if logicalPathParent is None:
209
- logicalPathParent = '.'
210
- return Make.astImportFrom(logicalPathParent, [Make.astAlias(self.name)])
211
-
212
- def __post_init__(self, ingredientsFunction: Sequence[IngredientsFunction] | IngredientsFunction | None = None) -> None:
213
- if ingredientsFunction is not None:
214
- if isinstance(ingredientsFunction, IngredientsFunction):
215
- self.addIngredientsFunction(ingredientsFunction)
216
- else:
217
- self.addIngredientsFunction(*ingredientsFunction)
218
-
219
- def addIngredientsFunction(self, *ingredientsFunction: IngredientsFunction) -> None:
220
- """Add one or more `IngredientsFunction`. """
221
- listLedgers: list[LedgerOfImports] = []
222
- for definition in ingredientsFunction:
223
- self.functions.append(definition.FunctionDef)
224
- listLedgers.append(definition.imports)
225
- self.imports.update(*listLedgers)
226
-
227
- def _makeModuleBody(self) -> list[ast.stmt]:
228
- """Constructs the body of the module, including prologue, functions, epilogue, and launcher."""
229
- body: list[ast.stmt] = []
230
- body.extend(self.imports.makeListAst())
231
- body.extend(self.prologue)
232
- body.extend(self.functions)
233
- body.extend(self.epilogue)
234
- body.extend(self.launcher)
235
- # TODO `launcher` must start with `if __name__ == '__main__':` and be indented
236
- return body
295
+ logicalPathParent: str = self._getLogicalPathParent() or '.'
296
+ return Make.astImportFrom(logicalPathParent, [Make.astAlias(self.filenameStem)])
237
297
 
238
298
  def writeModule(self) -> None:
239
- """Writes the module to disk with proper imports and functions.
240
-
241
- This method creates a proper AST module with imports and function definitions,
242
- fixes missing locations, unpacks the AST to Python code, applies autoflake
243
- to clean up imports, and writes the resulting code to the appropriate file.
244
- """
245
- astModule = Make.astModule(body=self._makeModuleBody(), type_ignores=self.type_ignores)
299
+ astModule = self.ingredients.export()
246
300
  ast.fix_missing_locations(astModule)
247
301
  pythonSource: str = ast.unparse(astModule)
248
302
  if not pythonSource: raise FREAKOUT
249
- autoflake_additional_imports: list[str] = []
303
+ autoflake_additional_imports: list[str] = self.ingredients.imports.exportListModuleNames()
250
304
  if self.packageName:
251
305
  autoflake_additional_imports.append(self.packageName)
252
- # TODO LedgerOfImports method: list of package names. autoflake_additional_imports.extend()
253
- autoflake_additional_imports.append(getDatatypePackage())
254
306
  pythonSource = autoflake_fix_code(pythonSource, autoflake_additional_imports, expand_star_imports=False, remove_all_unused_imports=False, remove_duplicate_keys = False, remove_unused_variables = False,)
255
- self.pathFilename.write_text(pythonSource)
256
-
257
- @dataclasses.dataclass
258
- class RecipeSynthesizeCountingFunction:
259
- """Settings for synthesizing counting functions."""
260
- ingredients: IngredientsFunction
307
+ writeStringToHere(pythonSource, self.pathFilename)
261
308
 
262
309
  numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()
263
310
 
264
311
  # https://github.com/hunterhogan/mapFolding/issues/3
265
- sequentialFunctionDef = extractFunctionDef(numbaFlow.sequentialCallableAsStr, numbaFlow.source_astModule)
266
- if sequentialFunctionDef is None: raise FREAKOUT
312
+ sourceSequentialFunctionDef = extractFunctionDef(numbaFlow.sourceSequentialCallable, numbaFlow.source_astModule)
313
+ if sourceSequentialFunctionDef is None: raise FREAKOUT
267
314
 
268
- numbaCountSequential = RecipeSynthesizeCountingFunction(IngredientsFunction(
269
- FunctionDef=sequentialFunctionDef,
315
+ numbaCountSequential = RecipeCountingFunction(IngredientsFunction(
316
+ FunctionDef=sourceSequentialFunctionDef,
270
317
  imports=LedgerOfImports(numbaFlow.source_astModule)
271
318
  ))
272
319
 
273
- # the data converter and the dispatcher could be in the same module.
274
-
275
- Z0Z_autoflake_additional_imports: list[str] = []
276
- Z0Z_autoflake_additional_imports.append(thePackageName)
320
+ numbaDispatcher = RecipeModule(filenameStem=numbaFlow.moduleDispatcher, fileExtension=numbaFlow.fileExtension, pathPackage=numbaFlow.pathPackage,
321
+ packageName=numbaFlow.packageName, logicalPathINFIX=numbaFlow.Z0Z_flowLogicalPathRoot)
277
322
 
278
323
  class ParametersSynthesizeNumbaCallable(NamedTuple):
279
324
  callableTarget: str
@@ -292,6 +337,7 @@ _decoratorCallable = ''
292
337
  # if numba
293
338
  _datatypeModuleScalar = 'numba'
294
339
  _decoratorCallable = 'jit'
340
+ Z0Z_autoflake_additional_imports: list[str] = []
295
341
  Z0Z_autoflake_additional_imports.append('numba')
296
342
 
297
343
  def Z0Z_getDatatypeModuleScalar() -> str:
mapFolding/syntheticModules/multiprocessingCount_doTheNeedful.py ADDED
@@ -0,0 +1,216 @@
1
+ from concurrent.futures import ProcessPoolExecutor
2
+ from mapFolding.theSSOT import ComputationState
3
+ import concurrent.futures
4
+ import copy
5
+ import multiprocessing
6
+
7
+ def activeLeafConnectedToItself(state: ComputationState) -> bool:
8
+ return state.leafConnectee == state.leaf1ndex
9
+
10
+ def activeLeafGreaterThan0(state: ComputationState) -> bool:
11
+ return state.leaf1ndex > 0
12
+
13
+ def activeLeafGreaterThanLeavesTotal(state: ComputationState) -> bool:
14
+ return state.leaf1ndex > state.leavesTotal
15
+
16
+ def activeLeafIsTheFirstLeaf(state: ComputationState) -> bool:
17
+ return state.leaf1ndex <= 1
18
+
19
+ def allDimensionsAreUnconstrained(state: ComputationState) -> bool:
20
+ return not state.dimensionsUnconstrained
21
+
22
+ def backtrack(state: ComputationState) -> ComputationState:
23
+ state.leaf1ndex -= 1
24
+ state.leafBelow[state.leafAbove[state.leaf1ndex]] = state.leafBelow[state.leaf1ndex]
25
+ state.leafAbove[state.leafBelow[state.leaf1ndex]] = state.leafAbove[state.leaf1ndex]
26
+ return state
27
+
28
+ def countGaps(state: ComputationState) -> ComputationState:
29
+ state.gapsWhere[state.gap1ndexCeiling] = state.leafConnectee
30
+ if state.countDimensionsGapped[state.leafConnectee] == 0:
31
+ state = incrementGap1ndexCeiling(state)
32
+ state.countDimensionsGapped[state.leafConnectee] += 1
33
+ return state
34
+
35
+ def decrementDimensionsUnconstrained(state: ComputationState) -> ComputationState:
36
+ state.dimensionsUnconstrained -= 1
37
+ return state
38
+
39
+ def dimensionsUnconstrainedCondition(state: ComputationState) -> bool:
40
+ return state.connectionGraph[state.indexDimension, state.leaf1ndex, state.leaf1ndex] == state.leaf1ndex
41
+
42
+ def filterCommonGaps(state: ComputationState) -> ComputationState:
43
+ state.gapsWhere[state.gap1ndex] = state.gapsWhere[state.indexMiniGap]
44
+ if state.countDimensionsGapped[state.gapsWhere[state.indexMiniGap]] == state.dimensionsUnconstrained:
45
+ state = incrementActiveGap(state)
46
+ state.countDimensionsGapped[state.gapsWhere[state.indexMiniGap]] = 0
47
+ return state
48
+
49
+ def incrementActiveGap(state: ComputationState) -> ComputationState:
50
+ state.gap1ndex += 1
51
+ return state
52
+
53
+ def incrementGap1ndexCeiling(state: ComputationState) -> ComputationState:
54
+ state.gap1ndexCeiling += 1
55
+ return state
56
+
57
+ def incrementIndexDimension(state: ComputationState) -> ComputationState:
58
+ state.indexDimension += 1
59
+ return state
60
+
61
+ def incrementIndexMiniGap(state: ComputationState) -> ComputationState:
62
+ state.indexMiniGap += 1
63
+ return state
64
+
65
+ def initializeIndexMiniGap(state: ComputationState) -> ComputationState:
66
+ state.indexMiniGap = state.gap1ndex
67
+ return state
68
+
69
+ def initializeLeafConnectee(state: ComputationState) -> ComputationState:
70
+ state.leafConnectee = state.connectionGraph[state.indexDimension, state.leaf1ndex, state.leaf1ndex]
71
+ return state
72
+
73
+ def initializeVariablesToFindGaps(state: ComputationState) -> ComputationState:
74
+ state.dimensionsUnconstrained = state.dimensionsTotal
75
+ state.gap1ndexCeiling = state.gapRangeStart[state.leaf1ndex - 1]
76
+ state.indexDimension = 0
77
+ return state
78
+
79
+ def insertUnconstrainedLeaf(state: ComputationState) -> ComputationState:
80
+ indexLeaf = 0
81
+ while indexLeaf < state.leaf1ndex:
82
+ state.gapsWhere[state.gap1ndexCeiling] = indexLeaf
83
+ state.gap1ndexCeiling += 1
84
+ indexLeaf += 1
85
+ return state
86
+
87
+ def leafBelowSentinelIs1(state: ComputationState) -> bool:
88
+ return state.leafBelow[0] == 1
89
+
90
+ def loopingLeavesConnectedToActiveLeaf(state: ComputationState) -> bool:
91
+ return state.leafConnectee != state.leaf1ndex
92
+
93
+ def loopingToActiveGapCeiling(state: ComputationState) -> bool:
94
+ return state.indexMiniGap < state.gap1ndexCeiling
95
+
96
+ def loopUpToDimensionsTotal(state: ComputationState) -> bool:
97
+ return state.indexDimension < state.dimensionsTotal
98
+
99
+ def noGapsHere(state: ComputationState) -> bool:
100
+ return (state.leaf1ndex > 0) and (state.gap1ndex == state.gapRangeStart[state.leaf1ndex - 1])
101
+
102
+ def placeLeaf(state: ComputationState) -> ComputationState:
103
+ state.gap1ndex -= 1
104
+ state.leafAbove[state.leaf1ndex] = state.gapsWhere[state.gap1ndex]
105
+ state.leafBelow[state.leaf1ndex] = state.leafBelow[state.leafAbove[state.leaf1ndex]]
106
+ state.leafBelow[state.leafAbove[state.leaf1ndex]] = state.leaf1ndex
107
+ state.leafAbove[state.leafBelow[state.leaf1ndex]] = state.leaf1ndex
108
+ state.gapRangeStart[state.leaf1ndex] = state.gap1ndex
109
+ state.leaf1ndex += 1
110
+ return state
111
+
112
+ def thereIsAnActiveLeaf(state: ComputationState) -> bool:
113
+ return state.leaf1ndex > 0
114
+
115
+ def thisIsMyTaskIndex(state: ComputationState) -> bool:
116
+ return (state.leaf1ndex != state.taskDivisions) or (state.leafConnectee % state.taskDivisions == state.taskIndex)
117
+
118
+ def updateLeafConnectee(state: ComputationState) -> ComputationState:
119
+ state.leafConnectee = state.connectionGraph[state.indexDimension, state.leaf1ndex, state.leafBelow[state.leafConnectee]]
120
+ return state
121
+
122
+ def countInitialize(state: ComputationState) -> ComputationState:
123
+ while activeLeafGreaterThan0(state):
124
+ if activeLeafIsTheFirstLeaf(state) or leafBelowSentinelIs1(state):
125
+ state = initializeVariablesToFindGaps(state)
126
+ while loopUpToDimensionsTotal(state):
127
+ state = initializeLeafConnectee(state)
128
+ if activeLeafConnectedToItself(state):
129
+ state = decrementDimensionsUnconstrained(state)
130
+ else:
131
+ while loopingLeavesConnectedToActiveLeaf(state):
132
+ state = countGaps(state)
133
+ state = updateLeafConnectee(state)
134
+ state = incrementIndexDimension(state)
135
+ if allDimensionsAreUnconstrained(state):
136
+ state = insertUnconstrainedLeaf(state)
137
+ state = initializeIndexMiniGap(state)
138
+ while loopingToActiveGapCeiling(state):
139
+ state = filterCommonGaps(state)
140
+ state = incrementIndexMiniGap(state)
141
+ if thereIsAnActiveLeaf(state):
142
+ state = placeLeaf(state)
143
+ if state.gap1ndex > 0:
144
+ break
145
+ return state
146
+
147
+ def countParallel(state: ComputationState) -> ComputationState:
148
+ while activeLeafGreaterThan0(state):
149
+ if activeLeafIsTheFirstLeaf(state) or leafBelowSentinelIs1(state):
150
+ if activeLeafGreaterThanLeavesTotal(state):
151
+ state.groupsOfFolds += 1
152
+ else:
153
+ state = initializeVariablesToFindGaps(state)
154
+ while loopUpToDimensionsTotal(state):
155
+ if dimensionsUnconstrainedCondition(state):
156
+ state = decrementDimensionsUnconstrained(state)
157
+ else:
158
+ state = initializeLeafConnectee(state)
159
+ while loopingLeavesConnectedToActiveLeaf(state):
160
+ if thisIsMyTaskIndex(state):
161
+ state = countGaps(state)
162
+ state = updateLeafConnectee(state)
163
+ state = incrementIndexDimension(state)
164
+ state = initializeIndexMiniGap(state)
165
+ while loopingToActiveGapCeiling(state):
166
+ state = filterCommonGaps(state)
167
+ state = incrementIndexMiniGap(state)
168
+ while noGapsHere(state):
169
+ state = backtrack(state)
170
+ if thereIsAnActiveLeaf(state):
171
+ state = placeLeaf(state)
172
+ state.foldGroups[state.taskIndex] = state.groupsOfFolds
173
+ return state
174
+
175
+ def countSequential(state: ComputationState) -> ComputationState:
176
+ while activeLeafGreaterThan0(state):
177
+ if activeLeafIsTheFirstLeaf(state) or leafBelowSentinelIs1(state):
178
+ if activeLeafGreaterThanLeavesTotal(state):
179
+ state.groupsOfFolds += 1
180
+ else:
181
+ state = initializeVariablesToFindGaps(state)
182
+ while loopUpToDimensionsTotal(state):
183
+ state = initializeLeafConnectee(state)
184
+ if activeLeafConnectedToItself(state):
185
+ state = decrementDimensionsUnconstrained(state)
186
+ else:
187
+ while loopingLeavesConnectedToActiveLeaf(state):
188
+ state = countGaps(state)
189
+ state = updateLeafConnectee(state)
190
+ state = incrementIndexDimension(state)
191
+ state = initializeIndexMiniGap(state)
192
+ while loopingToActiveGapCeiling(state):
193
+ state = filterCommonGaps(state)
194
+ state = incrementIndexMiniGap(state)
195
+ while noGapsHere(state):
196
+ state = backtrack(state)
197
+ if thereIsAnActiveLeaf(state):
198
+ state = placeLeaf(state)
199
+ state.foldGroups[state.taskIndex] = state.groupsOfFolds
200
+ return state
201
+
202
+ def doTheNeedful(state: ComputationState) -> ComputationState:
203
+ state = countInitialize(state)
204
+ if state.taskDivisions > 0:
205
+ multiprocessing.set_start_method('spawn')
206
+ dictionaryConcurrency: dict[int, concurrent.futures.Future[ComputationState]] = {}
207
+ with ProcessPoolExecutor() as concurrencyManager:
208
+ for indexSherpa in range(state.taskDivisions):
209
+ stateParallel = copy.deepcopy(state)
210
+ stateParallel.taskIndex = indexSherpa
211
+ dictionaryConcurrency[indexSherpa] = concurrencyManager.submit(countParallel, stateParallel)
212
+ for indexSherpa in range(state.taskDivisions):
213
+ state.foldGroups[indexSherpa] = dictionaryConcurrency[indexSherpa].result().foldGroups[indexSherpa]
214
+ return state
215
+ else:
216
+ return countSequential(state)
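
The new module threads one `ComputationState` through small predicate and action functions, each taking and returning the state, so the counting loops read as composition. A minimal sketch of the same style on a toy dataclass (everything here is illustrative):

    import dataclasses

    @dataclasses.dataclass
    class ToyState:
        counter: int = 0

    def belowLimit(state: ToyState) -> bool:
        return state.counter < 3

    def increment(state: ToyState) -> ToyState:
        state.counter += 1
        return state

    state = ToyState()
    while belowLimit(state):
        state = increment(state)
    assert state.counter == 3
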
mapFolding/syntheticModules/numbaCountSequential.py CHANGED
@@ -1,4 +1,4 @@
1
- from mapFolding.theDao import countInitialize, countParallel
1
+ from mapFolding.theDao import countInitialize, doTheNeedful
2
2
  from mapFolding.theSSOT import Array1DElephino, Array1DLeavesTotal, Array3D, ComputationState, DatatypeElephino, DatatypeFoldsTotal, DatatypeLeavesTotal
3
3
  from numba import jit
4
4
 
@@ -65,9 +65,10 @@ def countSequential(
65
65
 
66
66
  def flattenData(state: ComputationState) -> ComputationState:
67
67
 
68
- state = countInitialize(state)
69
68
  if state.taskDivisions > 0:
70
- return countParallel(state)
69
+ return doTheNeedful(state)
70
+
71
+ state = countInitialize(state)
71
72
 
72
73
  connectionGraph: Array3D = state.connectionGraph
73
74
  countDimensionsGapped: Array1DLeavesTotal = state.countDimensionsGapped
mapFolding/theDao.py CHANGED
@@ -1,5 +1,12 @@
1
+ from concurrent.futures import ProcessPoolExecutor
1
2
  from mapFolding.theSSOT import ComputationState
3
+ import concurrent.futures
2
4
  import copy
5
+ import multiprocessing
6
+
7
+ # When to use multiprocessing.set_start_method https://github.com/hunterhogan/mapFolding/issues/6
8
+ if __name__ == '__main__':
9
+ multiprocessing.set_start_method('spawn')
3
10
 
4
11
  def activeLeafConnectedToItself(state: ComputationState) -> bool:
5
12
  return state.leafConnectee == state.leaf1ndex
@@ -141,37 +148,33 @@ def countInitialize(state: ComputationState) -> ComputationState:
141
148
  break
142
149
  return state
143
150
 
144
- def countParallel(statePARALLEL: ComputationState) -> ComputationState:
145
- stateCOMPLETE = copy.deepcopy(statePARALLEL)
146
- for indexSherpa in range(statePARALLEL.taskDivisions):
147
- state = copy.deepcopy(statePARALLEL)
148
- state.taskIndex = indexSherpa
149
- while activeLeafGreaterThan0(state):
150
- if activeLeafIsTheFirstLeaf(state) or leafBelowSentinelIs1(state):
151
- if activeLeafGreaterThanLeavesTotal(state):
152
- state.groupsOfFolds += 1
153
- else:
154
- state = initializeVariablesToFindGaps(state)
155
- while loopUpToDimensionsTotal(state):
156
- if dimensionsUnconstrainedCondition(state):
157
- state = decrementDimensionsUnconstrained(state)
158
- else:
159
- state = initializeLeafConnectee(state)
160
- while loopingLeavesConnectedToActiveLeaf(state):
161
- if thisIsMyTaskIndex(state):
162
- state = countGaps(state)
163
- state = updateLeafConnectee(state)
164
- state = incrementIndexDimension(state)
165
- state = initializeIndexMiniGap(state)
166
- while loopingToActiveGapCeiling(state):
167
- state = filterCommonGaps(state)
168
- state = incrementIndexMiniGap(state)
169
- while noGapsHere(state):
170
- state = backtrack(state)
171
- if thereIsAnActiveLeaf(state):
172
- state = placeLeaf(state)
173
- stateCOMPLETE.foldGroups[state.taskIndex] = state.groupsOfFolds
174
- return stateCOMPLETE
151
+ def countParallel(state: ComputationState) -> ComputationState:
152
+ while activeLeafGreaterThan0(state):
153
+ if activeLeafIsTheFirstLeaf(state) or leafBelowSentinelIs1(state):
154
+ if activeLeafGreaterThanLeavesTotal(state):
155
+ state.groupsOfFolds += 1
156
+ else:
157
+ state = initializeVariablesToFindGaps(state)
158
+ while loopUpToDimensionsTotal(state):
159
+ if dimensionsUnconstrainedCondition(state):
160
+ state = decrementDimensionsUnconstrained(state)
161
+ else:
162
+ state = initializeLeafConnectee(state)
163
+ while loopingLeavesConnectedToActiveLeaf(state):
164
+ if thisIsMyTaskIndex(state):
165
+ state = countGaps(state)
166
+ state = updateLeafConnectee(state)
167
+ state = incrementIndexDimension(state)
168
+ state = initializeIndexMiniGap(state)
169
+ while loopingToActiveGapCeiling(state):
170
+ state = filterCommonGaps(state)
171
+ state = incrementIndexMiniGap(state)
172
+ while noGapsHere(state):
173
+ state = backtrack(state)
174
+ if thereIsAnActiveLeaf(state):
175
+ state = placeLeaf(state)
176
+ state.foldGroups[state.taskIndex] = state.groupsOfFolds
177
+ return state
175
178
 
176
179
  def countSequential(state: ComputationState) -> ComputationState:
177
180
  while activeLeafGreaterThan0(state):
@@ -203,6 +206,14 @@ def countSequential(state: ComputationState) -> ComputationState:
203
206
  def doTheNeedful(state: ComputationState) -> ComputationState:
204
207
  state = countInitialize(state)
205
208
  if state.taskDivisions > 0:
206
- return countParallel(state)
209
+ dictionaryConcurrency: dict[int, concurrent.futures.Future[ComputationState]] = {}
210
+ with ProcessPoolExecutor(state.concurrencyLimit) as concurrencyManager:
211
+ for indexSherpa in range(state.taskDivisions):
212
+ stateParallel = copy.deepcopy(state)
213
+ stateParallel.taskIndex = indexSherpa
214
+ dictionaryConcurrency[indexSherpa] = concurrencyManager.submit(countParallel, stateParallel)
215
+ for indexSherpa in range(state.taskDivisions):
216
+ state.foldGroups[indexSherpa] = dictionaryConcurrency[indexSherpa].result().foldGroups[indexSherpa]
217
+ return state
207
218
  else:
208
219
  return countSequential(state)
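
`doTheNeedful` now fans work out with a fresh deep copy of the state per task index and folds each task's result back into `state.foldGroups`. A self-contained sketch of that fan-out/fan-in pattern with a stand-in worker (the worker and payload are illustrative, not the package's API):

    from concurrent.futures import Future, ProcessPoolExecutor
    import copy

    def worker(payload: dict) -> dict:
        payload['result'] = payload['taskIndex'] * 2  # stand-in for countParallel
        return payload

    if __name__ == '__main__':  # required under the 'spawn' start method
        taskDivisions = 4
        template = {'taskIndex': -1, 'result': 0}
        futures: dict[int, Future[dict]] = {}
        results = [0] * taskDivisions
        with ProcessPoolExecutor(max_workers=taskDivisions) as concurrencyManager:
            for indexSherpa in range(taskDivisions):
                payload = copy.deepcopy(template)
                payload['taskIndex'] = indexSherpa
                futures[indexSherpa] = concurrencyManager.submit(worker, payload)
            for indexSherpa in range(taskDivisions):
                results[indexSherpa] = futures[indexSherpa].result()['result']
        print(results)  # [0, 2, 4, 6]
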
mapFolding/theSSOT.py CHANGED
@@ -1,3 +1,4 @@
1
+ from collections.abc import Callable
1
2
  from importlib import import_module as importlib_import_module
2
3
  from inspect import getfile as inspect_getfile
3
4
  from numpy import dtype, int64 as numpy_int64, int16 as numpy_int16, ndarray, signedinteger
@@ -20,7 +21,9 @@ Identifiers: scope and resolution, LEGB (Local, Enclosing, Global, Builtin)
20
21
 
21
22
  # I _think_, in theSSOT, I have abstracted the flow settings to only these couple of lines:
22
23
  packageFlowSynthetic = 'numba'
24
+ # packageFlowSynthetic = 'multiprocessing'
23
25
  Z0Z_packageFlow = 'algorithm'
26
+ # https://github.com/hunterhogan/mapFolding/issues/4
24
27
  # Z0Z_packageFlow = packageFlowSynthetic
25
28
 
26
29
  # =============================================================================
@@ -29,23 +32,23 @@ Z0Z_packageFlow = 'algorithm'
29
32
 
30
33
  sourceAlgorithmPACKAGING: str = 'theDao'
31
34
  datatypePackagePACKAGING: Final[str] = 'numpy'
32
- dispatcherCallableAsStrPACKAGING: str = 'doTheNeedful'
35
+ dispatcherCallablePACKAGING: str = 'doTheNeedful'
33
36
  moduleOfSyntheticModulesPACKAGING: Final[str] = 'syntheticModules'
34
37
 
35
- dataclassModuleAsStrPACKAGING: str = 'theSSOT'
36
- dataclassIdentifierAsStrPACKAGING: str = 'ComputationState'
37
- dataclassInstanceAsStrPACKAGING: str = 'state'
38
- dataclassInstance_Pre_ParallelAsStrPACKAGING = dataclassInstanceAsStrPACKAGING + 'PARALLEL'
39
- dataclassInstance_Post_ParallelAsStrPACKAGING = dataclassInstanceAsStrPACKAGING + 'COMPLETE'
38
+ dataclassModulePACKAGING: str = 'theSSOT'
39
+ dataclassIdentifierPACKAGING: str = 'ComputationState'
40
+ dataclassInstancePACKAGING: str = 'state'
41
+ dataclassInstance_Pre_ParallelPACKAGING = dataclassInstancePACKAGING + 'PARALLEL'
42
+ dataclassInstance_Post_ParallelPACKAGING = dataclassInstancePACKAGING + 'COMPLETE'
40
43
 
41
- Z0Z_initializeCallableAsStrPACKAGING = 'countInitialize'
42
- Z0Z_sequentialCallableAsStrPACKAGING = 'countSequential'
43
- Z0Z_parallelCallableAsStrPACKAGING = 'countParallel'
44
+ sourceInitializeCallablePACKAGING = 'countInitialize'
45
+ sourceSequentialCallablePACKAGING = 'countSequential'
46
+ sourceParallelCallablePACKAGING = 'countParallel'
44
47
 
45
48
  try:
46
- thePackageNameIsPACKAGING: str = tomli_load(Path("../pyproject.toml").open('rb'))["project"]["name"]
49
+ thePackageNamePACKAGING: str = tomli_load(Path("../pyproject.toml").open('rb'))["project"]["name"]
47
50
  except Exception:
48
- thePackageNameIsPACKAGING: str = "mapFolding"
51
+ thePackageNamePACKAGING: str = "mapFolding"
49
52
 
50
53
  # =============================================================================
51
54
  # The Wrong Way The Wrong Way The Wrong Way The Wrong Way The Wrong Way
@@ -54,7 +57,7 @@ except Exception:
54
57
  fileExtensionINSTALLING: str = '.py'
55
58
 
56
59
  def getPathPackageINSTALLING() -> Path:
57
- pathPackage: Path = Path(inspect_getfile(importlib_import_module(thePackageNameIsPACKAGING)))
60
+ pathPackage: Path = Path(inspect_getfile(importlib_import_module(thePackageNamePACKAGING)))
58
61
  if pathPackage.is_file():
59
62
  pathPackage = pathPackage.parent
60
63
  return pathPackage
@@ -68,7 +71,7 @@ def getPathPackageINSTALLING() -> Path:
68
71
 
69
72
  # =====================
70
73
  # Create enduring identifiers from the hopefully transient identifiers above.
71
- thePackageName: Final[str] = thePackageNameIsPACKAGING
74
+ thePackageName: Final[str] = thePackageNamePACKAGING
72
75
  thePathPackage: Path = getPathPackageINSTALLING()
73
76
 
74
77
  """
@@ -81,24 +84,23 @@ NOTE on semiotics: `theIdentifier` vs `identifier`
81
84
  """
82
85
 
83
86
  theSourceAlgorithm: str = sourceAlgorithmPACKAGING
87
+ theSourceInitializeCallable = sourceInitializeCallablePACKAGING
88
+ theSourceSequentialCallable = sourceSequentialCallablePACKAGING
89
+ theSourceParallelCallable = sourceParallelCallablePACKAGING
84
90
  theDatatypePackage: Final[str] = datatypePackagePACKAGING
85
91
 
86
- theDispatcherCallableAsStr: str = dispatcherCallableAsStrPACKAGING
92
+ theDispatcherCallable: str = dispatcherCallablePACKAGING
87
93
 
88
- theDataclassModuleAsStr: str = dataclassModuleAsStrPACKAGING
89
- theDataclassIdentifierAsStr: str = dataclassIdentifierAsStrPACKAGING
90
- theDataclassInstanceAsStr: str = dataclassInstanceAsStrPACKAGING
91
- theDataclassInstance_Pre_ParallelAsStr: str = dataclassInstance_Pre_ParallelAsStrPACKAGING
92
- theDataclassInstance_Post_ParallelAsStr: str = dataclassInstance_Post_ParallelAsStrPACKAGING
94
+ theDataclassModule: str = dataclassModulePACKAGING
95
+ theDataclassIdentifier: str = dataclassIdentifierPACKAGING
96
+ theDataclassInstance: str = dataclassInstancePACKAGING
97
+ theDataclassInstance_Pre_Parallel: str = dataclassInstance_Pre_ParallelPACKAGING
98
+ theDataclassInstance_Post_Parallel: str = dataclassInstance_Post_ParallelPACKAGING
93
99
 
94
100
  theFileExtension: str = fileExtensionINSTALLING
95
101
 
96
102
  theModuleOfSyntheticModules: Final[str] = moduleOfSyntheticModulesPACKAGING
97
103
 
98
- Z0Z_initializeCallableAsStr = Z0Z_initializeCallableAsStrPACKAGING
99
- Z0Z_sequentialCallableAsStr = Z0Z_sequentialCallableAsStrPACKAGING
100
- Z0Z_parallelCallableAsStr = Z0Z_parallelCallableAsStrPACKAGING
101
-
102
104
  # =============================================================================
103
105
  # The right way.
104
106
  concurrencyPackage: str = Z0Z_packageFlow
@@ -137,6 +139,7 @@ class ComputationState:
137
139
  mapShape: tuple[DatatypeLeavesTotal, ...]
138
140
  leavesTotal: DatatypeLeavesTotal
139
141
  taskDivisions: DatatypeLeavesTotal
142
+ concurrencyLimit: DatatypeElephino
140
143
 
141
144
  connectionGraph: Array3D = dataclasses.field(init=False, metadata={'description': 'A 3D array representing the connection graph of the map.'})
142
145
  dimensionsTotal: DatatypeLeavesTotal = dataclasses.field(init=False)
@@ -199,16 +202,16 @@ class ComputationState:
199
202
 
200
203
  theLogicalPathModuleSourceAlgorithm: str = '.'.join([thePackageName, theSourceAlgorithm])
201
204
  theLogicalPathModuleDispatcher: str = theLogicalPathModuleSourceAlgorithm
202
- theLogicalPathModuleDataclass: str = '.'.join([thePackageName, theDataclassModuleAsStr])
205
+ theLogicalPathModuleDataclass: str = '.'.join([thePackageName, theDataclassModule])
203
206
 
204
207
  def getSourceAlgorithm() -> ModuleType:
205
208
  moduleImported: ModuleType = importlib_import_module(theLogicalPathModuleSourceAlgorithm)
206
209
  return moduleImported
207
210
 
211
+ # dynamically set the return type https://github.com/hunterhogan/mapFolding/issues/5
208
212
  def getAlgorithmDispatcher():
209
213
  moduleImported: ModuleType = getSourceAlgorithm()
210
- # TODO I think I need to use `inspect` to type the return value
211
- dispatcherCallable = getattr(moduleImported, theDispatcherCallableAsStr)
214
+ dispatcherCallable = getattr(moduleImported, theDispatcherCallable)
212
215
  return dispatcherCallable
213
216
 
214
217
  def getPathSyntheticModules() -> Path:
@@ -241,21 +244,33 @@ class FREAKOUT(Exception): pass
241
244
  # Temporary or transient or something; probably still the wrong way
242
245
 
243
246
  # THIS IS A STUPID SYSTEM BUT I CAN'T FIGURE OUT AN IMPROVEMENT
247
+ # NOTE This section for _default_ values probably has value
248
+ # https://github.com/hunterhogan/mapFolding/issues/4
244
249
  theFormatStrModuleSynthetic = "{packageFlow}Count"
245
250
  theFormatStrModuleForCallableSynthetic = theFormatStrModuleSynthetic + "_{callableTarget}"
246
251
 
247
- theModuleDispatcherSynthetic: str = theFormatStrModuleForCallableSynthetic.format(packageFlow=packageFlowSynthetic, callableTarget=theDispatcherCallableAsStr)
252
+ theModuleDispatcherSynthetic: str = theFormatStrModuleForCallableSynthetic.format(packageFlow=packageFlowSynthetic, callableTarget=theDispatcherCallable)
248
253
  theLogicalPathModuleDispatcherSynthetic: str = '.'.join([thePackageName, theModuleOfSyntheticModules, theModuleDispatcherSynthetic])
249
254
 
250
255
  # =============================================================================
251
256
  # The most right way I know how to implement.
252
257
 
258
+ # https://github.com/hunterhogan/mapFolding/issues/4
253
259
  if Z0Z_packageFlow == packageFlowSynthetic: # pyright: ignore [reportUnnecessaryComparison]
260
+ # NOTE this _might_ have value as a default
254
261
  theLogicalPathModuleDispatcher = theLogicalPathModuleDispatcherSynthetic
255
262
 
256
- def getPackageDispatcher():
257
- moduleImported: ModuleType = importlib_import_module(theLogicalPathModuleDispatcher)
258
- dispatcherCallable = getattr(moduleImported, theDispatcherCallableAsStr)
263
+ # https://github.com/hunterhogan/mapFolding/issues/4
264
+ # dynamically set the return type https://github.com/hunterhogan/mapFolding/issues/5
265
+ def getPackageDispatcher() -> Callable[[ComputationState], ComputationState]:
266
+ # NOTE but this part, if the package flow is synthetic, probably needs to be delegated
267
+ # to the authority for creating _that_ synthetic flow.
268
+
269
+ # Automated system
270
+ # moduleImported: ModuleType = importlib_import_module(theLogicalPathModuleDispatcher)
271
+ # dispatcherCallable = getattr(moduleImported, theDispatcherCallable)
272
+
273
+ # Hardcoded while I am refactoring "someAssemblyRequired"
259
274
  from mapFolding.syntheticModules.numbaCountSequential import flattenData
260
275
  dispatcherCallable = flattenData
261
276
  return dispatcherCallable
mapFolding-0.7.0.dist-info/METADATA → mapFolding-0.7.1.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: mapFolding
3
- Version: 0.7.0
3
+ Version: 0.7.1
4
4
  Summary: Count distinct ways to fold a map (or a strip of stamps)
5
5
  Author-email: Hunter Hogan <HunterHogan@pm.me>
6
6
  License: CC-BY-NC-4.0
mapFolding-0.7.0.dist-info/RECORD → mapFolding-0.7.1.dist-info/RECORD RENAMED
@@ -1,12 +1,12 @@
1
- mapFolding/__init__.py,sha256=dg1pzBxbhIY8FRtS2uphqL7YwUwR5uxZ5epduKhl3Y8,189
2
- mapFolding/basecamp.py,sha256=H-cZeoDhKftRwZu1Q_Xo66lCweVEtB0ChqJY_GNEfdI,3835
3
- mapFolding/beDRY.py,sha256=WsXAPbjFfjO0bVFq8GGbEdEP9lmrYTaZxzIWpO9s3ok,8209
4
- mapFolding/filesystem.py,sha256=TfvvIXnlLU6NNZrrY9T9Wk9cIGksvDjeI-JgsKHdsLY,3767
1
+ mapFolding/__init__.py,sha256=hONqdWnBN1ebgrKZuMIZfI8m-1krSR66L4GTVRwBmw4,203
2
+ mapFolding/basecamp.py,sha256=Ik_oH-MpH8f6k-yBzwm99lfkv3pMDgFFEnSSDyiJjsQ,3861
3
+ mapFolding/beDRY.py,sha256=Icgj8s6Rgd_oIBdvzwr6dovywdvG3sIVELO-n74W-So,9011
4
+ mapFolding/filesystem.py,sha256=KqgsO-jaSWAlYib_9ovoXQY76PcQri09_73u0OqMWC8,4094
5
5
  mapFolding/noHomeYet.py,sha256=HjxLP-7BGVkKL66T50q4BWnC0Cg2gHUeCKMFuwR2mEQ,785
6
6
  mapFolding/oeis.py,sha256=xqnL_VyhwRBET-Kdpnf59K5W5metGbW0JiufTlVw7g0,11853
7
7
  mapFolding/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
8
- mapFolding/theDao.py,sha256=Bcd-VmZfarxdsdyFh40XdURrMPjV2QIO0WoMJ6AS6hw,8054
9
- mapFolding/theSSOT.py,sha256=1-trVx4JHT2twT5gmKY5cCdZ2gLSKjScMBzE9QiyIXo,12594
8
+ mapFolding/theDao.py,sha256=eQvR6lDMW_dI6qhZmkTqv-Eog79ekfQjap9rg8cavE8,8629
9
+ mapFolding/theSSOT.py,sha256=fWEkYyCoioi_jJzu2Qwi11GBIyXBbjcFfKAf3tlL9Aw,13270
10
10
  mapFolding/reference/flattened.py,sha256=S6D9wiFTlbeoetEqaMLOcA-R22BHOzjqPRujffNxxUM,14875
11
11
  mapFolding/reference/hunterNumba.py,sha256=jDS0ORHkIhcJ1rzA5hT49sZHKf3rgJOoGesUCcbKFFY,6054
12
12
  mapFolding/reference/irvineJavaPort.py,sha256=7GvBU0tnS6wpFgkYad3465do9jBQW-2bYvbCYyABPHM,3341
@@ -16,35 +16,36 @@ mapFolding/reference/lunnanNumpy.py,sha256=rwVP3WIDXimpAuaxhRIuBYU56nVDTKlfGiclw
16
16
  mapFolding/reference/lunnanWhile.py,sha256=uRrMT23jTJvoQDlD_FzeIQe_pfMXJG6_bRvs7uhC8z0,3271
17
17
  mapFolding/reference/rotatedEntryPoint.py,sha256=USZY3n3zwhSE68ATscUuN66t1qShuEbMI790Gz9JFTw,9352
18
18
  mapFolding/reference/total_countPlus1vsPlusN.py,sha256=wpgay-uqPOBd64Z4Pg6tg40j7-4pzWHGMM6v0bnmjhE,6288
19
- mapFolding/someAssemblyRequired/Z0Z_workbench.py,sha256=wkTqYlzi-okXloPCg4IHpThq97vuyCzrLSn1wsF6-_0,1627
19
+ mapFolding/someAssemblyRequired/Z0Z_workbench.py,sha256=XibNQRbX5gGfrkWVfRsW66kHlqphcqVCPz7TyJFcs90,1505
20
20
  mapFolding/someAssemblyRequired/__init__.py,sha256=pYkG-1LM8emzTeQbGtOfiZsAiklm5DU92-WE63qB-9s,602
21
21
  mapFolding/someAssemblyRequired/getLLVMforNoReason.py,sha256=sGDzg5HG21Q42M0upRD1NWzOUsd7mZ_9wUKJIQHI13o,1000
22
22
  mapFolding/someAssemblyRequired/ingredientsNumba.py,sha256=HnWgo1KwIoevUrE5A1WZh4AVFXKQllt8L6RL46JTXEg,2787
23
23
  mapFolding/someAssemblyRequired/synthesizeCountingFunctions.py,sha256=77iNPPsN8BmwHaYKdXcDKCwoZKS1g41CJNenQB0W-eg,252
24
- mapFolding/someAssemblyRequired/synthesizeDataConverters.py,sha256=qNedUaXxU107x3OBx1AHcSEQPM3bMe5ym-GfpJ9O0bc,6237
24
+ mapFolding/someAssemblyRequired/synthesizeDataConverters.py,sha256=czo6Y0ABlwye01ShumObAFtkTn7PBx4Qsh9Jna5BMUw,6187
25
25
  mapFolding/someAssemblyRequired/synthesizeNumba.py,sha256=cKXvp0BxmYayUBMQ2RJLkVoJUSvzPI1S4Wy_APRM5cI,4788
26
26
  mapFolding/someAssemblyRequired/synthesizeNumbaJob.py,sha256=w7zLUzLrLXf2N6O75jKvXDoyTJemWB8GOdK4ryqSWbs,22700
27
27
  mapFolding/someAssemblyRequired/synthesizeNumbaModules.py,sha256=zUclYPmAr6X5qFO3nQK1_HHqdiRq6j3EIBXjeax_b8c,4388
28
- mapFolding/someAssemblyRequired/transformationTools.py,sha256=FwaZlwMnfzoGvtax5GQSjkyBWr5atMo2VFxPHaBwuNA,21019
29
- mapFolding/someAssemblyRequired/whatWillBe.py,sha256=ue_NsFN3Z6IxlTgVOaikG8GCiXz9Dk1dvNlypOWMcG0,12188
28
+ mapFolding/someAssemblyRequired/transformationTools.py,sha256=QAheMJgYWI09PBetyhnJqNhJk_hl6Zlu1TBbDjBwFSs,21001
29
+ mapFolding/someAssemblyRequired/whatWillBe.py,sha256=G_r1VD106gTZISyiwhuJosREfRzQd73JLAMpYg8F6O8,14641
30
30
  mapFolding/syntheticModules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
31
31
  mapFolding/syntheticModules/dataNamespaceFlattened.py,sha256=vsU4PDEMoEcZ424AzsXOJoXoVlX_bSk-pXSTlDsSN9k,2939
32
+ mapFolding/syntheticModules/multiprocessingCount_doTheNeedful.py,sha256=JwZzvF79mvuo2E0Lfs0NeY1QR6JP8FRuwUaRmENwDs8,8482
32
33
  mapFolding/syntheticModules/numbaCount.py,sha256=znFV9RhZ0WqPA-r-DT92WDgZXUyhdQlrsKmnsGySuKc,4561
33
34
  mapFolding/syntheticModules/numbaCountExample.py,sha256=Oo0Xeex89sLD15iRt3N76OKRFScKl9qwO84-d5sv6lM,11871
34
- mapFolding/syntheticModules/numbaCountSequential.py,sha256=iJ-IyzXv0c41uugceJGIHutdzJpkhqbTj0ic_3DqHEM,4326
35
+ mapFolding/syntheticModules/numbaCountSequential.py,sha256=CtSA3UX2JzoN_uSo0SH7E3coKuHBWaTloQZwbExIPBw,4325
35
36
  mapFolding/syntheticModules/numbaCount_doTheNeedful.py,sha256=J9wZ9PW5EVduOQWVze7CgpTHkUjRIvBPG8fNuT3IVpA,1145
36
37
  mapFolding/syntheticModules/numba_doTheNeedful.py,sha256=8kgV2GGjy_nxVi9o_aAtrZzRSxsYr8HVNsVXesf4Kec,767
37
38
  mapFolding/syntheticModules/numba_doTheNeedfulExample.py,sha256=J9wZ9PW5EVduOQWVze7CgpTHkUjRIvBPG8fNuT3IVpA,1145
38
39
  tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
39
40
  tests/conftest.py,sha256=oPc1IS4RXhLQIBskhuawi4GEWhy6kXbM6D2MdX9kDhw,10833
40
- tests/test_computations.py,sha256=s4ce78iEa9axlknR83NqwXwluX8Z5HN1GtTJ3RyXuzQ,3240
41
+ tests/test_computations.py,sha256=YhadlskBh_r5RiefHRy0FlrYQ0FelYbqcSNNSkSJMIY,3368
41
42
  tests/test_filesystem.py,sha256=7Ova6f4R6lI9rwnnu8SkZgficTYlKEhSzLp-nBRWESM,3183
42
43
  tests/test_oeis.py,sha256=uxvwmgbnylSDdsVJfuAT0LuYLbIVFwSgdLxHm-xUGBM,5043
43
44
  tests/test_other.py,sha256=CS64h5NACYmXhG3owBpPDcXv3BpYlXNeLqPcqT4quwg,4303
44
- tests/test_tasks.py,sha256=nsrNuGYk49EnTUslZIQTgqEONwuCSlQaPCpoQTkE08k,2849
45
- mapfolding-0.7.0.dist-info/LICENSE,sha256=NxH5Y8BdC-gNU-WSMwim3uMbID2iNDXJz7fHtuTdXhk,19346
46
- mapfolding-0.7.0.dist-info/METADATA,sha256=xR40gKDQ001BELktPxoyqtsdz7b6zFVmqdU8zV2qoRg,7696
47
- mapfolding-0.7.0.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
48
- mapfolding-0.7.0.dist-info/entry_points.txt,sha256=F3OUeZR1XDTpoH7k3wXuRb3KF_kXTTeYhu5AGK1SiOQ,146
49
- mapfolding-0.7.0.dist-info/top_level.txt,sha256=1gP2vFaqPwHujGwb3UjtMlLEGN-943VSYFR7V4gDqW8,17
50
- mapfolding-0.7.0.dist-info/RECORD,,
45
+ tests/test_tasks.py,sha256=K5WdCtsM8ghFHX_BMmATsNUTvCICoTHtU-K73QBA4gY,3048
46
+ mapfolding-0.7.1.dist-info/LICENSE,sha256=NxH5Y8BdC-gNU-WSMwim3uMbID2iNDXJz7fHtuTdXhk,19346
47
+ mapfolding-0.7.1.dist-info/METADATA,sha256=czJ_D8mDCOZg1ug6kaIvrwMTKJKh2zfk7Q3FWLJcs6U,7696
48
+ mapfolding-0.7.1.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
49
+ mapfolding-0.7.1.dist-info/entry_points.txt,sha256=F3OUeZR1XDTpoH7k3wXuRb3KF_kXTTeYhu5AGK1SiOQ,146
50
+ mapfolding-0.7.1.dist-info/top_level.txt,sha256=1gP2vFaqPwHujGwb3UjtMlLEGN-943VSYFR7V4gDqW8,17
51
+ mapfolding-0.7.1.dist-info/RECORD,,
tests/test_computations.py CHANGED
@@ -3,16 +3,20 @@ from mapFolding.filesystem import getPathFilenameFoldsTotal
  from mapFolding.noHomeYet import getFoldsTotalKnown
  from mapFolding.oeis import settingsOEIS, oeisIDfor_n
  # from mapFolding.someAssemblyRequired import writeJobNumba
+ from pathlib import Path
  from tests.conftest import standardizedEqualToCallableReturn, registrarRecordsTmpObject
+ from types import ModuleType
  import importlib.util
+ import multiprocessing
  import pytest
- from pathlib import Path
- from types import ModuleType
 
- def test_algorithmSourceParallel(listDimensionsTestParallelization, useAlgorithmSourceDispatcher: None) -> None:
-     standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(listDimensionsTestParallelization)), countFolds, listDimensionsTestParallelization, None, 'maximum')
+ if __name__ == '__main__':
+     multiprocessing.set_start_method('spawn')
+
+ def test_algorithmSourceParallel(listDimensionsTestParallelization: list[int], useAlgorithmSourceDispatcher: None) -> None:
+     standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(listDimensionsTestParallelization)), countFolds, listDimensionsTestParallelization, None, 'maximum', None)
 
- def test_algorithmSourceSequential(listDimensionsTestCountFolds, useAlgorithmSourceDispatcher: None) -> None:
+ def test_algorithmSourceSequential(listDimensionsTestCountFolds: tuple[int, ...], useAlgorithmSourceDispatcher: None) -> None:
      standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(listDimensionsTestCountFolds)), countFolds, listDimensionsTestCountFolds)
 
  def test_aOFn_calculate_value(oeisID: str) -> None:
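Both test modules now call multiprocessing.set_start_method('spawn') behind an `if __name__ == '__main__'` guard (issue #6, linked in the test_tasks.py diff below). The guard is load-bearing: set_start_method raises RuntimeError if the start method has already been fixed, and under the spawn method each worker process re-imports the parent module, so an unguarded call would re-execute in every child. A minimal standalone sketch of the pattern; _countOneTask is a hypothetical worker, not part of mapFolding:

# Standalone sketch of the guarded spawn setup; _countOneTask is hypothetical.
import multiprocessing

def _countOneTask(taskIndex: int) -> int:
    # Stand-in for counting one computation division.
    return taskIndex * taskIndex

if __name__ == '__main__':
    # Runs once, in the parent only: spawned children import this module
    # under a different __name__, so they skip this block.
    multiprocessing.set_start_method('spawn')
    with multiprocessing.Pool(processes=2) as pool:
        print(sum(pool.map(_countOneTask, range(4))))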
tests/test_tasks.py CHANGED
@@ -1,12 +1,18 @@
- from typing import Literal
+ from collections.abc import Callable
  from mapFolding.basecamp import countFolds
  from mapFolding.beDRY import getTaskDivisions, setCPUlimit, validateListDimensions, getLeavesTotal
  from mapFolding.noHomeYet import getFoldsTotalKnown
  from tests.conftest import standardizedEqualToCallableReturn
+ from typing import Literal
  from Z0Z_tools.pytestForYourUse import PytestFor_defineConcurrencyLimit
- from collections.abc import Callable
+ import multiprocessing
  import pytest
 
+ # When to use multiprocessing.set_start_method https://github.com/hunterhogan/mapFolding/issues/6
+ if __name__ == '__main__':
+     multiprocessing.set_start_method('spawn')
+
+
  # TODO add a test. `C` = number of logical cores available. `n = C + 1`. Ensure that `[2,n]` is computed correctly.
  # Or, probably smarter: limit the number of cores, then run a test with C+1.
 
@@ -14,7 +20,7 @@ def test_countFoldsComputationDivisionsInvalid(mapShapeTestFunctionality: tuple[
      standardizedEqualToCallableReturn(ValueError, countFolds, mapShapeTestFunctionality, None, {"wrong": "value"})
 
  def test_countFoldsComputationDivisionsMaximum(listDimensionsTestParallelization: list[int]) -> None:
-     standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(listDimensionsTestParallelization)), countFolds, listDimensionsTestParallelization, None, 'maximum')
+     standardizedEqualToCallableReturn(getFoldsTotalKnown(tuple(listDimensionsTestParallelization)), countFolds, listDimensionsTestParallelization, None, 'maximum', None)
 
  @pytest.mark.parametrize("nameOfTest,callablePytest", PytestFor_defineConcurrencyLimit())
  def test_defineConcurrencyLimit(nameOfTest: str, callablePytest: Callable[[], None]) -> None:
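The last hunk's test_defineConcurrencyLimit replays checks that Z0Z_tools ships alongside the library itself: judging from the parametrize signature, PytestFor_defineConcurrencyLimit() returns (name, callable) pairs that pytest expands into individual cases. A minimal sketch of that packaged-tests pattern, under that assumption; PytestFor_example and _checkDoubling are hypothetical names, not Z0Z_tools API:

# Hypothetical sketch of the "library ships its own pytest cases" pattern,
# assuming the factory returns (testName, zeroArgumentCallable) pairs.
from collections.abc import Callable
import pytest

def PytestFor_example() -> list[tuple[str, Callable[[], None]]]:
    def _checkDoubling() -> None:
        assert 2 * 21 == 42
    return [('checkDoubling', _checkDoubling)]

@pytest.mark.parametrize("nameOfTest,callablePytest", PytestFor_example())
def test_example(nameOfTest: str, callablePytest: Callable[[], None]) -> None:
    callablePytest()  # each packaged check becomes one parametrized case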