mapFolding 0.8.1__py3-none-any.whl → 0.8.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,211 +1,234 @@
1
+ """
2
+ Comparison of two nearly identical counting implementations with vastly different performance.
3
+
4
+ This file provides a direct comparison between two variants of the map folding algorithm
5
+ that differ only in their approach to incrementing the folding counter. Despite their apparent
6
+ similarity, one implementation demonstrates orders of magnitude better performance than the other.
7
+
8
+ Key characteristics:
9
+ - Both implementations use Numba for performance optimization
10
+ - Both use identical data structures and array initializations
11
+ - `foldings_plus_1`: Increments the counter by 1 for each valid folding
12
+ - `foldings`: Increments the counter by n (total leaves) when certain conditions are met
13
+
14
+ The performance difference illustrates how subtle algorithmic changes can dramatically
15
+ impact computational efficiency, even when the overall algorithm structure remains unchanged.
16
+ This example serves as a compelling demonstration of the importance of algorithm analysis
17
+ and optimization for combinatorial problems.
18
+
19
+ Note: These functions are isolated for educational purposes to highlight the specific
20
+ optimization technique. The main package uses more comprehensive optimizations derived
21
+ from this and other lessons.
22
+ """
23
+
1
24
  from numba import njit
2
25
  import numpy
3
26
 
4
27
  @njit(cache=True)
5
28
  def foldings_plus_1(p: list[int], computationDivisions: int = 0, computationIndex: int = 0) -> int:
6
- n: int = 1 # Total number of leaves
7
- for dimension in p:
8
- n *= dimension
9
-
10
- d = len(p) # Number of dimensions
11
- # Compute arrays P, C, D as per the algorithm
12
- P = numpy.ones(d + 1, dtype=numpy.int64)
13
- for i in range(1, d + 1):
14
- P[i] = P[i - 1] * p[i - 1]
15
-
16
- # C[i][m] holds the i-th coordinate of leaf m
17
- C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
18
- for i in range(1, d + 1):
19
- for m in range(1, n + 1):
20
- C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
21
-
22
- # D[i][l][m] computes the leaf connected to m in section i when inserting l
23
- D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
24
- for i in range(1, d + 1):
25
- for l in range(1, n + 1):
26
- for m in range(1, l + 1):
27
- delta = C[i][l] - C[i][m]
28
- if delta % 2 == 0:
29
- # If delta is even
30
- if C[i][m] == 1:
31
- D[i][l][m] = m
32
- else:
33
- D[i][l][m] = m - P[i - 1]
34
- else:
35
- # If delta is odd
36
- if C[i][m] == p[i - 1] or m + P[i - 1] > l:
37
- D[i][l][m] = m
38
- else:
39
- D[i][l][m] = m + P[i - 1]
40
- # Initialize arrays/lists
41
- A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
42
- B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
43
- count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
44
- gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
45
- gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
46
-
47
-
48
- # Initialize variables for backtracking
49
- total_count = 0 # Total number of foldings
50
- g = 0 # Gap index
51
- l = 1 # Current leaf
52
-
53
- # Start backtracking loop
54
- while l > 0:
55
- # If we have processed all leaves, increment total count
56
- if l > n:
57
- total_count += 1
58
- else:
59
- dd = 0 # Number of sections where leaf l is unconstrained
60
- gg = g # Temporary gap index
61
- g = gapter[l - 1] # Reset gap index for current leaf
62
-
63
- # Count possible gaps for leaf l in each section
64
- for i in range(1, d + 1):
65
- if D[i][l][l] == l:
66
- dd += 1
67
- else:
68
- m = D[i][l][l]
69
- while m != l:
70
- if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
71
- gap[gg] = m
72
- if count[m] == 0:
73
- gg += 1
74
- count[m] += 1
75
- m = D[i][l][B[m]]
76
-
77
- # If leaf l is unconstrained in all sections, it can be inserted anywhere
78
- if dd == d:
79
- for m in range(l):
80
- gap[gg] = m
81
- gg += 1
82
-
83
- # Filter gaps that are common to all sections
84
- for j in range(g, gg):
85
- gap[g] = gap[j]
86
- if count[gap[j]] == d - dd:
87
- g += 1
88
- count[gap[j]] = 0 # Reset count for next iteration
89
-
90
- # Recursive backtracking steps
91
- while l > 0 and g == gapter[l - 1]:
92
- l -= 1
93
- B[A[l]] = B[l]
94
- A[B[l]] = A[l]
95
-
96
- if l > 0:
97
- g -= 1
98
- A[l] = gap[g]
99
- B[l] = B[A[l]]
100
- B[A[l]] = l
101
- A[B[l]] = l
102
- gapter[l] = g # Save current gap index
103
- l += 1 # Move to next leaf
104
-
105
- return total_count
29
+ n: int = 1 # Total number of leaves
30
+ for dimension in p:
31
+ n *= dimension
32
+
33
+ d = len(p) # Number of dimensions
34
+ # Compute arrays P, C, D as per the algorithm
35
+ P = numpy.ones(d + 1, dtype=numpy.int64)
36
+ for i in range(1, d + 1):
37
+ P[i] = P[i - 1] * p[i - 1]
38
+
39
+ # C[i][m] holds the i-th coordinate of leaf m
40
+ C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
41
+ for i in range(1, d + 1):
42
+ for m in range(1, n + 1):
43
+ C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
44
+
45
+ # D[i][l][m] computes the leaf connected to m in section i when inserting l
46
+ D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
47
+ for i in range(1, d + 1):
48
+ for l in range(1, n + 1):
49
+ for m in range(1, l + 1):
50
+ delta = C[i][l] - C[i][m]
51
+ if delta % 2 == 0:
52
+ # If delta is even
53
+ if C[i][m] == 1:
54
+ D[i][l][m] = m
55
+ else:
56
+ D[i][l][m] = m - P[i - 1]
57
+ else:
58
+ # If delta is odd
59
+ if C[i][m] == p[i - 1] or m + P[i - 1] > l:
60
+ D[i][l][m] = m
61
+ else:
62
+ D[i][l][m] = m + P[i - 1]
63
+ # Initialize arrays/lists
64
+ A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
65
+ B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
66
+ count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
67
+ gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
68
+ gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
69
+
70
+
71
+ # Initialize variables for backtracking
72
+ total_count = 0 # Total number of foldings
73
+ g = 0 # Gap index
74
+ l = 1 # Current leaf
75
+
76
+ # Start backtracking loop
77
+ while l > 0:
78
+ # If we have processed all leaves, increment total count
79
+ if l > n:
80
+ total_count += 1
81
+ else:
82
+ dd = 0 # Number of sections where leaf l is unconstrained
83
+ gg = g # Temporary gap index
84
+ g = gapter[l - 1] # Reset gap index for current leaf
85
+
86
+ # Count possible gaps for leaf l in each section
87
+ for i in range(1, d + 1):
88
+ if D[i][l][l] == l:
89
+ dd += 1
90
+ else:
91
+ m = D[i][l][l]
92
+ while m != l:
93
+ if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
94
+ gap[gg] = m
95
+ if count[m] == 0:
96
+ gg += 1
97
+ count[m] += 1
98
+ m = D[i][l][B[m]]
99
+
100
+ # If leaf l is unconstrained in all sections, it can be inserted anywhere
101
+ if dd == d:
102
+ for m in range(l):
103
+ gap[gg] = m
104
+ gg += 1
105
+
106
+ # Filter gaps that are common to all sections
107
+ for j in range(g, gg):
108
+ gap[g] = gap[j]
109
+ if count[gap[j]] == d - dd:
110
+ g += 1
111
+ count[gap[j]] = 0 # Reset count for next iteration
112
+
113
+ # Recursive backtracking steps
114
+ while l > 0 and g == gapter[l - 1]:
115
+ l -= 1
116
+ B[A[l]] = B[l]
117
+ A[B[l]] = A[l]
118
+
119
+ if l > 0:
120
+ g -= 1
121
+ A[l] = gap[g]
122
+ B[l] = B[A[l]]
123
+ B[A[l]] = l
124
+ A[B[l]] = l
125
+ gapter[l] = g # Save current gap index
126
+ l += 1 # Move to next leaf
127
+
128
+ return total_count
106
129
 
107
130
  @njit(cache=True)
108
131
  def foldings(p: list[int], computationDivisions: int = 0, computationIndex: int = 0) -> int:
109
- n: int = 1 # Total number of leaves
110
- for dimension in p:
111
- n *= dimension
112
-
113
- d = len(p) # Number of dimensions
114
- # Compute arrays P, C, D as per the algorithm
115
- P = numpy.ones(d + 1, dtype=numpy.int64)
116
- for i in range(1, d + 1):
117
- P[i] = P[i - 1] * p[i - 1]
118
-
119
- # C[i][m] holds the i-th coordinate of leaf m
120
- C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
121
- for i in range(1, d + 1):
122
- for m in range(1, n + 1):
123
- C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
124
- # C[i][m] = ((m - 1) // P[i - 1]) % p[i - 1] + 1 # NOTE different, but either one works
125
-
126
- # D[i][l][m] computes the leaf connected to m in section i when inserting l
127
- D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
128
- for i in range(1, d + 1):
129
- for l in range(1, n + 1):
130
- for m in range(1, l + 1):
131
- delta = C[i][l] - C[i][m]
132
- if delta % 2 == 0:
133
- # If delta is even
134
- if C[i][m] == 1:
135
- D[i][l][m] = m
136
- else:
137
- D[i][l][m] = m - P[i - 1]
138
- else:
139
- # If delta is odd
140
- if C[i][m] == p[i - 1] or m + P[i - 1] > l:
141
- D[i][l][m] = m
142
- else:
143
- D[i][l][m] = m + P[i - 1]
144
- # Initialize arrays/lists
145
- A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
146
- B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
147
- count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
148
- gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
149
- gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
150
-
151
-
152
- # Initialize variables for backtracking
153
- total_count = 0 # Total number of foldings
154
- g = 0 # Gap index
155
- l = 1 # Current leaf
156
-
157
- # Start backtracking loop
158
- while l > 0:
159
- if l <= 1 or B[0] == 1: # NOTE different
160
- # NOTE the above `if` statement encloses the if/else block below
161
- # NOTE these changes increase the throughput by more than an order of magnitude
162
- if l > n:
163
- total_count += n
164
- else:
165
- dd = 0 # Number of sections where leaf l is unconstrained
166
- gg = gapter[l - 1] # Track possible gaps # NOTE different, but not important
167
- g = gg # NOTE different, but not important
168
-
169
- # Count possible gaps for leaf l in each section
170
- for i in range(1, d + 1):
171
- if D[i][l][l] == l:
172
- dd += 1
173
- else:
174
- m = D[i][l][l]
175
- while m != l:
176
- if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
177
- gap[gg] = m
178
- if count[m] == 0:
179
- gg += 1
180
- count[m] += 1
181
- m = D[i][l][B[m]]
182
-
183
- # If leaf l is unconstrained in all sections, it can be inserted anywhere
184
- if dd == d:
185
- for m in range(l):
186
- gap[gg] = m
187
- gg += 1
188
-
189
- # Filter gaps that are common to all sections
190
- for j in range(g, gg):
191
- gap[g] = gap[j]
192
- if count[gap[j]] == d - dd:
193
- g += 1
194
- count[gap[j]] = 0 # Reset count for next iteration
195
-
196
- # Recursive backtracking steps
197
- while l > 0 and g == gapter[l - 1]:
198
- l -= 1
199
- B[A[l]] = B[l]
200
- A[B[l]] = A[l]
201
-
202
- if l > 0:
203
- g -= 1
204
- A[l] = gap[g]
205
- B[l] = B[A[l]]
206
- B[A[l]] = l
207
- A[B[l]] = l
208
- gapter[l] = g # Save current gap index
209
- l += 1 # Move to next leaf
210
-
211
- return total_count
132
+ n: int = 1 # Total number of leaves
133
+ for dimension in p:
134
+ n *= dimension
135
+
136
+ d = len(p) # Number of dimensions
137
+ # Compute arrays P, C, D as per the algorithm
138
+ P = numpy.ones(d + 1, dtype=numpy.int64)
139
+ for i in range(1, d + 1):
140
+ P[i] = P[i - 1] * p[i - 1]
141
+
142
+ # C[i][m] holds the i-th coordinate of leaf m
143
+ C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
144
+ for i in range(1, d + 1):
145
+ for m in range(1, n + 1):
146
+ C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
147
+ # C[i][m] = ((m - 1) // P[i - 1]) % p[i - 1] + 1 # NOTE different, but either one works
148
+
149
+ # D[i][l][m] computes the leaf connected to m in section i when inserting l
150
+ D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
151
+ for i in range(1, d + 1):
152
+ for l in range(1, n + 1):
153
+ for m in range(1, l + 1):
154
+ delta = C[i][l] - C[i][m]
155
+ if delta % 2 == 0:
156
+ # If delta is even
157
+ if C[i][m] == 1:
158
+ D[i][l][m] = m
159
+ else:
160
+ D[i][l][m] = m - P[i - 1]
161
+ else:
162
+ # If delta is odd
163
+ if C[i][m] == p[i - 1] or m + P[i - 1] > l:
164
+ D[i][l][m] = m
165
+ else:
166
+ D[i][l][m] = m + P[i - 1]
167
+ # Initialize arrays/lists
168
+ A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
169
+ B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
170
+ count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
171
+ gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
172
+ gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
173
+
174
+
175
+ # Initialize variables for backtracking
176
+ total_count = 0 # Total number of foldings
177
+ g = 0 # Gap index
178
+ l = 1 # Current leaf
179
+
180
+ # Start backtracking loop
181
+ while l > 0:
182
+ if l <= 1 or B[0] == 1: # NOTE different
183
+ # NOTE the above `if` statement encloses the if/else block below
184
+ # NOTE these changes increase the throughput by more than an order of magnitude
185
+ if l > n:
186
+ total_count += n
187
+ else:
188
+ dd = 0 # Number of sections where leaf l is unconstrained
189
+ gg = gapter[l - 1] # Track possible gaps # NOTE different, but not important
190
+ g = gg # NOTE different, but not important
191
+
192
+ # Count possible gaps for leaf l in each section
193
+ for i in range(1, d + 1):
194
+ if D[i][l][l] == l:
195
+ dd += 1
196
+ else:
197
+ m = D[i][l][l]
198
+ while m != l:
199
+ if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
200
+ gap[gg] = m
201
+ if count[m] == 0:
202
+ gg += 1
203
+ count[m] += 1
204
+ m = D[i][l][B[m]]
205
+
206
+ # If leaf l is unconstrained in all sections, it can be inserted anywhere
207
+ if dd == d:
208
+ for m in range(l):
209
+ gap[gg] = m
210
+ gg += 1
211
+
212
+ # Filter gaps that are common to all sections
213
+ for j in range(g, gg):
214
+ gap[g] = gap[j]
215
+ if count[gap[j]] == d - dd:
216
+ g += 1
217
+ count[gap[j]] = 0 # Reset count for next iteration
218
+
219
+ # Recursive backtracking steps
220
+ while l > 0 and g == gapter[l - 1]:
221
+ l -= 1
222
+ B[A[l]] = B[l]
223
+ A[B[l]] = A[l]
224
+
225
+ if l > 0:
226
+ g -= 1
227
+ A[l] = gap[g]
228
+ B[l] = B[A[l]]
229
+ B[A[l]] = l
230
+ A[B[l]] = l
231
+ gapter[l] = g # Save current gap index
232
+ l += 1 # Move to next leaf
233
+
234
+ return total_count
@@ -77,18 +77,13 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> N
77
77
  shatteredDataclass = shatter_dataclassesDOTdataclass(numbaFlow.logicalPathModuleDataclass, numbaFlow.sourceDataclassIdentifier, numbaFlow.sourceDataclassInstanceTaskDistribution)
78
78
  ingredientsDispatcher.imports.update(shatteredDataclass.ledgerDataclassANDFragments)
79
79
 
80
- # TODO remove hardcoding
81
- namespaceHARDCODED = 'concurrencyManager'
82
- identifierHARDCODED = 'submit'
83
- sourceNamespace = namespaceHARDCODED
84
- sourceIdentifier = identifierHARDCODED
85
80
  NodeReplacer(
86
- findThis = ifThis.isAssignAndValueIsCallNamespace_Identifier(sourceNamespace, sourceIdentifier)
81
+ findThis = ifThis.isAssignAndValueIsCallNamespace_Identifier(numbaFlow.sourceConcurrencyManagerNamespace, numbaFlow.sourceConcurrencyManagerIdentifier)
87
82
  , doThat = Then.insertThisAbove(shatteredDataclass.listAnnAssign4DataclassUnpack)
88
83
  ).visit(ingredientsDispatcher.astFunctionDef)
89
84
  NodeReplacer(
90
- findThis = ifThis.isCallNamespace_Identifier(sourceNamespace, sourceIdentifier)
91
- , doThat = Then.replaceWith(Make.astCall(Make.astAttribute(Make.astName(sourceNamespace), sourceIdentifier)
85
+ findThis = ifThis.isCallNamespace_Identifier(numbaFlow.sourceConcurrencyManagerNamespace, numbaFlow.sourceConcurrencyManagerIdentifier)
86
+ , doThat = Then.replaceWith(Make.astCall(Make.astAttribute(Make.astName(numbaFlow.sourceConcurrencyManagerNamespace), numbaFlow.sourceConcurrencyManagerIdentifier)
92
87
  , listArguments=[Make.astName(numbaFlow.parallelCallable)] + shatteredDataclass.listNameDataclassFragments4Parameters))
93
88
  ).visit(ingredientsDispatcher.astFunctionDef)
94
89
 
@@ -133,6 +128,8 @@ def makeNumbaFlow(numbaFlow: RecipeSynthesizeFlow = RecipeSynthesizeFlow()) -> N
133
128
  , doThat = Then.replaceWith(Make.astAssign(listTargets=[shatteredDataclass.astTuple4AssignTargetsToFragments], value=Make.astCall(Make.astName(numbaFlow.sequentialCallable), shatteredDataclass.listNameDataclassFragments4Parameters)))
134
129
  ).visit(ingredientsDispatcher.astFunctionDef)
135
130
 
131
+ ingredientsDispatcher.astFunctionDef.name = numbaFlow.dispatcherCallable
132
+
136
133
  # ===========================================================
137
134
  sourcePython = numbaFlow.sourceInitializeCallable
138
135
  astFunctionDef = extractFunctionDef(sourcePython, numbaFlow.source_astModule)
@@ -21,7 +21,7 @@ designed to be applicable to various data structure transformation scenarios.
21
21
  """
22
22
 
23
23
  from collections.abc import Sequence
24
- from importlib import import_module
24
+ from importlib import import_module as importlib_import_module
25
25
  from inspect import getsource as inspect_getsource
26
26
  from mapFolding.beDRY import outfitCountFolds, validateListDimensions
27
27
  from mapFolding.filesystem import getPathFilenameFoldsTotal
@@ -36,7 +36,7 @@ from mapFolding.someAssemblyRequired import (
36
36
  Then,
37
37
  Z0Z_executeActionUnlessDescendantMatches,
38
38
  )
39
- from mapFolding.theSSOT import ComputationState, getSourceAlgorithm
39
+ from mapFolding.theSSOT import ComputationState, The
40
40
  from pathlib import Path
41
41
  from types import ModuleType
42
42
  from typing import Any, Literal, overload
@@ -69,7 +69,9 @@ def shatter_dataclassesDOTdataclass(logicalPathModule: strDotStrCuzPyStoopid, da
69
69
  dataclass_Identifier: The identifier of the dataclass to be dismantled.
70
70
  instance_Identifier: In the synthesized module/function/scope, the identifier that will be used for the instance.
71
71
  """
72
- module: ast.Module = ast.parse(inspect_getsource(import_module(logicalPathModule)))
72
+ # TODO learn whether dataclasses.make_dataclass would be useful to transform the target dataclass into the `ShatteredDataclass`
73
+
74
+ module: ast.Module = ast.parse(inspect_getsource(importlib_import_module(logicalPathModule)))
73
75
  astName_dataclassesDOTdataclass = Make.astName(dataclass_Identifier)
74
76
 
75
77
  dataclass = extractClassDef(dataclass_Identifier, module)
@@ -122,6 +124,10 @@ def shatter_dataclassesDOTdataclass(logicalPathModule: strDotStrCuzPyStoopid, da
122
124
  shatteredDataclass.ledgerDataclassANDFragments.addImportFromStr(logicalPathModule, dataclass_Identifier)
123
125
  return shatteredDataclass
124
126
 
127
+ def getSourceAlgorithmVESTIGIAL() -> ModuleType:
128
+ moduleImported: ModuleType = importlib_import_module(The.logicalPathModuleSourceAlgorithm)
129
+ return moduleImported
130
+
125
131
  @overload
126
132
  def makeStateJobOUTDATED(listDimensions: Sequence[int], *, writeJob: Literal[True], **keywordArguments: Any) -> Path: ...
127
133
  @overload
@@ -145,7 +151,7 @@ def makeStateJobOUTDATED(listDimensions: Sequence[int], *, writeJob: bool = True
145
151
  mapShape = validateListDimensions(listDimensions)
146
152
  stateUniversal: ComputationState = outfitCountFolds(mapShape, **keywordArguments)
147
153
 
148
- moduleSource: ModuleType = getSourceAlgorithm()
154
+ moduleSource: ModuleType = getSourceAlgorithmVESTIGIAL()
149
155
  # TODO `countInitialize` is hardcoded
150
156
  stateUniversal = moduleSource.countInitialize(stateUniversal)
151
157
 
@@ -26,27 +26,16 @@ from autoflake import fix_code as autoflake_fix_code
26
26
  from collections import defaultdict
27
27
  from collections.abc import Callable, Container, Sequence
28
28
  from copy import deepcopy
29
+ from importlib import import_module as importlib_import_module
29
30
  from inspect import getsource as inspect_getsource
30
31
  from mapFolding.filesystem import writeStringToHere
31
32
  from mapFolding.theSSOT import (
32
- getSourceAlgorithm,
33
33
  raiseIfNoneGitHubIssueNumber3,
34
- theDataclassIdentifier,
35
- theDataclassInstance,
36
- theDataclassInstanceTaskDistribution,
37
- theDispatcherCallable,
38
- theFileExtension,
34
+ The,
39
35
  theFormatStrModuleForCallableSynthetic,
40
36
  theFormatStrModuleSynthetic,
41
- theLogicalPathModuleDataclass,
42
37
  theLogicalPathModuleDispatcherSynthetic,
43
38
  theModuleDispatcherSynthetic,
44
- theModuleOfSyntheticModules,
45
- thePackageName,
46
- thePathPackage,
47
- theSourceInitializeCallable,
48
- theSourceParallelCallable,
49
- theSourceSequentialCallable,
50
39
  )
51
40
  from os import PathLike
52
41
  from pathlib import Path, PurePath, PurePosixPath
@@ -563,25 +552,27 @@ class RecipeSynthesizeFlow:
563
552
  """Settings for synthesizing flow."""
564
553
  # ========================================
565
554
  # Source
566
- sourceAlgorithm: ModuleType = getSourceAlgorithm()
555
+ sourceAlgorithm: ModuleType = importlib_import_module(The.logicalPathModuleSourceAlgorithm)
567
556
  sourcePython: str = inspect_getsource(sourceAlgorithm)
568
557
  source_astModule: ast.Module = ast.parse(sourcePython)
569
558
 
570
559
  # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
571
- sourceDispatcherCallable: str = theDispatcherCallable
572
- sourceInitializeCallable: str = theSourceInitializeCallable
573
- sourceParallelCallable: str = theSourceParallelCallable
574
- sourceSequentialCallable: str = theSourceSequentialCallable
575
-
576
- sourceDataclassIdentifier: str = theDataclassIdentifier
577
- sourceDataclassInstance: str = theDataclassInstance
578
- sourceDataclassInstanceTaskDistribution: str = theDataclassInstanceTaskDistribution
579
- sourcePathModuleDataclass: str = theLogicalPathModuleDataclass
580
-
560
+ sourceDispatcherCallable: str = The.dispatcherCallable
561
+ sourceInitializeCallable: str = The.sourceInitializeCallable
562
+ sourceParallelCallable: str = The.sourceParallelCallable
563
+ sourceSequentialCallable: str = The.sourceSequentialCallable
564
+
565
+ sourceDataclassIdentifier: str = The.dataclassIdentifier
566
+ sourceDataclassInstance: str = The.dataclassInstance
567
+ sourceDataclassInstanceTaskDistribution: str = The.dataclassInstanceTaskDistribution
568
+ sourcePathModuleDataclass: str = The.logicalPathModuleDataclass
569
+
570
+ sourceConcurrencyManagerNamespace = The.sourceConcurrencyManagerNamespace
571
+ sourceConcurrencyManagerIdentifier = The.sourceConcurrencyManagerIdentifier
581
572
  # ========================================
582
573
  # Filesystem
583
- pathPackage: PurePosixPath | None = PurePosixPath(thePathPackage)
584
- fileExtension: str = theFileExtension
574
+ pathPackage: PurePosixPath | None = PurePosixPath(The.pathPackage)
575
+ fileExtension: str = The.fileExtension
585
576
 
586
577
  # ========================================
587
578
  # Logical identifiers
@@ -590,11 +581,11 @@ class RecipeSynthesizeFlow:
590
581
  formatStrModuleForCallableSynthetic: str = theFormatStrModuleForCallableSynthetic
591
582
 
592
583
  # Package
593
- packageName: ast_Identifier | None = thePackageName
584
+ packageName: ast_Identifier | None = The.packageName
594
585
 
595
586
  # Module
596
587
  # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4
597
- Z0Z_flowLogicalPathRoot: str = theModuleOfSyntheticModules
588
+ Z0Z_flowLogicalPathRoot: str | None = The.moduleOfSyntheticModules
598
589
  moduleDispatcher: str = theModuleDispatcherSynthetic
599
590
  logicalPathModuleDataclass: str = sourcePathModuleDataclass
600
591
  # Figure out dynamic flow control to synthesized modules https://github.com/hunterhogan/mapFolding/issues/4