mapFolding 0.3.12-py3-none-any.whl → 0.4.0-py3-none-any.whl

Files changed (45)
  1. mapFolding/__init__.py +40 -38
  2. mapFolding/basecamp.py +50 -50
  3. mapFolding/beDRY.py +336 -336
  4. mapFolding/oeis.py +262 -262
  5. mapFolding/reference/flattened.py +294 -293
  6. mapFolding/reference/hunterNumba.py +126 -126
  7. mapFolding/reference/irvineJavaPort.py +99 -99
  8. mapFolding/reference/jax.py +153 -153
  9. mapFolding/reference/lunnan.py +148 -148
  10. mapFolding/reference/lunnanNumpy.py +115 -115
  11. mapFolding/reference/lunnanWhile.py +114 -114
  12. mapFolding/reference/rotatedEntryPoint.py +183 -183
  13. mapFolding/reference/total_countPlus1vsPlusN.py +203 -203
  14. mapFolding/someAssemblyRequired/__init__.py +2 -1
  15. mapFolding/someAssemblyRequired/getLLVMforNoReason.py +12 -12
  16. mapFolding/someAssemblyRequired/makeJob.py +48 -48
  17. mapFolding/someAssemblyRequired/synthesizeModuleJAX.py +17 -17
  18. mapFolding/someAssemblyRequired/synthesizeNumba.py +343 -633
  19. mapFolding/someAssemblyRequired/synthesizeNumbaGeneralized.py +371 -0
  20. mapFolding/someAssemblyRequired/synthesizeNumbaJob.py +150 -0
  21. mapFolding/someAssemblyRequired/synthesizeNumbaModules.py +75 -0
  22. mapFolding/syntheticModules/__init__.py +0 -0
  23. mapFolding/syntheticModules/numba_countInitialize.py +3 -3
  24. mapFolding/syntheticModules/numba_countParallel.py +3 -3
  25. mapFolding/syntheticModules/numba_countSequential.py +3 -3
  26. mapFolding/syntheticModules/numba_doTheNeedful.py +6 -6
  27. mapFolding/theDao.py +165 -165
  28. mapFolding/theSSOT.py +176 -172
  29. mapFolding/theSSOTnumba.py +90 -74
  30. mapFolding-0.4.0.dist-info/METADATA +122 -0
  31. mapFolding-0.4.0.dist-info/RECORD +41 -0
  32. tests/conftest.py +238 -128
  33. tests/test_oeis.py +80 -80
  34. tests/test_other.py +137 -224
  35. tests/test_tasks.py +21 -21
  36. tests/test_types.py +2 -2
  37. mapFolding/someAssemblyRequired/synthesizeNumbaHardcoding.py +0 -188
  38. mapFolding-0.3.12.dist-info/METADATA +0 -155
  39. mapFolding-0.3.12.dist-info/RECORD +0 -40
  40. tests/conftest_tmpRegistry.py +0 -62
  41. tests/conftest_uniformTests.py +0 -53
  42. {mapFolding-0.3.12.dist-info → mapFolding-0.4.0.dist-info}/LICENSE +0 -0
  43. {mapFolding-0.3.12.dist-info → mapFolding-0.4.0.dist-info}/WHEEL +0 -0
  44. {mapFolding-0.3.12.dist-info → mapFolding-0.4.0.dist-info}/entry_points.txt +0 -0
  45. {mapFolding-0.3.12.dist-info → mapFolding-0.4.0.dist-info}/top_level.txt +0 -0
@@ -3,209 +3,209 @@ import numpy
 
  @njit(cache=True)
  def foldings_plus_1(p: list[int], computationDivisions: int = 0, computationIndex: int = 0) -> int:
-     n: int = 1 # Total number of leaves
-     for dimension in p:
-         n *= dimension
-
-     d = len(p) # Number of dimensions
-     # Compute arrays P, C, D as per the algorithm
-     P = numpy.ones(d + 1, dtype=numpy.int64)
-     for i in range(1, d + 1):
-         P[i] = P[i - 1] * p[i - 1]
-
-     # C[i][m] holds the i-th coordinate of leaf m
-     C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
-     for i in range(1, d + 1):
-         for m in range(1, n + 1):
-             C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
-
-     # D[i][l][m] computes the leaf connected to m in section i when inserting l
-     D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
-     for i in range(1, d + 1):
-         for l in range(1, n + 1):
-             for m in range(1, l + 1):
-                 delta = C[i][l] - C[i][m]
-                 if delta % 2 == 0:
-                     # If delta is even
-                     if C[i][m] == 1:
-                         D[i][l][m] = m
-                     else:
-                         D[i][l][m] = m - P[i - 1]
-                 else:
-                     # If delta is odd
-                     if C[i][m] == p[i - 1] or m + P[i - 1] > l:
-                         D[i][l][m] = m
-                     else:
-                         D[i][l][m] = m + P[i - 1]
-     # Initialize arrays/lists
-     A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
-     B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
-     count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
-     gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
-     gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
-
-
-     # Initialize variables for backtracking
-     total_count = 0 # Total number of foldings
-     g = 0 # Gap index
-     l = 1 # Current leaf
-
-     # Start backtracking loop
-     while l > 0:
-         # If we have processed all leaves, increment total count
-         if l > n:
-             total_count += 1
-         else:
-             dd = 0 # Number of sections where leaf l is unconstrained
-             gg = g # Temporary gap index
-             g = gapter[l - 1] # Reset gap index for current leaf
-
-             # Count possible gaps for leaf l in each section
-             for i in range(1, d + 1):
-                 if D[i][l][l] == l:
-                     dd += 1
-                 else:
-                     m = D[i][l][l]
-                     while m != l:
-                         if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
-                             gap[gg] = m
-                             if count[m] == 0:
-                                 gg += 1
-                             count[m] += 1
-                         m = D[i][l][B[m]]
-
-             # If leaf l is unconstrained in all sections, it can be inserted anywhere
-             if dd == d:
-                 for m in range(l):
-                     gap[gg] = m
-                     gg += 1
-
-             # Filter gaps that are common to all sections
-             for j in range(g, gg):
-                 gap[g] = gap[j]
-                 if count[gap[j]] == d - dd:
-                     g += 1
-                 count[gap[j]] = 0 # Reset count for next iteration
-
-         # Recursive backtracking steps
-         while l > 0 and g == gapter[l - 1]:
-             l -= 1
-             B[A[l]] = B[l]
-             A[B[l]] = A[l]
-
-         if l > 0:
-             g -= 1
-             A[l] = gap[g]
-             B[l] = B[A[l]]
-             B[A[l]] = l
-             A[B[l]] = l
-             gapter[l] = g # Save current gap index
-             l += 1 # Move to next leaf
-
-     return total_count
+     n: int = 1 # Total number of leaves
+     for dimension in p:
+         n *= dimension
+
+     d = len(p) # Number of dimensions
+     # Compute arrays P, C, D as per the algorithm
+     P = numpy.ones(d + 1, dtype=numpy.int64)
+     for i in range(1, d + 1):
+         P[i] = P[i - 1] * p[i - 1]
+
+     # C[i][m] holds the i-th coordinate of leaf m
+     C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
+     for i in range(1, d + 1):
+         for m in range(1, n + 1):
+             C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
+
+     # D[i][l][m] computes the leaf connected to m in section i when inserting l
+     D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
+     for i in range(1, d + 1):
+         for l in range(1, n + 1):
+             for m in range(1, l + 1):
+                 delta = C[i][l] - C[i][m]
+                 if delta % 2 == 0:
+                     # If delta is even
+                     if C[i][m] == 1:
+                         D[i][l][m] = m
+                     else:
+                         D[i][l][m] = m - P[i - 1]
+                 else:
+                     # If delta is odd
+                     if C[i][m] == p[i - 1] or m + P[i - 1] > l:
+                         D[i][l][m] = m
+                     else:
+                         D[i][l][m] = m + P[i - 1]
+     # Initialize arrays/lists
+     A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
+     B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
+     count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
+     gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
+     gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
+
+
+     # Initialize variables for backtracking
+     total_count = 0 # Total number of foldings
+     g = 0 # Gap index
+     l = 1 # Current leaf
+
+     # Start backtracking loop
+     while l > 0:
+         # If we have processed all leaves, increment total count
+         if l > n:
+             total_count += 1
+         else:
+             dd = 0 # Number of sections where leaf l is unconstrained
+             gg = g # Temporary gap index
+             g = gapter[l - 1] # Reset gap index for current leaf
+
+             # Count possible gaps for leaf l in each section
+             for i in range(1, d + 1):
+                 if D[i][l][l] == l:
+                     dd += 1
+                 else:
+                     m = D[i][l][l]
+                     while m != l:
+                         if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
+                             gap[gg] = m
+                             if count[m] == 0:
+                                 gg += 1
+                             count[m] += 1
+                         m = D[i][l][B[m]]
+
+             # If leaf l is unconstrained in all sections, it can be inserted anywhere
+             if dd == d:
+                 for m in range(l):
+                     gap[gg] = m
+                     gg += 1
+
+             # Filter gaps that are common to all sections
+             for j in range(g, gg):
+                 gap[g] = gap[j]
+                 if count[gap[j]] == d - dd:
+                     g += 1
+                 count[gap[j]] = 0 # Reset count for next iteration
+
+         # Recursive backtracking steps
+         while l > 0 and g == gapter[l - 1]:
+             l -= 1
+             B[A[l]] = B[l]
+             A[B[l]] = A[l]
+
+         if l > 0:
+             g -= 1
+             A[l] = gap[g]
+             B[l] = B[A[l]]
+             B[A[l]] = l
+             A[B[l]] = l
+             gapter[l] = g # Save current gap index
+             l += 1 # Move to next leaf
+
+     return total_count
 
  @njit(cache=True)
  def foldings(p: list[int], computationDivisions: int = 0, computationIndex: int = 0) -> int:
-     n: int = 1 # Total number of leaves
-     for dimension in p:
-         n *= dimension
-
-     d = len(p) # Number of dimensions
-     # Compute arrays P, C, D as per the algorithm
-     P = numpy.ones(d + 1, dtype=numpy.int64)
-     for i in range(1, d + 1):
-         P[i] = P[i - 1] * p[i - 1]
-
-     # C[i][m] holds the i-th coordinate of leaf m
-     C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
-     for i in range(1, d + 1):
-         for m in range(1, n + 1):
-             C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
-             # C[i][m] = ((m - 1) // P[i - 1]) % p[i - 1] + 1 # NOTE different, but either one works
-
-     # D[i][l][m] computes the leaf connected to m in section i when inserting l
-     D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
-     for i in range(1, d + 1):
-         for l in range(1, n + 1):
-             for m in range(1, l + 1):
-                 delta = C[i][l] - C[i][m]
-                 if delta % 2 == 0:
-                     # If delta is even
-                     if C[i][m] == 1:
-                         D[i][l][m] = m
-                     else:
-                         D[i][l][m] = m - P[i - 1]
-                 else:
-                     # If delta is odd
-                     if C[i][m] == p[i - 1] or m + P[i - 1] > l:
-                         D[i][l][m] = m
-                     else:
-                         D[i][l][m] = m + P[i - 1]
-     # Initialize arrays/lists
-     A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
-     B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
-     count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
-     gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
-     gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
-
-
-     # Initialize variables for backtracking
-     total_count = 0 # Total number of foldings
-     g = 0 # Gap index
-     l = 1 # Current leaf
-
-     # Start backtracking loop
-     while l > 0:
-         if l <= 1 or B[0] == 1: # NOTE different
-             # NOTE the above `if` statement encloses the the if/else block below
-             # NOTE these changes increase the throughput by more than an order of magnitude
-             if l > n:
-                 total_count += n
-             else:
-                 dd = 0 # Number of sections where leaf l is unconstrained
-                 gg = gapter[l - 1] # Track possible gaps # NOTE different, but not important
-                 g = gg # NOTE different, but not important
-
-                 # Count possible gaps for leaf l in each section
-                 for i in range(1, d + 1):
-                     if D[i][l][l] == l:
-                         dd += 1
-                     else:
-                         m = D[i][l][l]
-                         while m != l:
-                             if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
-                                 gap[gg] = m
-                                 if count[m] == 0:
-                                     gg += 1
-                                 count[m] += 1
-                             m = D[i][l][B[m]]
-
-                 # If leaf l is unconstrained in all sections, it can be inserted anywhere
-                 if dd == d:
-                     for m in range(l):
-                         gap[gg] = m
-                         gg += 1
-
-                 # Filter gaps that are common to all sections
-                 for j in range(g, gg):
-                     gap[g] = gap[j]
-                     if count[gap[j]] == d - dd:
-                         g += 1
-                     count[gap[j]] = 0 # Reset count for next iteration
-
-         # Recursive backtracking steps
-         while l > 0 and g == gapter[l - 1]:
-             l -= 1
-             B[A[l]] = B[l]
-             A[B[l]] = A[l]
-
-         if l > 0:
-             g -= 1
-             A[l] = gap[g]
-             B[l] = B[A[l]]
-             B[A[l]] = l
-             A[B[l]] = l
-             gapter[l] = g # Save current gap index
-             l += 1 # Move to next leaf
-
-     return total_count
+     n: int = 1 # Total number of leaves
+     for dimension in p:
+         n *= dimension
+
+     d = len(p) # Number of dimensions
+     # Compute arrays P, C, D as per the algorithm
+     P = numpy.ones(d + 1, dtype=numpy.int64)
+     for i in range(1, d + 1):
+         P[i] = P[i - 1] * p[i - 1]
+
+     # C[i][m] holds the i-th coordinate of leaf m
+     C = numpy.zeros((d + 1, n + 1), dtype=numpy.int64)
+     for i in range(1, d + 1):
+         for m in range(1, n + 1):
+             C[i][m] = ((m - 1) // P[i - 1]) - ((m - 1) // P[i]) * p[i - 1] + 1
+             # C[i][m] = ((m - 1) // P[i - 1]) % p[i - 1] + 1 # NOTE different, but either one works
+
+     # D[i][l][m] computes the leaf connected to m in section i when inserting l
+     D = numpy.zeros((d + 1, n + 1, n + 1), dtype=numpy.int64)
+     for i in range(1, d + 1):
+         for l in range(1, n + 1):
+             for m in range(1, l + 1):
+                 delta = C[i][l] - C[i][m]
+                 if delta % 2 == 0:
+                     # If delta is even
+                     if C[i][m] == 1:
+                         D[i][l][m] = m
+                     else:
+                         D[i][l][m] = m - P[i - 1]
+                 else:
+                     # If delta is odd
+                     if C[i][m] == p[i - 1] or m + P[i - 1] > l:
+                         D[i][l][m] = m
+                     else:
+                         D[i][l][m] = m + P[i - 1]
+     # Initialize arrays/lists
+     A = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf above leaf m
+     B = numpy.zeros(n + 1, dtype=numpy.int64) # Leaf below leaf m
+     count = numpy.zeros(n + 1, dtype=numpy.int64) # Counts for potential gaps
+     gapter = numpy.zeros(n + 1, dtype=numpy.int64) # Indices for gap stack per leaf
+     gap = numpy.zeros(n * n + 1, dtype=numpy.int64) # Stack of potential gaps
+
+
+     # Initialize variables for backtracking
+     total_count = 0 # Total number of foldings
+     g = 0 # Gap index
+     l = 1 # Current leaf
+
+     # Start backtracking loop
+     while l > 0:
+         if l <= 1 or B[0] == 1: # NOTE different
+             # NOTE the above `if` statement encloses the the if/else block below
+             # NOTE these changes increase the throughput by more than an order of magnitude
+             if l > n:
+                 total_count += n
+             else:
+                 dd = 0 # Number of sections where leaf l is unconstrained
+                 gg = gapter[l - 1] # Track possible gaps # NOTE different, but not important
+                 g = gg # NOTE different, but not important
+
+                 # Count possible gaps for leaf l in each section
+                 for i in range(1, d + 1):
+                     if D[i][l][l] == l:
+                         dd += 1
+                     else:
+                         m = D[i][l][l]
+                         while m != l:
+                             if computationDivisions == 0 or l != computationDivisions or m % computationDivisions == computationIndex:
+                                 gap[gg] = m
+                                 if count[m] == 0:
+                                     gg += 1
+                                 count[m] += 1
+                             m = D[i][l][B[m]]
+
+                 # If leaf l is unconstrained in all sections, it can be inserted anywhere
+                 if dd == d:
+                     for m in range(l):
+                         gap[gg] = m
+                         gg += 1
+
+                 # Filter gaps that are common to all sections
+                 for j in range(g, gg):
+                     gap[g] = gap[j]
+                     if count[gap[j]] == d - dd:
+                         g += 1
+                     count[gap[j]] = 0 # Reset count for next iteration
+
+         # Recursive backtracking steps
+         while l > 0 and g == gapter[l - 1]:
+             l -= 1
+             B[A[l]] = B[l]
+             A[B[l]] = A[l]
+
+         if l > 0:
+             g -= 1
+             A[l] = gap[g]
+             B[l] = B[A[l]]
+             B[A[l]] = l
+             A[B[l]] = l
+             gapter[l] = g # Save current gap index
+             l += 1 # Move to next leaf
+
+     return total_count
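Both functions in this hunk (apparently mapFolding/reference/total_countPlus1vsPlusN.py, judging by its +203/-203 entry in the file list) implement Lunnon's backtracking search for counting map foldings; the removed and re-added bodies are identical except for formatting that does not survive this rendering. foldings_plus_1 adds 1 per completed folding, while foldings gates the search on B[0] == 1 and adds n at a time. A minimal usage sketch follows; the import path, the chosen dimensions, and the expected values are assumptions for illustration, not output recorded in this diff, and numba plus numpy must be installed.

from mapFolding.reference.total_countPlus1vsPlusN import foldings, foldings_plus_1

# Count the foldings of a 2 x 2 map; both variants are expected to agree.
# 8 is the value OEIS A001418 lists for a 2 x 2 map, cited here only as a cross-check.
print(foldings_plus_1([2, 2]))  # expected: 8
print(foldings([2, 2]))         # expected: 8

# computationDivisions/computationIndex split the gap scan into residue classes;
# summing the per-index results is expected to reproduce the undivided total.
print(sum(foldings([2, 3], computationDivisions=4, computationIndex=k) for k in range(4)))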
@@ -1 +1,2 @@
- from .makeJob import makeStateJob
+ from mapFolding.someAssemblyRequired.getLLVMforNoReason import writeModuleLLVM
+ from mapFolding.someAssemblyRequired.makeJob import makeStateJob
@@ -4,16 +4,16 @@ import llvmlite.binding
  import pathlib
 
  def writeModuleLLVM(pathFilename: pathlib.Path, identifierCallable: str) -> pathlib.Path:
-     """Import the generated module directly and get its LLVM IR."""
-     specTarget = importlib.util.spec_from_file_location("generatedModule", pathFilename)
-     if specTarget is None or specTarget.loader is None:
-         raise ImportError(f"Could not create module spec or loader for {pathFilename}")
-     moduleTarget = importlib.util.module_from_spec(specTarget)
-     specTarget.loader.exec_module(moduleTarget)
+     """Import the generated module directly and get its LLVM IR."""
+     specTarget = importlib.util.spec_from_file_location("generatedModule", pathFilename)
+     if specTarget is None or specTarget.loader is None:
+         raise ImportError(f"Could not create module spec or loader for {pathFilename}")
+     moduleTarget = importlib.util.module_from_spec(specTarget)
+     specTarget.loader.exec_module(moduleTarget)
 
-     # Get LLVM IR and write to file
-     linesLLVM = moduleTarget.__dict__[identifierCallable].inspect_llvm()[()]
-     moduleLLVM = llvmlite.binding.module.parse_assembly(linesLLVM)
-     pathFilenameLLVM = pathFilename.with_suffix(".ll")
-     pathFilenameLLVM.write_text(str(moduleLLVM))
-     return pathFilenameLLVM
+     # Get LLVM IR and write to file
+     linesLLVM = moduleTarget.__dict__[identifierCallable].inspect_llvm()[()]
+     moduleLLVM = llvmlite.binding.module.parse_assembly(linesLLVM)
+     pathFilenameLLVM = pathFilename.with_suffix(".ll")
-     pathFilenameLLVM.write_text(str(moduleLLVM))
+     pathFilenameLLVM.write_text(str(moduleLLVM))
+     return pathFilenameLLVM
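Only the indentation of writeModuleLLVM changes in this hunk. The function imports a generated module from disk, pulls the LLVM IR of a numba-compiled callable via inspect_llvm()[()] (the nullary signature), and writes it beside the module with a .ll suffix. A hedged usage sketch; the module path and the callable name "count" are hypothetical placeholders, not values taken from this diff.

import pathlib
from mapFolding.someAssemblyRequired import writeModuleLLVM

# Hypothetical generated job module; the target callable must already be numba-compiled
# for a zero-argument signature, since inspect_llvm()[()] indexes the nullary entry.
pathFilenameModule = pathlib.Path("jobs/2x2/numbaCount.py")
pathFilenameLLVM = writeModuleLLVM(pathFilenameModule, "count")
print(pathFilenameLLVM)  # the same path with a .ll suffix, per the function above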
@@ -6,56 +6,56 @@ import pickle
 
  @overload
  def makeStateJob(listDimensions: Sequence[int], *, writeJob: Literal[True]
-                  , **keywordArguments: Optional[str]) -> pathlib.Path:
-     ...
+                  , **keywordArguments: Optional[str]) -> pathlib.Path:
+     ...
 
  @overload
  def makeStateJob(listDimensions: Sequence[int], *, writeJob: Literal[False]
-                  , **keywordArguments: Optional[str]) -> computationState:
-     ...
+                  , **keywordArguments: Optional[str]) -> computationState:
+     ...
 
  def makeStateJob(listDimensions: Sequence[int], *, writeJob: bool = True, **keywordArguments: Optional[Any]) -> computationState | pathlib.Path:
-     """
-     Creates a computation state job for map folding calculations and optionally saves it to disk.
-
-     This function initializes a computation state for map folding calculations based on the given dimensions,
-     sets up the initial counting configuration, and can optionally save the state to a pickle file.
-
-     Parameters
-     ----------
-     listDimensions : Sequence[int]
-         The dimensions of the map to be folded, typically as [height, width].
-     writeJob : bool, optional
-         If True, saves the computation state to disk. If False, returns the state object directly.
-         Default is True.
-     **keywordArguments : Optional[str]
-         Additional keyword arguments to be passed to the outfitCountFolds function.
-
-     Returns
-     -------
-     Union[computationState, pathlib.Path]
-         If writeJob is False, returns the computation state object.
-         If writeJob is True, returns the Path object pointing to the saved state file.
-
-     Notes
-     -----
-     The function creates necessary directories and saves the state as a pickle file
-     when writeJob is True. The file is saved in a directory structure based on the map shape.
-     """
-
-     stateUniversal: computationState = outfitCountFolds(listDimensions, **keywordArguments)
-
-     moduleSource: ModuleType = getAlgorithmSource()
-     moduleSource.countInitialize(stateUniversal['connectionGraph'], stateUniversal['gapsWhere'], stateUniversal['my'], stateUniversal['track'])
-
-     if not writeJob:
-         return stateUniversal
-
-     pathFilenameChopChop = getPathFilenameFoldsTotal(stateUniversal['mapShape'])
-     suffix = pathFilenameChopChop.suffix
-     pathJob = pathlib.Path(str(pathFilenameChopChop)[0:-len(suffix)])
-     pathJob.mkdir(parents=True, exist_ok=True)
-     pathFilenameJob = pathJob / 'stateJob.pkl'
-
-     pathFilenameJob.write_bytes(pickle.dumps(stateUniversal))
-     return pathFilenameJob
+     """
+     Creates a computation state job for map folding calculations and optionally saves it to disk.
+
+     This function initializes a computation state for map folding calculations based on the given dimensions,
+     sets up the initial counting configuration, and can optionally save the state to a pickle file.
+
+     Parameters
+     ----------
+     listDimensions : Sequence[int]
+         The dimensions of the map to be folded, typically as [height, width].
+     writeJob : bool, optional
+         If True, saves the computation state to disk. If False, returns the state object directly.
+         Default is True.
+     **keywordArguments : Optional[str]
+         Additional keyword arguments to be passed to the outfitCountFolds function.
+
+     Returns
+     -------
+     Union[computationState, pathlib.Path]
+         If writeJob is False, returns the computation state object.
+         If writeJob is True, returns the Path object pointing to the saved state file.
+
+     Notes
+     -----
+     The function creates necessary directories and saves the state as a pickle file
+     when writeJob is True. The file is saved in a directory structure based on the map shape.
+     """
+
+     stateUniversal: computationState = outfitCountFolds(listDimensions, **keywordArguments)
+
+     moduleSource: ModuleType = getAlgorithmSource()
+     moduleSource.countInitialize(stateUniversal['connectionGraph'], stateUniversal['gapsWhere'], stateUniversal['my'], stateUniversal['track'])
+
+     if not writeJob:
+         return stateUniversal
+
+     pathFilenameChopChop = getPathFilenameFoldsTotal(stateUniversal['mapShape'])
+     suffix = pathFilenameChopChop.suffix
+     pathJob = pathlib.Path(str(pathFilenameChopChop)[0:-len(suffix)])
+     pathJob.mkdir(parents=True, exist_ok=True)
+     pathFilenameJob = pathJob / 'stateJob.pkl'
+
+     pathFilenameJob.write_bytes(pickle.dumps(stateUniversal))
+     return pathFilenameJob
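A usage sketch for makeStateJob, following the overloads and docstring above; the dimensions are arbitrary examples, and whether a computationState or a pathlib.Path comes back is decided by writeJob.

from mapFolding.someAssemblyRequired import makeStateJob

# writeJob=False: return the computationState directly after countInitialize has run.
stateUniversal = makeStateJob([2, 2], writeJob=False)

# writeJob=True (the default): pickle the state as 'stateJob.pkl' under the
# folds-total path for this map shape and return that path.
pathFilenameJob = makeStateJob([2, 2])
print(pathFilenameJob)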
@@ -6,24 +6,24 @@ import inspect
  import pathlib
  import sys
 
- def transformPythonToJAX(codePython: str):
-     astPython = ast.parse(codePython)
+ def transformPythonToJAX(codePython: str) -> None:
+     astPython = ast.parse(codePython)
 
  def writeJax(*, codeSource: Optional[str] = None, pathFilenameAlgorithm: Optional[pathlib.Path] = None, pathFilenameDestination: Optional[pathlib.Path] = None) -> None:
-     if codeSource is None and pathFilenameAlgorithm is None:
-         algorithmSource = getAlgorithmSource()
-         codeSource = inspect.getsource(algorithmSource)
-         transformedText = transformPythonToJAX(codeSource)
-         pathFilenameAlgorithm = pathlib.Path(inspect.getfile(algorithmSource))
-     else:
-         raise NotImplementedError("You haven't written this part yet.")
-     if pathFilenameDestination is None:
-         pathFilenameDestination = getPathSyntheticModules() / "countJax.py"
-     # pathFilenameDestination.write_text(transformedText)
+     if codeSource is None and pathFilenameAlgorithm is None:
+         algorithmSource = getAlgorithmSource()
+         codeSource = inspect.getsource(algorithmSource)
+         transformedText = transformPythonToJAX(codeSource)
+         pathFilenameAlgorithm = pathlib.Path(inspect.getfile(algorithmSource))
+     else:
+         raise NotImplementedError("You haven't written this part yet.")
+     if pathFilenameDestination is None:
+         pathFilenameDestination = getPathSyntheticModules() / "countJax.py"
+     # pathFilenameDestination.write_text(transformedText)
 
  if __name__ == '__main__':
-     setDatatypeModule('jax.numpy', sourGrapes=True)
-     setDatatypeFoldsTotal('int64', sourGrapes=True)
-     setDatatypeElephino('uint8', sourGrapes=True)
-     setDatatypeLeavesTotal('uint8', sourGrapes=True)
-     writeJax()
+     setDatatypeModule('jax.numpy', sourGrapes=True)
+     setDatatypeFoldsTotal('int64', sourGrapes=True)
+     setDatatypeElephino('uint8', sourGrapes=True)
+     setDatatypeLeavesTotal('uint8', sourGrapes=True)
+     writeJax()