mapFolding-0.3.7-py3-none-any.whl → mapFolding-0.3.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. mapFolding/__init__.py +38 -0
  2. mapFolding/basecamp.py +55 -0
  3. mapFolding/beDRY.py +364 -0
  4. mapFolding/oeis.py +329 -0
  5. mapFolding/someAssemblyRequired/makeJob.py +62 -0
  6. mapFolding/someAssemblyRequired/synthesizeModuleJAX.py +29 -0
  7. someAssemblyRequired/synthesizeModuleJob.py → mapFolding/someAssemblyRequired/synthesizeModuleJobNumba.py +81 -27
  8. mapFolding/someAssemblyRequired/synthesizeModulesNumba.py +506 -0
  9. mapFolding/syntheticModules/__init__.py +3 -0
  10. syntheticModules/Initialize.py → mapFolding/syntheticModules/numba_countInitialize.py +5 -4
  11. syntheticModules/Parallel.py → mapFolding/syntheticModules/numba_countParallel.py +10 -5
  12. syntheticModules/Sequential.py → mapFolding/syntheticModules/numba_countSequential.py +5 -4
  13. mapFolding/syntheticModules/numba_doTheNeedful.py +33 -0
  14. mapFolding/theDao.py +214 -0
  15. mapFolding/theSSOT.py +269 -0
  16. mapFolding-0.3.9.dist-info/LICENSE +407 -0
  17. {mapFolding-0.3.7.dist-info → mapFolding-0.3.9.dist-info}/METADATA +9 -5
  18. mapFolding-0.3.9.dist-info/RECORD +40 -0
  19. mapFolding-0.3.9.dist-info/top_level.txt +2 -0
  20. tests/__init__.py +1 -0
  21. tests/conftest.py +224 -0
  22. tests/conftest_tmpRegistry.py +62 -0
  23. tests/conftest_uniformTests.py +53 -0
  24. tests/test_oeis.py +200 -0
  25. tests/test_other.py +258 -0
  26. tests/test_tasks.py +44 -0
  27. tests/test_types.py +5 -0
  28. benchmarks/benchmarking.py +0 -67
  29. citations/updateCitation.py +0 -238
  30. mapFolding-0.3.7.dist-info/RECORD +0 -25
  31. mapFolding-0.3.7.dist-info/top_level.txt +0 -5
  32. someAssemblyRequired/makeJob.py +0 -34
  33. someAssemblyRequired/synthesizeModules.py +0 -216
  34. syntheticModules/__init__.py +0 -4
  35. {reference → mapFolding/reference}/flattened.py +0 -0
  36. {reference → mapFolding/reference}/hunterNumba.py +0 -0
  37. {reference → mapFolding/reference}/irvineJavaPort.py +0 -0
  38. {reference → mapFolding/reference}/jax.py +0 -0
  39. {reference → mapFolding/reference}/lunnan.py +0 -0
  40. {reference → mapFolding/reference}/lunnanNumpy.py +0 -0
  41. {reference → mapFolding/reference}/lunnanWhile.py +0 -0
  42. {reference → mapFolding/reference}/rotatedEntryPoint.py +0 -0
  43. {reference → mapFolding/reference}/total_countPlus1vsPlusN.py +0 -0
  44. {someAssemblyRequired → mapFolding/someAssemblyRequired}/__init__.py +0 -0
  45. {someAssemblyRequired → mapFolding/someAssemblyRequired}/getLLVMforNoReason.py +0 -0
  46. {mapFolding-0.3.7.dist-info → mapFolding-0.3.9.dist-info}/WHEEL +0 -0
  47. {mapFolding-0.3.7.dist-info → mapFolding-0.3.9.dist-info}/entry_points.txt +0 -0
tests/test_other.py ADDED
@@ -0,0 +1,258 @@
1
+ from contextlib import redirect_stdout
2
+ from tests.conftest import *
3
+ from typing import Dict, List, Optional, Any, Tuple, Literal, Callable, Generator
4
+ from Z0Z_tools import intInnit
5
+ import io
6
+ import itertools
7
+ import numba
8
+ import numpy
9
+ import pathlib
10
+ import pytest
11
+ import random
12
+ import sys
13
+
14
+ @pytest.mark.parametrize("listDimensions,expected_intInnit,expected_parseListDimensions,expected_validateListDimensions,expected_getLeavesTotal", [
15
+ (None, ValueError, ValueError, ValueError, ValueError), # None instead of list
16
+ (['a'], ValueError, ValueError, ValueError, ValueError), # string
17
+ ([-4, 2], [-4, 2], ValueError, ValueError, ValueError), # negative
18
+ ([-3], [-3], ValueError, ValueError, ValueError), # negative
19
+ ([0, 0], [0, 0], [0, 0], NotImplementedError, 0), # no positive dimensions
20
+ ([0, 5, 6], [0, 5, 6], [0, 5, 6], [5, 6], 30), # zeros ignored
21
+ ([0], [0], [0], NotImplementedError, 0), # edge case
22
+ ([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 120), # sequential
23
+ ([1, sys.maxsize], [1, sys.maxsize], [1, sys.maxsize], [1, sys.maxsize], sys.maxsize), # maxint
24
+ ([7.5], ValueError, ValueError, ValueError, ValueError), # float
25
+ ([1] * 1000, [1] * 1000, [1] * 1000, [1] * 1000, 1), # long list
26
+ ([11], [11], [11], NotImplementedError, 11), # single dimension
27
+ ([13, 0, 17], [13, 0, 17], [13, 0, 17], [13, 17], 221), # zeros handled
28
+ ([2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], 16), # repeated dimensions
29
+ ([2, 3, 4], [2, 3, 4], [2, 3, 4], [2, 3, 4], 24),
30
+ ([2, 3], [2, 3], [2, 3], [2, 3], 6),
31
+ ([2] * 11, [2] * 11, [2] * 11, [2] * 11, 2048), # power of 2
32
+ ([3, 2], [3, 2], [3, 2], [2, 3], 6), # return value is the input when valid
33
+ ([3] * 5, [3] * 5, [3] * 5, [3, 3, 3, 3, 3], 243), # power of 3
34
+ ([None], TypeError, TypeError, TypeError, TypeError), # None
35
+ ([True], TypeError, TypeError, TypeError, TypeError), # bool
36
+ ([[17, 39]], TypeError, TypeError, TypeError, TypeError), # nested
37
+ ([], ValueError, ValueError, ValueError, ValueError), # empty
38
+ ([complex(1,1)], ValueError, ValueError, ValueError, ValueError), # complex number
39
+ ([float('inf')], ValueError, ValueError, ValueError, ValueError), # infinity
40
+ ([float('nan')], ValueError, ValueError, ValueError, ValueError), # NaN
41
+ ([sys.maxsize - 1, 1], [sys.maxsize - 1, 1], [sys.maxsize - 1, 1], [1, sys.maxsize - 1], sys.maxsize - 1), # near maxint
42
+ ([sys.maxsize // 2, sys.maxsize // 2, 2], [sys.maxsize // 2, sys.maxsize // 2, 2], [sys.maxsize // 2, sys.maxsize // 2, 2], [2, sys.maxsize // 2, sys.maxsize // 2], OverflowError), # overflow protection
43
+ ([sys.maxsize, sys.maxsize], [sys.maxsize, sys.maxsize], [sys.maxsize, sys.maxsize], [sys.maxsize, sys.maxsize], OverflowError), # overflow protection
44
+ (range(3, 7), [3, 4, 5, 6], [3, 4, 5, 6], [3, 4, 5, 6], 360), # range sequence type
45
+ (tuple([3, 5, 7]), [3, 5, 7], [3, 5, 7], [3, 5, 7], 105), # tuple sequence type
46
+ ])
47
+ def test_listDimensionsAsParameter(listDimensions: None | List[str] | List[int] | List[float] | List[None] | List[bool] | List[List[int]] | List[complex] | range | tuple[int, ...], expected_intInnit: type[ValueError] | List[int] | type[TypeError], expected_parseListDimensions: type[ValueError] | List[int] | type[TypeError], expected_validateListDimensions: type[ValueError] | type[NotImplementedError] | List[int] | type[TypeError], expected_getLeavesTotal: type[ValueError] | int | type[TypeError] | type[OverflowError]) -> None:
48
+ """Test both validateListDimensions and getLeavesTotal with the same inputs."""
49
+ standardizedEqualTo(expected_intInnit, intInnit, listDimensions)
50
+ standardizedEqualTo(expected_parseListDimensions, parseDimensions, listDimensions)
51
+ standardizedEqualTo(expected_validateListDimensions, validateListDimensions, listDimensions)
52
+ standardizedEqualTo(expected_getLeavesTotal, getLeavesTotal, listDimensions)
53
+
54
+ def test_getLeavesTotal_edge_cases() -> None:
55
+ """Test edge cases for getLeavesTotal."""
56
+ # Order independence
57
+ standardizedEqualTo(getLeavesTotal([2, 3, 4]), getLeavesTotal, [4, 2, 3])
58
+
59
+ # Immutability
60
+ listOriginal = [2, 3]
61
+ standardizedEqualTo(6, getLeavesTotal, listOriginal)
62
+ standardizedEqualTo([2, 3], lambda x: x, listOriginal) # Check that the list wasn't modified
63
+
64
+ @pytest.mark.parametrize("foldsValue,writeFoldsTarget", [
65
+ (756839, "foldsTotalTest.txt"), # Direct file
66
+ (2640919, "foldsTotalTest.txt"), # Direct file
67
+ (7715177, None), # Directory, will use default filename
68
+ ])
69
+ def test_countFolds_writeFoldsTotal(
70
+ listDimensionsTestFunctionality: List[int],
71
+ pathTempTesting: pathlib.Path,
72
+ mockFoldingFunction: Callable[..., Callable[..., None]],
73
+ mockDispatcher: Callable[[Callable[..., None]], Any],
74
+ foldsValue: int,
75
+ writeFoldsTarget: Optional[str]
76
+ ) -> None:
77
+ """Test writing folds total to either a file or directory."""
78
+ # For directory case, use the directory path directly
79
+ if writeFoldsTarget is None:
80
+ pathWriteTarget = pathTempTesting
81
+ filenameFoldsTotalExpected = getFilenameFoldsTotal(listDimensionsTestFunctionality)
82
+ else:
83
+ pathWriteTarget = pathTempTesting / writeFoldsTarget
84
+ filenameFoldsTotalExpected = writeFoldsTarget
85
+
86
+ foldsTotalExpected = foldsValue * getLeavesTotal(listDimensionsTestFunctionality)
87
+ mock_countFolds = mockFoldingFunction(foldsValue, listDimensionsTestFunctionality)
88
+
89
+ with mockDispatcher(mock_countFolds):
90
+ returned = countFolds(listDimensionsTestFunctionality, pathLikeWriteFoldsTotal=pathWriteTarget)
91
+
92
+ standardizedEqualTo(str(foldsTotalExpected), lambda: (pathTempTesting / filenameFoldsTotalExpected).read_text())
93
+
94
+ @pytest.mark.parametrize("nameOfTest,callablePytest", PytestFor_intInnit())
95
+ def testIntInnit(nameOfTest: str, callablePytest: Callable[[], None]) -> None:
96
+ callablePytest()
97
+
98
+ @pytest.mark.parametrize("nameOfTest,callablePytest", PytestFor_oopsieKwargsie())
99
+ def testOopsieKwargsie(nameOfTest: str, callablePytest: Callable[[], None]) -> None:
100
+ callablePytest()
101
+
102
+ @pytest.mark.parametrize("CPUlimit, expectedLimit", [
103
+ (None, numba.get_num_threads()),
104
+ (False, numba.get_num_threads()),
105
+ (True, 1),
106
+ (4, 4),
107
+ (0.5, max(1, numba.get_num_threads() // 2)),
108
+ (-0.5, max(1, numba.get_num_threads() // 2)),
109
+ (-2, max(1, numba.get_num_threads() - 2)),
110
+ (0, numba.get_num_threads()),
111
+ (1, 1),
112
+ ])
113
+ def test_setCPUlimit(CPUlimit: None | float | bool | Literal[4] | Literal[-2] | Literal[0] | Literal[1], expectedLimit: Any | int) -> None:
114
+ standardizedEqualTo(expectedLimit, setCPUlimit, CPUlimit)
115
+
116
+ def test_makeConnectionGraph_nonNegative(listDimensionsTestFunctionality: List[int]) -> None:
117
+ connectionGraph = makeConnectionGraph(listDimensionsTestFunctionality)
118
+ assert numpy.all(connectionGraph >= 0), "All values in the connection graph should be non-negative."
119
+
120
+ # @pytest.mark.parametrize("datatype", ['int16', 'uint64'])
121
+ # def test_makeConnectionGraph_datatype(listDimensionsTestFunctionality: List[int], datatype) -> None:
122
+ # connectionGraph = makeConnectionGraph(listDimensionsTestFunctionality, datatype=datatype)
123
+ # assert connectionGraph.dtype == datatype, f"Expected datatype {datatype}, but got {connectionGraph.dtype}."
124
+
125
+ """5 parameters
126
+ listDimensionsTestFunctionality
127
+
128
+ computationDivisions
129
+ None
130
+ random: int, first included: 2, first excluded: leavesTotal
131
+ maximum
132
+ cpu
133
+
134
+ CPUlimit
135
+ None
136
+ True
137
+ False
138
+ 0
139
+ 1
140
+ -1
141
+ random: 0 < float < 1
142
+ random: -1 < float < 0
143
+ random: int, first included: 2, first excluded: (min(leavesTotal, 16) - 1)
144
+ random: int, first included: -1 * (min(leavesTotal, 16) - 1), first excluded: -1
145
+
146
+ datatypeMedium
147
+ None
148
+ numpy.int64
149
+ numpy.intc
150
+ numpy.uint16
151
+
152
+ datatypeLarge
153
+ None
154
+ numpy.int64
155
+ numpy.intp
156
+ numpy.uint32
157
+
158
+ """
159
+
160
+ @pytest.fixture
161
+ def parameterIterator() -> Callable[[List[int]], Generator[Dict[str, Any], None, None]]:
162
+ """Generate random combinations of parameters for outfitCountFolds testing."""
163
+ parameterSets: Dict[str, List[Any]] = {
164
+ 'computationDivisions': [
165
+ None,
166
+ 'maximum',
167
+ 'cpu',
168
+ ],
169
+ 'CPUlimit': [
170
+ None, True, False, 0, 1, -1,
171
+ ],
172
+ 'datatypeMedium': [
173
+ None,
174
+ numpy.int64,
175
+ numpy.intc,
176
+ numpy.uint16
177
+ ],
178
+ 'datatypeLarge': [
179
+ None,
180
+ numpy.int64,
181
+ numpy.intp,
182
+ numpy.uint32
183
+ ]
184
+ }
185
+
186
+ def makeParametersDynamic(listDimensions: List[int]) -> Dict[str, List[Any]]:
187
+ """Add context-dependent parameter values."""
188
+ parametersDynamic = parameterSets.copy()
189
+ leavesTotal = getLeavesTotal(listDimensions)
190
+ concurrencyLimit = min(leavesTotal, 16)
191
+
192
+ # Add dynamic computationDivisions values
193
+ dynamicDivisions = [random.randint(2, leavesTotal-1) for iterator in range(3)]
194
+ parametersDynamic['computationDivisions'] = parametersDynamic['computationDivisions'] + dynamicDivisions
195
+
196
+ # Add dynamic CPUlimit values
197
+ parameterDynamicCPU = [
198
+ random.random(), # 0 to 1
199
+ -random.random(), # -1 to 0
200
+ ]
201
+ parameterDynamicCPU.extend(
202
+ [random.randint(2, concurrencyLimit-1) for iterator in range(2)]
203
+ )
204
+ parameterDynamicCPU.extend(
205
+ [random.randint(-concurrencyLimit+1, -2) for iterator in range(2)]
206
+ )
207
+ parametersDynamic['CPUlimit'] = parametersDynamic['CPUlimit'] + parameterDynamicCPU
208
+
209
+ return parametersDynamic
210
+
211
+ def generateCombinations(listDimensions: List[int]) -> Generator[Dict[str, Any], None, None]:
212
+ parametersDynamic = makeParametersDynamic(listDimensions)
213
+ parameterKeys = list(parametersDynamic.keys())
214
+ parameterValues = [parametersDynamic[key] for key in parameterKeys]
215
+
216
+ # Shuffle each parameter list
217
+ for valueList in parameterValues:
218
+ random.shuffle(valueList)
219
+
220
+ # Use zip_longest to iterate, filling with None when shorter lists are exhausted
221
+ for combination in itertools.zip_longest(*parameterValues, fillvalue=None):
222
+ yield dict(zip(parameterKeys, combination))
223
+
224
+ return generateCombinations
225
+
226
+ # TODO refactor due to changes
227
+ # def test_pathJobDEFAULT_colab() -> None:
228
+ # """Test that pathJobDEFAULT is set correctly when running in Google Colab."""
229
+ # # Mock sys.modules to simulate running in Colab
230
+ # with unittest.mock.patch.dict('sys.modules', {'google.colab': unittest.mock.MagicMock()}):
231
+ # # Force reload of theSSOT to trigger Colab path logic
232
+ # import importlib
233
+ # import mapFolding.theSSOT
234
+ # importlib.reload(mapFolding.theSSOT)
235
+
236
+ # # Check that path was set to Colab-specific value
237
+ # assert mapFolding.theSSOT.pathJobDEFAULT == pathlib.Path("/content/drive/MyDrive") / "jobs"
238
+
239
+ # # Reload one more time to restore original state
240
+ # importlib.reload(mapFolding.theSSOT)
241
+
242
+ def test_saveFoldsTotal_fallback(pathTempTesting: pathlib.Path) -> None:
243
+ foldsTotal = 123
244
+ pathFilename = pathTempTesting / "foldsTotal.txt"
245
+ with unittest.mock.patch("pathlib.Path.write_text", side_effect=OSError("Simulated write failure")):
246
+ with unittest.mock.patch("os.getcwd", return_value=str(pathTempTesting)):
247
+ capturedOutput = io.StringIO()
248
+ with redirect_stdout(capturedOutput):
249
+ saveFoldsTotal(pathFilename, foldsTotal)
250
+ fallbackFiles = list(pathTempTesting.glob("foldsTotalYO_*.txt"))
251
+ assert len(fallbackFiles) == 1, "Fallback file was not created upon write failure."
252
+
253
+ def test_makeDataContainer_default_datatype() -> None:
254
+ """Test that makeDataContainer uses dtypeLargeDEFAULT when no datatype is specified."""
255
+ testShape = (3, 4)
256
+ container = makeDataContainer(testShape)
257
+ assert container.dtype == hackSSOTdtype('dtypeFoldsTotal'), f"Expected datatype but got {container.dtype}"
258
+ assert container.shape == testShape, f"Expected shape {testShape}, but got {container.shape}"
tests/test_tasks.py ADDED
@@ -0,0 +1,44 @@
1
+ from tests.conftest import *
2
+ import pytest
3
+ from typing import List, Dict, Literal, Tuple, Any
4
+
5
+ # TODO add a test. `C` = number of logical cores available. `n = C + 1`. Ensure that `[2,n]` is computed correctly.
6
+ # Or, probably smarter: limit the number of cores, then run a test with C+1.
7
+
8
+ def test_algorithmSourceParallel(listDimensionsTestParallelization: List[int], foldsTotalKnown: Dict[Tuple[int, ...], int], useAlgorithmDirectly: None) -> None:
9
+ standardizedEqualTo(foldsTotalKnown[tuple(listDimensionsTestParallelization)], countFolds, listDimensionsTestParallelization, None, 'maximum')
10
+
11
+ def test_countFoldsComputationDivisionsInvalid(listDimensionsTestFunctionality: List[int]) -> None:
12
+ standardizedEqualTo(ValueError, countFolds, listDimensionsTestFunctionality, None, {"wrong": "value"})
13
+
14
+ def test_countFoldsComputationDivisionsMaximum(listDimensionsTestParallelization: List[int], foldsTotalKnown: Dict[Tuple[int, ...], int]) -> None:
15
+ standardizedEqualTo(foldsTotalKnown[tuple(listDimensionsTestParallelization)], countFolds, listDimensionsTestParallelization, None, 'maximum')
16
+
17
+ @pytest.mark.parametrize("nameOfTest,callablePytest", PytestFor_defineConcurrencyLimit())
18
+ def test_defineConcurrencyLimit(nameOfTest: str, callablePytest: Callable[[], None]) -> None:
19
+ callablePytest()
20
+
21
+ # @pytest.mark.parametrize("CPUlimitParameter", [{"invalid": True}, ["weird"]])
22
+ # def test_countFolds_cpuLimitOopsie(listDimensionsTestFunctionality: List[int], CPUlimitParameter: Dict[str, bool] | List[str]) -> None:
23
+ # standardizedEqualTo((AttributeError or ValueError), countFolds, listDimensionsTestFunctionality, None, 'cpu', CPUlimitParameter)
24
+
25
+ @pytest.mark.parametrize("computationDivisions, concurrencyLimit, listDimensions, expectedTaskDivisions", [
26
+ (None, 4, [9, 11], 0),
27
+ ("maximum", 4, [7, 11], 77),
28
+ ("cpu", 4, [3, 7], 4),
29
+ (["invalid"], 4, [19, 23], ValueError),
30
+ (20, 4, [3,5], ValueError)
31
+ ])
32
+ def test_getTaskDivisions(computationDivisions: None | List[str] | Literal['maximum'] | Literal['cpu'] | Literal[20], concurrencyLimit: Literal[4], listDimensions: List[int], expectedTaskDivisions: type[ValueError] | Literal[0] | Literal[77] | Literal[4]) -> None:
33
+ standardizedEqualTo(expectedTaskDivisions, getTaskDivisions, computationDivisions, concurrencyLimit, None, listDimensions)
34
+
35
+ @pytest.mark.parametrize("expected,parameter", [
36
+ (2, "2"), # string
37
+ (ValueError, [4]), # list
38
+ (ValueError, (2,)), # tuple
39
+ (ValueError, {2}), # set
40
+ (ValueError, {"cores": 2}), # dict
41
+ ])
42
+ def test_setCPUlimitMalformedParameter(expected: type[ValueError] | Literal[2], parameter: List[int] | Tuple[int] | set[int] | Dict[str, int] | Literal['2']) -> None:
43
+ """Test that invalid CPUlimit types are properly handled."""
44
+ standardizedEqualTo(expected, setCPUlimit, parameter)
tests/test_types.py ADDED
@@ -0,0 +1,5 @@
1
+ """Type checking tests for mapFolding package."""
2
+
3
+ def test_static_typing() -> None:
4
+ """This is a placeholder. pytest-mypy will run type checking automatically."""
5
+ pass
@@ -1,67 +0,0 @@
1
- """An incompetent benchmarking module for mapFolding."""
2
- from typing import Callable
3
- import multiprocessing
4
- import numpy
5
- import pathlib
6
- import time
7
-
8
- pathRecordedBenchmarks = pathlib.Path('mapFolding/benchmarks/marks')
9
- pathRecordedBenchmarks.mkdir(parents=True, exist_ok=True)
10
- pathFilenameRecordedBenchmarks = pathRecordedBenchmarks / "benchmarks.npy"
11
-
12
- def recordBenchmarks():
13
- """Decorator to benchmark a function."""
14
- def AzeemTheWrapper(functionTarget: Callable):
15
- def djZeph(*arguments, **keywordArguments):
16
- timeStart = time.perf_counter_ns()
17
- returnValueTarget = functionTarget(*arguments, **keywordArguments)
18
- timeElapsed = (time.perf_counter_ns() - timeStart) / 1e9
19
-
20
- # Extract mapShape from arguments
21
- mapShape = keywordArguments['mapShape']
22
- # mapShape = tuple(arguments)[2]
23
- # leavesTotal = tuple(arguments[3])[4]
24
-
25
- # Store benchmark data in single file
26
- benchmarkEntry = numpy.array([(timeElapsed, mapShape)], dtype=[('time', 'f8'), ('mapShape', 'O')])
27
- # benchmarkEntry = numpy.array([(timeElapsed, leavesTotal)], dtype=[('time', 'f8'), ('leaves', 'O')])
28
-
29
- if pathFilenameRecordedBenchmarks.exists():
30
- arrayExisting = numpy.load(str(pathFilenameRecordedBenchmarks), allow_pickle=True)
31
- arrayBenchmark = numpy.concatenate([arrayExisting, benchmarkEntry])
32
- else:
33
- arrayBenchmark = benchmarkEntry
34
-
35
- numpy.save(str(pathFilenameRecordedBenchmarks), arrayBenchmark)
36
- return returnValueTarget
37
-
38
- return djZeph
39
- return AzeemTheWrapper
40
-
41
- def runBenchmarks(benchmarkIterations: int = 30) -> None:
42
- """Run benchmark iterations.
43
-
44
- Parameters:
45
- benchmarkIterations (30): Number of benchmark iterations to run
46
- """
47
- # TODO warmUp (False): Whether to perform one warm-up iteration
48
-
49
- import itertools
50
- from tqdm.auto import tqdm
51
- from mapFolding.oeis import settingsOEIS, oeisIDfor_n
52
- from concurrent.futures import ProcessPoolExecutor, as_completed
53
- max_workers = 6
54
-
55
- listParametersOEIS = [(oeisIdentifier, dimensionValue) for oeisIdentifier, settings in settingsOEIS.items() for dimensionValue in settings['valuesBenchmark']]
56
- # for (oeisIdentifier, dimensionValue), iterationIndex in tqdm(itertools.product(listParametersOEIS, range(benchmarkIterations)), total=len(listParametersOEIS) * benchmarkIterations):
57
- # oeisIDfor_n(oeisIdentifier, dimensionValue)
58
- listCartesianProduct = list(itertools.product(listParametersOEIS, range(benchmarkIterations)))
59
- with ProcessPoolExecutor(max_workers) as concurrencyManager:
60
- listConcurrency = [concurrencyManager.submit(oeisIDfor_n, *parameters[0]) for parameters in listCartesianProduct]
61
- for _complete in tqdm(as_completed(listConcurrency), total=len(listCartesianProduct)):
62
- pass
63
-
64
- if __name__ == '__main__':
65
- multiprocessing.set_start_method('spawn')
66
- pathFilenameRecordedBenchmarks.unlink(missing_ok=True)
67
- runBenchmarks(30)
@@ -1,238 +0,0 @@
1
- from cffconvert.cli.create_citation import create_citation
2
- from packaging.metadata import Metadata as PyPAMetadata
3
- from typing import Any, Dict, List
4
- import attrs
5
- import cffconvert
6
- import tempfile
7
- import packaging
8
- import packaging.metadata
9
- import packaging.utils
10
- import packaging.version
11
- import pathlib
12
- import ruamel.yaml
13
- import tomli
14
-
15
- listProjectURLsTarget: List[str] = ["homepage", "license", "repository"]
16
-
17
- """
18
- Tentative plan:
19
- - Commit and push to GitHub
20
- - GitHub Action gathers information from the sources of truth
21
- - If the citation needs to be updated, write to both
22
- - pathFilenameCitationSSOT
23
- - pathFilenameCitationDOTcffRepo
24
- - Commit and push to GitHub
25
- - this complicates things
26
- - I want the updated citation to be in the `commit` field of itself
27
- """
28
-
29
- @attrs.define
30
- class CitationNexus:
31
- """
32
- - one-to-one correlation with `cffconvert.lib.cff_1_2_x.citation` class Citation_1_2_x.cffobj
33
- """
34
- cffDASHversion: str # pathFilenameCitationSSOT
35
- message: str # pathFilenameCitationSSOT
36
-
37
- abstract: str | None = None # pathFilenameCitationSSOT
38
- authors: list[dict[str,str]] = attrs.field(factory=list) # pathFilenamePackageSSOT; pyproject.toml authors
39
- commit: str | None = None # workflows['Make GitHub Release']
40
- contact: list[dict[str,str]] = attrs.field(factory=list) # pathFilenamePackageSSOT; pyproject.toml maintainers
41
- dateDASHreleased: str | None = None # workflows['Make GitHub Release']
42
- doi: str | None = None # pathFilenameCitationSSOT
43
- identifiers: list[str] = attrs.field(factory=list) # workflows['Make GitHub Release']
44
- keywords: list[str] = attrs.field(factory=list) # pathFilenamePackageSSOT; packaging.metadata.Metadata.keywords
45
- license: str | None = None # pathFilenamePackageSSOT; packaging.metadata.Metadata.license_expression
46
- licenseDASHurl: str | None = None # pathFilenamePackageSSOT; packaging.metadata.Metadata.project_urls: license or pyproject.toml urls license
47
- preferredDASHcitation: str | None = None # pathFilenameCitationSSOT
48
- references: list[str] = attrs.field(factory=list) # bibtex files in pathCitationSSOT. Conversion method and timing TBD.
49
- repositoryDASHartifact: str | None = None # (https://pypi.org/pypi/{package_name}/json').json()['releases']
50
- repositoryDASHcode: str | None = None # workflows['Make GitHub Release']
51
- repository: str | None = None # pathFilenamePackageSSOT; packaging.metadata.Metadata.project_urls: repository
52
- title: str | None = None # pathFilenamePackageSSOT; pyproject.toml name (packaging normalizes the names)
53
- type: str | None = None # pathFilenameCitationSSOT
54
- url: str | None = None # pathFilenamePackageSSOT; packaging.metadata.Metadata.project_urls: homepage
55
- version: str | None = None # pathFilenamePackageSSOT; packaging.metadata.Metadata.version
56
-
57
- def setInStone(self, prophet: str) -> "CitationNexus":
58
- match prophet:
59
- case "Citation":
60
- pass
61
- # "freeze" these items
62
- # setattr(self.cffDASHversion, 'type', Final[str])
63
- # setattr(self.doi, 'type', Final[str])
64
- # cffDASHversion: str # pathFilenameCitationSSOT
65
- # message: str # pathFilenameCitationSSOT
66
- # abstract: str | None = None # pathFilenameCitationSSOT
67
- # doi: str | None = None # pathFilenameCitationSSOT
68
- # preferredDASHcitation: str | None = None # pathFilenameCitationSSOT
69
- # type: str | None = None # pathFilenameCitationSSOT
70
- case "PyPA":
71
- pass
72
- # "freeze" these items
73
- # setattr(self.keywords, 'type', Final[list[str]])
74
- # setattr(self.license, 'type', Final[str])
75
- # setattr(self.licenseDASHurl, 'type', Final[str])
76
- # setattr(self.repository, 'type', Final[str])
77
- # setattr(self.url, 'type', Final[str])
78
- # setattr(self.version, 'type', Final[str])
79
- case "pyprojectDOTtoml":
80
- pass
81
- # "freeze" these items
82
- # setattr(self.authors, 'type', Final[list[dict[str,str]]])
83
- # setattr(self.contact, 'type', Final[list[dict[str,str]]])
84
- # setattr(self.title, 'type', Final[str])
85
- return self
86
-
87
- def getNexusCitation(pathFilenameCitationSSOT: pathlib.Path) -> CitationNexus:
88
-
89
- # `cffconvert.cli.create_citation.create_citation()` is PAINFULLY mundane, but a major problem
90
- # in the CFF ecosystem is divergence. Therefore, I will use this function so that my code
91
- # converges with the CFF ecosystem.
92
- citationObject: cffconvert.Citation = create_citation(infile=pathFilenameCitationSSOT, url=None)
93
- # `._parse()` is a yaml loader: use it for convergence
94
- cffobj: Dict[Any, Any] = citationObject._parse()
95
-
96
- nexusCitation = CitationNexus(
97
- cffDASHversion=cffobj["cff-version"],
98
- message=cffobj["message"],
99
- )
100
-
101
- Z0Z_list: List[attrs.Attribute] = list(attrs.fields(type(nexusCitation)))
102
- for Z0Z_field in Z0Z_list:
103
- cffobjKeyName: str = Z0Z_field.name.replace("DASH", "-")
104
- cffobjValue = cffobj.get(cffobjKeyName)
105
- if cffobjValue: # An empty list will be False
106
- setattr(nexusCitation, Z0Z_field.name, cffobjValue)
107
-
108
- nexusCitation = nexusCitation.setInStone("Citation")
109
- return nexusCitation
110
-
111
- def getPypaMetadata(packageData: Dict[str, Any]) -> PyPAMetadata:
112
- """
113
- Create a PyPA metadata object (version 2.4) from packageData.
114
- https://packaging.python.org/en/latest/specifications/core-metadata/
115
- """
116
- dictionaryProjectURLs: Dict[str, str] = {}
117
- for urlName, url in packageData.get("urls", {}).items():
118
- urlName = urlName.lower()
119
- if urlName in listProjectURLsTarget:
120
- dictionaryProjectURLs[urlName] = url
121
-
122
- metadataRaw = packaging.metadata.RawMetadata(
123
- keywords=packageData.get("keywords", []),
124
- license_expression=packageData.get("license", {}).get("text", ""),
125
- metadata_version="2.4",
126
- name=packaging.utils.canonicalize_name(packageData.get("name", None), validate=True), # packaging.metadata.InvalidMetadata: 'name' is a required field
127
- project_urls=dictionaryProjectURLs,
128
- version=packageData.get("version", None),
129
- )
130
-
131
- metadata = PyPAMetadata().from_raw(metadataRaw)
132
- return metadata
133
-
134
- def addPypaMetadata(nexusCitation: CitationNexus, metadata: PyPAMetadata) -> CitationNexus:
135
- if not metadata.name:
136
- raise ValueError("Metadata name is required.")
137
-
138
- nexusCitation.title = metadata.name
139
- if metadata.version: nexusCitation.version = str(metadata.version)
140
- if metadata.keywords: nexusCitation.keywords = metadata.keywords
141
- if metadata.license_expression: nexusCitation.license = metadata.license_expression
142
-
143
- Z0Z_lookup: Dict[str, str] = {
144
- "homepage": "url",
145
- "license": "licenseDASHurl",
146
- "repository": "repository",
147
- }
148
- if metadata.project_urls:
149
- for urlTarget in listProjectURLsTarget:
150
- url = metadata.project_urls.get(urlTarget, None)
151
- if url:
152
- setattr(nexusCitation, Z0Z_lookup[urlTarget], url)
153
-
154
- nexusCitation = nexusCitation.setInStone("PyPA")
155
- return nexusCitation
156
-
157
- def add_pyprojectDOTtoml(nexusCitation: CitationNexus, packageData: Dict[str, Any]) -> CitationNexus:
158
- def Z0Z_ImaNotValidatingNoNames(person: Dict[str, str]) -> Dict[str, str]:
159
- cffPerson: Dict[str, str] = {}
160
- if person.get('name', None):
161
- cffPerson['given-names'], cffPerson['family-names'] = person['name'].split(' ', 1)
162
- if person.get('email', None):
163
- cffPerson['email'] = person['email']
164
- return cffPerson
165
- listAuthors = packageData.get("authors", None)
166
- if not listAuthors:
167
- raise ValueError("Authors are required.")
168
- else:
169
- listPersons = []
170
- for person in listAuthors:
171
- listPersons.append(Z0Z_ImaNotValidatingNoNames(person))
172
- nexusCitation.authors = listPersons
173
- if packageData.get("maintainers", None):
174
- listPersons = []
175
- for person in packageData["maintainers"]:
176
- listPersons.append(Z0Z_ImaNotValidatingNoNames(person))
177
- nexusCitation.contact = listPersons
178
- nexusCitation.title = packageData["name"]
179
- nexusCitation = nexusCitation.setInStone("pyprojectDOTtoml")
180
- return nexusCitation
181
-
182
- def writeCitation(nexusCitation: CitationNexus, pathFilenameCitationSSOT: pathlib.Path, pathFilenameCitationDOTcffRepo: pathlib.Path):
183
- # NOTE embarrassingly hacky process to follow
184
- parameterIndent= 2
185
- parameterLineWidth = 60
186
- yamlWorkhorse = ruamel.yaml.YAML()
187
-
188
- def srsly(Z0Z_filed, Z0Z_value):
189
- if Z0Z_value: # empty lists
190
- return True
191
- else:
192
- return False
193
-
194
- dictionaryCitation = attrs.asdict(nexusCitation, filter=srsly)
195
- for keyName in list(dictionaryCitation.keys()):
196
- dictionaryCitation[keyName.replace("DASH", "-")] = dictionaryCitation.pop(keyName)
197
-
198
- pathFilenameForValidation = pathlib.Path(tempfile.mktemp())
199
-
200
- def writeStream(pathFilename):
201
- with open(pathFilename, 'w') as pathlibIsAStealthContextManagerThatRuamelCannotDetectAndRefusesToWorkWith:
202
- yamlWorkhorse.dump(dictionaryCitation, pathlibIsAStealthContextManagerThatRuamelCannotDetectAndRefusesToWorkWith)
203
-
204
- writeStream(pathFilenameForValidation)
205
-
206
- citationObject: cffconvert.Citation = create_citation(infile=pathFilenameForValidation, url=None)
207
- if citationObject.validate(verbose=True) is None:
208
- writeStream(pathFilenameCitationSSOT)
209
- writeStream(pathFilenameCitationDOTcffRepo)
210
-
211
- def logistics():
212
- # Prefer reliable, dynamic values over hardcoded ones
213
- packageNameHARDCODED: str = 'mapFolding'
214
-
215
- packageName: str = packageNameHARDCODED
216
- pathRepoRoot = pathlib.Path(__file__).parent.parent.parent
217
- pathFilenamePackageSSOT = pathRepoRoot / 'pyproject.toml'
218
- filenameGitHubAction = 'updateCitation.yml'
219
- pathFilenameGitHubAction = pathRepoRoot / '.github' / 'workflows' / filenameGitHubAction
220
-
221
- filenameCitationDOTcff = 'CITATION.cff'
222
- pathCitations = pathRepoRoot / packageName / 'citations'
223
- pathFilenameCitationSSOT = pathCitations / filenameCitationDOTcff
224
- pathFilenameCitationDOTcffRepo = pathRepoRoot / filenameCitationDOTcff
225
-
226
- nexusCitation = getNexusCitation(pathFilenameCitationSSOT)
227
-
228
- tomlPackageData: Dict[str, Any] = tomli.loads(pathFilenamePackageSSOT.read_text())['project']
229
- # https://packaging.python.org/en/latest/specifications/pyproject-toml/
230
- pypaMetadata: PyPAMetadata = getPypaMetadata(tomlPackageData)
231
-
232
- nexusCitation = addPypaMetadata(nexusCitation, pypaMetadata)
233
- nexusCitation = add_pyprojectDOTtoml(nexusCitation, tomlPackageData)
234
-
235
- writeCitation(nexusCitation, pathFilenameCitationSSOT, pathFilenameCitationDOTcffRepo)
236
-
237
- if __name__ == '__main__':
238
- logistics()
@@ -1,25 +0,0 @@
1
- benchmarks/benchmarking.py,sha256=HD_0NSvuabblg94ftDre6LFnXShTe8MYj3hIodW-zV0,3076
2
- citations/updateCitation.py,sha256=2MI8wHzLZg8eg2tAkbtynXPttxg3tTrd5ezEZIVyW48,10956
3
- reference/flattened.py,sha256=6blZ2Y9G8mu1F3gV8SKndPE398t2VVFlsgKlyeJ765A,16538
4
- reference/hunterNumba.py,sha256=HWndRgsajOf76rbb2LDNEZ6itsdYbyV-k3wgOFjeR6c,7104
5
- reference/irvineJavaPort.py,sha256=Sj-63Z-OsGuDoEBXuxyjRrNmmyl0d7Yz_XuY7I47Oyg,4250
6
- reference/jax.py,sha256=rojyK80lOATtbzxjGOHWHZngQa47CXCLJHZwIdN2MwI,14955
7
- reference/lunnan.py,sha256=XEcql_gxvCCghb6Or3qwmPbn4IZUbZTaSmw_fUjRxZE,5037
8
- reference/lunnanNumpy.py,sha256=HqDgSwTOZA-G0oophOEfc4zs25Mv4yw2aoF1v8miOLk,4653
9
- reference/lunnanWhile.py,sha256=7NY2IKO5XBgol0aWWF_Fi-7oTL9pvu_z6lB0TF1uVHk,4063
10
- reference/rotatedEntryPoint.py,sha256=z0QyDQtnMvXNj5ntWzzJUQUMFm1-xHGLVhtYzwmczUI,11530
11
- reference/total_countPlus1vsPlusN.py,sha256=usenM8Yn_G1dqlPl7NKKkcnbohBZVZBXTQRm2S3_EDA,8106
12
- someAssemblyRequired/__init__.py,sha256=3JnAKXfaYPtmxV_4AnZ6KpCosT_0GFV5Nw7K8sz4-Uo,34
13
- someAssemblyRequired/getLLVMforNoReason.py,sha256=FtJzw2pZS3A4NimWdZsegXaU-vKeCw8m67kcfb5wvGM,894
14
- someAssemblyRequired/makeJob.py,sha256=RTC80FhDrR19GqHtEeo6GpmlWZQESuf8FXqBqVzdOpk,1465
15
- someAssemblyRequired/synthesizeModuleJob.py,sha256=n6_2LUO2mf4PUuzN357EJiTATFB1TIAMDMgE5JvkcB0,7426
16
- someAssemblyRequired/synthesizeModules.py,sha256=foxk-mG-HGVap2USiA3ppCyWWXUmkLFzQiKacp5DD9M,11569
17
- syntheticModules/Initialize.py,sha256=Ut6rWt08LQ-kP9y-r25hwJjW82lO2DQfLBijhkKGaUo,4024
18
- syntheticModules/Parallel.py,sha256=Kq1uo5kfeeczk871yxaagsaNz8zaM8GWy0S3hZAEQz4,5343
19
- syntheticModules/Sequential.py,sha256=8KDiuK977s8hkyNw5pCZPNi0RHWDoCxwOteZ8CLuIEM,3643
20
- syntheticModules/__init__.py,sha256=lUDBXOiislfP2sIxT13_GZgElaytoYqk0ODUsucMYew,117
21
- mapFolding-0.3.7.dist-info/METADATA,sha256=BnMb2zDVlBgxU57qquN7iLYDSRHMkGmsL0ovLA_2G7k,7729
22
- mapFolding-0.3.7.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
23
- mapFolding-0.3.7.dist-info/entry_points.txt,sha256=F3OUeZR1XDTpoH7k3wXuRb3KF_kXTTeYhu5AGK1SiOQ,146
24
- mapFolding-0.3.7.dist-info/top_level.txt,sha256=yVG9dNZywoaddcsUdEDg7o0XOBzJd_4Z-sDaXGHpiMY,69
25
- mapFolding-0.3.7.dist-info/RECORD,,
@@ -1,5 +0,0 @@
1
- benchmarks
2
- citations
3
- reference
4
- someAssemblyRequired
5
- syntheticModules