mapFolding 0.15.2__tar.gz → 0.15.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. {mapfolding-0.15.2 → mapfolding-0.15.4}/PKG-INFO +2 -1
  2. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/__init__.py +15 -11
  3. mapfolding-0.15.4/mapFolding/_theSSOT.py +122 -0
  4. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/_theTypes.py +67 -5
  5. mapfolding-0.15.4/mapFolding/algorithms/__init__.py +1 -0
  6. mapfolding-0.15.4/mapFolding/algorithms/matrixMeanders.py +348 -0
  7. mapfolding-0.15.4/mapFolding/algorithms/oeisIDbyFormula.py +113 -0
  8. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/basecamp.py +105 -67
  9. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/oeis.py +40 -54
  10. mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround/matrixMeanders64retired.py +160 -0
  11. mapfolding-0.15.2/mapFolding/_oeisFormulas/matrixMeanders.py → mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround/matrixMeandersBaselineV2.py +28 -21
  12. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/someAssemblyRequired/A007822rawMaterials.py +1 -1
  13. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/someAssemblyRequired/makeAllModules.py +5 -5
  14. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/someAssemblyRequired/makeJobTheorem2codon.py +5 -4
  15. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/algorithmA007822.py +1 -1
  16. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/algorithmA007822Numba.py +5 -3
  17. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/dataPacking.py +2 -4
  18. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/dataPackingA007822.py +2 -4
  19. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/initializeStateA007822.py +1 -1
  20. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/theorem2A007822.py +1 -1
  21. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/theorem2A007822Numba.py +1 -1
  22. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/theorem2A007822Trimmed.py +1 -1
  23. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/tests/conftest.py +30 -10
  24. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/tests/test_computations.py +75 -46
  25. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/tests/test_oeis.py +2 -20
  26. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding.egg-info/PKG-INFO +2 -1
  27. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding.egg-info/SOURCES.txt +17 -27
  28. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding.egg-info/entry_points.txt +0 -1
  29. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding.egg-info/requires.txt +3 -0
  30. {mapfolding-0.15.2 → mapfolding-0.15.4}/pyproject.toml +4 -3
  31. mapfolding-0.15.2/mapFolding/_oeisFormulas/A000136.py +0 -4
  32. mapfolding-0.15.2/mapFolding/_oeisFormulas/A000560.py +0 -4
  33. mapfolding-0.15.2/mapFolding/_oeisFormulas/A000682.py +0 -17
  34. mapfolding-0.15.2/mapFolding/_oeisFormulas/A001010.py +0 -19
  35. mapfolding-0.15.2/mapFolding/_oeisFormulas/A001011.py +0 -5
  36. mapfolding-0.15.2/mapFolding/_oeisFormulas/A005315.py +0 -4
  37. mapfolding-0.15.2/mapFolding/_oeisFormulas/A005316.py +0 -10
  38. mapfolding-0.15.2/mapFolding/_oeisFormulas/A223094.py +0 -7
  39. mapfolding-0.15.2/mapFolding/_oeisFormulas/A259702.py +0 -4
  40. mapfolding-0.15.2/mapFolding/_oeisFormulas/A301620.py +0 -6
  41. mapfolding-0.15.2/mapFolding/_oeisFormulas/Z0Z_aOFn.py +0 -33
  42. mapfolding-0.15.2/mapFolding/_oeisFormulas/Z0Z_oeisMeanders.py +0 -52
  43. mapfolding-0.15.2/mapFolding/_oeisFormulas/__init__.py +0 -1
  44. mapfolding-0.15.2/mapFolding/_oeisFormulas/matrixMeandersAnnex.py +0 -84
  45. mapfolding-0.15.2/mapFolding/_theSSOT.py +0 -129
  46. {mapfolding-0.15.2 → mapfolding-0.15.4}/LICENSE +0 -0
  47. {mapfolding-0.15.2 → mapfolding-0.15.4}/README.md +0 -0
  48. {mapfolding-0.15.2/mapFolding → mapfolding-0.15.4/mapFolding/algorithms}/daoOfMapFolding.py +0 -0
  49. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/beDRY.py +0 -0
  50. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/dataBaskets.py +0 -0
  51. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/filesystemToolkit.py +0 -0
  52. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/py.typed +0 -0
  53. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/reference/__init__.py +0 -0
  54. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/reference/flattened.py +0 -0
  55. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/reference/hunterNumba.py +0 -0
  56. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/reference/irvineJavaPort.py +0 -0
  57. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/reference/jaxCount.py +0 -0
  58. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/reference/jobsCompleted/[2x19]/p2x19.py +0 -0
  59. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/reference/jobsCompleted/__init__.py +0 -0
  60. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/reference/jobsCompleted/p2x19/p2x19.py +0 -0
  61. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/reference/lunnonNumpy.py +0 -0
  62. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/reference/lunnonWhile.py +0 -0
  63. {mapfolding-0.15.2/mapFolding/reference → mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround}/A005316JavaPort.py +0 -0
  64. {mapfolding-0.15.2/mapFolding/reference → mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround}/A005316imperative.py +0 -0
  65. {mapfolding-0.15.2/mapFolding/reference → mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround}/A005316intOptimized.py +0 -0
  66. {mapfolding-0.15.2/mapFolding/reference → mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround}/A005316optimized128bit.py +0 -0
  67. {mapfolding-0.15.2/mapFolding/reference → mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround}/A005316primitiveOptimized.py +0 -0
  68. {mapfolding-0.15.2/mapFolding/reference → mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround}/A005316redis.py +0 -0
  69. {mapfolding-0.15.2/mapFolding/reference → mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround}/A005316write2disk.py +0 -0
  70. {mapfolding-0.15.2/mapFolding/reference → mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround}/matrixMeandersBaseline.py +0 -0
  71. {mapfolding-0.15.2/mapFolding/reference → mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround}/matrixMeandersBaselineAnnex.py +0 -0
  72. {mapfolding-0.15.2/mapFolding/reference → mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround}/matrixMeandersSimpleQueue.py +0 -0
  73. {mapfolding-0.15.2/mapFolding/reference → mapfolding-0.15.4/mapFolding/reference/meandersDumpingGround}/matrixMeandersSlicePop.py +0 -0
  74. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/reference/rotatedEntryPoint.py +0 -0
  75. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/reference/total_countPlus1vsPlusN.py +0 -0
  76. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/someAssemblyRequired/RecipeJob.py +0 -0
  77. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/someAssemblyRequired/__init__.py +0 -0
  78. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/someAssemblyRequired/_toolIfThis.py +0 -0
  79. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/someAssemblyRequired/_toolkitContainers.py +0 -0
  80. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/someAssemblyRequired/getLLVMforNoReason.py +0 -0
  81. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/someAssemblyRequired/infoBooth.py +0 -0
  82. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/someAssemblyRequired/makeJobTheorem2Numba.py +0 -0
  83. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/someAssemblyRequired/toolkitNumba.py +0 -0
  84. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/someAssemblyRequired/transformationTools.py +0 -0
  85. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/__init__.py +0 -0
  86. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/countParallelNumba.py +0 -0
  87. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/daoOfMapFoldingNumba.py +0 -0
  88. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/initializeState.py +0 -0
  89. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/theorem2.py +0 -0
  90. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/theorem2Numba.py +0 -0
  91. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/syntheticModules/theorem2Trimmed.py +0 -0
  92. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/tests/__init__.py +0 -0
  93. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/tests/test_filesystem.py +0 -0
  94. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/tests/test_other.py +0 -0
  95. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding/tests/test_tasks.py +0 -0
  96. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding.egg-info/dependency_links.txt +0 -0
  97. {mapfolding-0.15.2 → mapfolding-0.15.4}/mapFolding.egg-info/top_level.txt +0 -0
  98. {mapfolding-0.15.2 → mapfolding-0.15.4}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mapFolding
3
- Version: 0.15.2
3
+ Version: 0.15.4
4
4
  Summary: Map folding, meanders, stamp folding, semi-meanders. Experiment with algorithm transformations and code optimization.
5
5
  Author-email: Hunter Hogan <HunterHogan@pm.me>
6
6
  License: CC-BY-NC-4.0
@@ -39,6 +39,7 @@ Requires-Dist: hunterMakesPy
39
39
  Requires-Dist: numpy
40
40
  Requires-Dist: platformdirs
41
41
  Provides-Extra: development
42
+ Requires-Dist: memray; sys_platform == "linux" and extra == "development"
42
43
  Requires-Dist: mypy; extra == "development"
43
44
  Requires-Dist: pyupgrade; extra == "development"
44
45
  Requires-Dist: py-spy; extra == "development"
@@ -15,17 +15,17 @@ The computational framework integrates type safety, persistent result storage,
15
15
  and mathematical validation through OEIS sequence integration.
16
16
 
17
17
  Core Transformation Tools:
18
- countFolds: Primary interface for computing folding pattern counts
19
- MapFoldingState: Computational state management for recursive analysis
20
- Connection graph generation: Mathematical foundation for folding relationships
21
- Task division utilities: Experimental parallel computation options
22
- OEIS integration: Mathematical validation and sequence discovery
18
+ countFolds: Primary interface for computing folding pattern counts
19
+ MapFoldingState: Computational state management for recursive analysis
20
+ Connection graph generation: Mathematical foundation for folding relationships
21
+ Task division utilities: Experimental parallel computation options
22
+ OEIS integration: Mathematical validation and sequence discovery
23
23
 
24
24
  Primary Use Cases:
25
- Mathematical research into folding pattern properties and relationships
26
- Educational exploration of combinatorial mathematics concepts
27
- Computational validation of theoretical results
28
- Extension of known mathematical sequences through new discoveries
25
+ Mathematical research into folding pattern properties and relationships
26
+ Educational exploration of combinatorial mathematics concepts
27
+ Computational validation of theoretical results
28
+ Extension of known mathematical sequences through new discoveries
29
29
 
30
30
  The package handles the full spectrum of map folding analysis, from simple
31
31
  educational examples to research-grade computations requiring multi-day processing
@@ -50,6 +50,10 @@ from mapFolding._theTypes import (
50
50
  DatatypeElephino as DatatypeElephino,
51
51
  DatatypeFoldsTotal as DatatypeFoldsTotal,
52
52
  DatatypeLeavesTotal as DatatypeLeavesTotal,
53
+ MetadataOEISidMapFolding as MetadataOEISidMapFolding,
54
+ MetadataOEISidMapFoldingManuallySet as MetadataOEISidMapFoldingManuallySet,
55
+ MetadataOEISidMeanders as MetadataOEISidMeanders,
56
+ MetadataOEISidMeandersManuallySet as MetadataOEISidMeandersManuallySet,
53
57
  NumPyElephino as NumPyElephino,
54
58
  NumPyFoldsTotal as NumPyFoldsTotal,
55
59
  NumPyIntegerType as NumPyIntegerType,
@@ -80,10 +84,10 @@ from mapFolding.filesystemToolkit import (
80
84
  from mapFolding.basecamp import countFolds as countFolds
81
85
 
82
86
  from mapFolding.oeis import (
83
- clearOEIScache as clearOEIScache,
87
+ dictionaryOEISMapFolding as dictionaryOEISMapFolding,
88
+ dictionaryOEISMeanders as dictionaryOEISMeanders,
84
89
  getFoldsTotalKnown as getFoldsTotalKnown,
85
90
  getOEISids as getOEISids,
86
91
  OEIS_for_n as OEIS_for_n,
87
92
  oeisIDfor_n as oeisIDfor_n,
88
- dictionaryOEIS as dictionaryOEIS,
89
93
  )
@@ -0,0 +1,122 @@
1
+ """Access and configure package settings and metadata."""
2
+
3
+ from hunterMakesPy import PackageSettings
4
+ from mapFolding._theTypes import MetadataOEISidMapFoldingManuallySet, MetadataOEISidMeandersManuallySet
5
+ from pathlib import Path
6
+ import dataclasses
7
+ import random
8
+
9
+ @dataclasses.dataclass
10
+ class mapFoldingPackageSettings(PackageSettings):
11
+ """Widely used settings that are especially useful for map folding algorithms.
12
+
13
+ Attributes
14
+ ----------
15
+ identifierPackageFALLBACK : str = ''
16
+ Fallback package identifier used only during initialization when automatic discovery fails.
17
+ pathPackage : Path = Path()
18
+ Absolute path to the installed package directory. Automatically resolved from `identifierPackage` if not provided.
19
+ identifierPackage : str = ''
20
+ Canonical name of the package. Automatically extracted from `pyproject.toml`.
21
+ fileExtension : str = '.py'
22
+ Default file extension.
23
+
24
+ cacheDays : int = 30
25
+ Number of days to retain cached OEIS data before refreshing from the online source.
26
+ concurrencyPackage : str = 'multiprocessing'
27
+ Package identifier for concurrent execution operations.
28
+ OEISidMapFoldingManuallySet : dict[str, MetadataOEISidMapFoldingManuallySet]
29
+ Settings that are best selected by a human instead of algorithmically.
30
+ OEISidMeandersManuallySet : dict[str, MetadataOEISidMeandersManuallySet]
31
+ Settings that are best selected by a human instead of algorithmically for meander sequences.
32
+ """
33
+
34
+ OEISidMapFoldingManuallySet: dict[str, MetadataOEISidMapFoldingManuallySet] = dataclasses.field(default_factory=dict[str, MetadataOEISidMapFoldingManuallySet])
35
+ """Settings that are best selected by a human instead of algorithmically."""
36
+
37
+ OEISidMeandersManuallySet: dict[str, MetadataOEISidMeandersManuallySet] = dataclasses.field(default_factory=dict[str, MetadataOEISidMeandersManuallySet])
38
+ """Settings that are best selected by a human instead of algorithmically for meander sequences."""
39
+
40
+ cacheDays: int = 30
41
+ """Number of days to retain cached OEIS data before refreshing from the online source."""
42
+
43
+ concurrencyPackage: str = 'multiprocessing'
44
+ """Package identifier for concurrent execution operations."""
45
+
46
+ # TODO I made a `TypedDict` before I knew how to make dataclasses and classes. Think about other data structures.
47
+ OEISidMapFoldingManuallySet: dict[str, MetadataOEISidMapFoldingManuallySet] = {
48
+ 'A000136': {
49
+ 'getMapShape': lambda n: (1, n),
50
+ 'valuesBenchmark': [14],
51
+ 'valuesTestParallelization': [*range(3, 7)],
52
+ 'valuesTestValidation': [random.randint(2, 9)], # noqa: S311
53
+ },
54
+ 'A001415': {
55
+ 'getMapShape': lambda n: (2, n),
56
+ 'valuesBenchmark': [14],
57
+ 'valuesTestParallelization': [*range(3, 7)],
58
+ 'valuesTestValidation': [random.randint(2, 9)], # noqa: S311
59
+ },
60
+ 'A001416': {
61
+ 'getMapShape': lambda n: (3, n),
62
+ 'valuesBenchmark': [9],
63
+ 'valuesTestParallelization': [*range(3, 5)],
64
+ 'valuesTestValidation': [random.randint(2, 6)], # noqa: S311
65
+ },
66
+ 'A001417': {
67
+ 'getMapShape': lambda n: tuple(2 for _dimension in range(n)),
68
+ 'valuesBenchmark': [6],
69
+ 'valuesTestParallelization': [*range(2, 4)],
70
+ 'valuesTestValidation': [random.randint(2, 4)], # noqa: S311
71
+ },
72
+ 'A195646': {
73
+ 'getMapShape': lambda n: tuple(3 for _dimension in range(n)),
74
+ 'valuesBenchmark': [3],
75
+ 'valuesTestParallelization': [*range(2, 3)],
76
+ 'valuesTestValidation': [2],
77
+ },
78
+ 'A001418': {
79
+ 'getMapShape': lambda n: (n, n),
80
+ 'valuesBenchmark': [5],
81
+ 'valuesTestParallelization': [*range(2, 4)],
82
+ 'valuesTestValidation': [random.randint(2, 4)], # noqa: S311
83
+ },
84
+ 'A007822': {
85
+ 'getMapShape': lambda n: (1, 2 * n),
86
+ 'valuesBenchmark': [7],
87
+ 'valuesTestParallelization': [*range(2, 4)],
88
+ 'valuesTestValidation': [random.randint(2, 8)], # noqa: S311
89
+ },
90
+ }
91
+
92
+ identifierPackageFALLBACK = "mapFolding"
93
+ """Manually entered package name used as fallback when dynamic resolution fails."""
94
+
95
+ packageSettings = mapFoldingPackageSettings(identifierPackageFALLBACK=identifierPackageFALLBACK, OEISidMapFoldingManuallySet=OEISidMapFoldingManuallySet)
96
+ """Global package settings."""
97
+
98
+ # TODO integrate into packageSettings
99
+ pathCache: Path = packageSettings.pathPackage / ".cache"
100
+ """Local directory path for storing cached OEIS sequence data and metadata."""
101
+ OEISidMeandersManuallySet: dict[str, MetadataOEISidMeandersManuallySet] = {
102
+ 'A000560': {'valuesTestValidation': [*range(3, 12)]},
103
+ 'A000682': {'valuesTestValidation': [*range(3, 12)]},
104
+ 'A001010': {'valuesTestValidation': [*range(3, 11)]},
105
+ 'A001011': {'valuesTestValidation': [*range(3, 7)]},
106
+ 'A005315': {'valuesTestValidation': [*range(3, 9)]},
107
+ 'A005316': {'valuesTestValidation': [*range(3, 13)]},
108
+ 'A060206': {'valuesTestValidation': [*range(3, 9)]},
109
+ 'A077460': {'valuesTestValidation': [*range(3, 8)]},
110
+ 'A078591': {'valuesTestValidation': [*range(3, 10)]},
111
+ 'A223094': {'valuesTestValidation': [*range(3, 11)]},
112
+ 'A259702': {'valuesTestValidation': [*range(3, 13)]},
113
+ 'A301620': {'valuesTestValidation': [*range(3, 11)]},
114
+ }
115
+
116
+ # Recreate packageSettings with meanders settings included
117
+ packageSettings = mapFoldingPackageSettings(
118
+ identifierPackageFALLBACK=identifierPackageFALLBACK,
119
+ OEISidMapFoldingManuallySet=OEISidMapFoldingManuallySet,
120
+ OEISidMeandersManuallySet=OEISidMeandersManuallySet,
121
+ )
122
+ """Global package settings."""
@@ -1,7 +1,8 @@
1
1
  """Types for defensive coding and for computation optimization."""
2
2
 
3
+ from collections.abc import Callable
3
4
  from numpy import dtype, int_ as numpy_int, integer, ndarray, uint64 as numpy_uint64
4
- from typing import Any, TypeAlias, TypeVar
5
+ from typing import Any, TypeAlias, TypedDict, TypeVar
5
6
 
6
7
  NumPyIntegerType = TypeVar('NumPyIntegerType', bound=integer[Any], covariant=True)
7
8
  """Any NumPy integer type, which is usually between 8-bit signed and 64-bit unsigned."""
@@ -19,9 +20,9 @@ Note well
19
20
  ---------
20
21
  Colossal values are found with the cross humpy inequality:
21
22
 
22
- ⎡ el ⎤ ⎡ ⎤
23
- ⎢ eph ⎥ X ⎢ rhi ⎥ <= elephino
24
- ⎣ ant ⎦ ⎣ no ⎦
23
+ ⎡ el ⎤ ⎡ ⎤
24
+ ⎢ eph ⎥ X ⎢ rhi ⎥ <= elephino
25
+ ⎣ ant ⎦ ⎣ no ⎦
25
26
 
26
27
  """
27
28
 
@@ -36,7 +37,7 @@ NumPyFoldsTotal: TypeAlias = numpy_uint64 # noqa: UP040 The TypeAlias may be us
36
37
 
37
38
  Note well
38
39
  ---------
39
- If your elements might exceed 1.8 x 10^19, then you should take extra steps to ensure the integrity of the data in NumPy or use a
40
+ If your element values might exceed 1.8 x 10^19, then you should take extra steps to ensure the integrity of the data in NumPy or use a
40
41
  different data structure."""
41
42
 
42
43
  Array3DLeavesTotal: TypeAlias = ndarray[tuple[int, int, int], dtype[NumPyLeavesTotal]] # noqa: UP040 The TypeAlias may be used to construct ("cast") a value to the type. And the identifier may be changed to a different type.
@@ -50,3 +51,64 @@ Array1DElephino: TypeAlias = ndarray[tuple[int], dtype[NumPyElephino]] # noqa:
50
51
 
51
52
  Array1DFoldsTotal: TypeAlias = ndarray[tuple[int], dtype[NumPyFoldsTotal]] # noqa: UP040 The TypeAlias may be used to construct ("cast") a value to the type. And the identifier may be changed to a different type.
52
53
  """A `numpy.ndarray` with one axis and elements of type `NumPyFoldsTotal`."""
54
+
55
+ class MetadataOEISidMapFoldingManuallySet(TypedDict):
56
+ """Settings that are best selected by a human instead of algorithmically."""
57
+
58
+ getMapShape: Callable[[int], tuple[int, ...]]
59
+ """Function to convert the OEIS sequence index, 'n', to its `mapShape` tuple."""
60
+ valuesBenchmark: list[int]
61
+ """List of index values, 'n', to use when benchmarking the algorithm performance."""
62
+ valuesTestParallelization: list[int]
63
+ """List of index values, 'n', to use when testing parallelization performance."""
64
+ valuesTestValidation: list[int]
65
+ """List of index values, 'n', to use when testing validation performance."""
66
+
67
+ class MetadataOEISidMapFolding(TypedDict):
68
+ """Settings for an implemented OEIS sequence."""
69
+
70
+ description: str
71
+ """The OEIS.org description of the integer sequence."""
72
+ getMapShape: Callable[[int], tuple[int, ...]]
73
+ """Function to convert the OEIS sequence index, 'n', to its `mapShape` tuple."""
74
+ offset: int
75
+ """The starting index, 'n', of the sequence, typically 0 or 1."""
76
+ valuesBenchmark: list[int]
77
+ """List of index values, 'n', to use when benchmarking the algorithm performance."""
78
+ valuesKnown: dict[int, int]
79
+ """Dictionary of sequence indices, 'n', to their known values, `foldsTotal`."""
80
+ valuesTestParallelization: list[int]
81
+ """List of index values, 'n', to use when testing parallelization performance."""
82
+ valuesTestValidation: list[int]
83
+ """List of index values, 'n', to use when testing validation performance."""
84
+ valueUnknown: int
85
+ """The smallest value of 'n' for which `foldsTotal` is unknown."""
86
+
87
+ # ruff: noqa: ERA001
88
+ class MetadataOEISidMeandersManuallySet(TypedDict):
89
+ """Settings that are best selected by a human instead of algorithmically."""
90
+
91
+ # valuesBenchmark: list[int]
92
+ """List of index values, 'n', to use when benchmarking the algorithm performance."""
93
+ # valuesTestParallelization: list[int]
94
+ """List of index values, 'n', to use when testing parallelization performance."""
95
+ valuesTestValidation: list[int]
96
+ """List of index values, 'n', to use when testing validation performance."""
97
+
98
+ class MetadataOEISidMeanders(TypedDict):
99
+ """Settings for an implemented OEIS sequence."""
100
+
101
+ description: str
102
+ """The OEIS.org description of the integer sequence."""
103
+ offset: int
104
+ """The starting index, 'n', of the sequence, typically 0 or 1."""
105
+ # valuesBenchmark: list[int]
106
+ """List of index values, 'n', to use when benchmarking the algorithm performance."""
107
+ valuesKnown: dict[int, int]
108
+ """Dictionary of sequence indices, 'n', to their known values, `foldsTotal`."""
109
+ # valuesTestParallelization: list[int]
110
+ """List of index values, 'n', to use when testing parallelization performance."""
111
+ valuesTestValidation: list[int]
112
+ """List of index values, 'n', to use when testing validation performance."""
113
+ valueUnknown: int
114
+ """The smallest value of 'n' for which `foldsTotal` is unknown."""
@@ -0,0 +1 @@
1
+ """Hand-made algorithms."""
@@ -0,0 +1,348 @@
1
+ # ruff: noqa: D100 D103
2
+ from functools import cache
3
+ from gc import collect as goByeBye, set_threshold
4
+ from typing import Any, Literal
5
+ import gc
6
+ import numpy
7
+
8
+ # DEVELOPMENT INSTRUCTIONS FOR THIS MODULE
9
+ #
10
+ # Avoid early-return guard clauses, short-circuit returns, and multiple exit points. This codebase enforces a
11
+ # single-return-per-function pattern with stable shapes/dtypes due to AST transforms. An empty input is a problem, so allow it to
12
+ # fail early.
13
+ #
14
+ # If an algorithm has potential for infinite loops, fix the root cause: do NOT add artificial safety limits (e.g., maxIterations
15
+ # counters) to prevent infinite loops.
16
+ #
17
+ # Always use semantic column, index, or slice identifiers: Never hardcode the locations.
18
+
19
+ # TODO `set_threshold`: I know 0 means disabled, but I don't even understand if 1 means "as frequently as possible" or "almost never".
20
+ set_threshold(1, 1, 1)
21
+ Z0Z_bit_lengthSafetyLimit: int = 61
22
+
23
+ type DataArray1D = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.uint64 | numpy.signedinteger[Any]]]
24
+ type DataArray2columns = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.uint64]]
25
+ type DataArray3columns = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.uint64]]
26
+ type SelectorBoolean = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.bool_]]
27
+ type SelectorIndices = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.intp]]
28
+
29
+ # NOTE This code block enables semantic references to your data.
30
+ columnsArrayCurveGroups = columnsArrayTotal = 3
31
+ columnΩ: int = (columnsArrayTotal - columnsArrayTotal) - 1 # Something _feels_ right about this instead of `= -1`.
32
+ columnDistinctCrossings = columnΩ = columnΩ + 1
33
+ columnGroupAlpha = columnΩ = columnΩ + 1
34
+ columnGroupZulu = columnΩ = columnΩ + 1
35
+ if columnΩ != columnsArrayTotal - 1:
36
+ message = f"Please inspect the code above this `if` check. '{columnsArrayTotal = }', therefore '{columnΩ = }' must be '{columnsArrayTotal - 1 = }' due to 'zero-indexing.'"
37
+ raise ValueError(message)
38
+ del columnsArrayTotal, columnΩ
39
+
40
+ columnsArrayCurveLocations = columnsArrayTotal = 2
41
+ columnΩ: int = (columnsArrayTotal - columnsArrayTotal) - 1
42
+ columnDistinctCrossings = columnΩ = columnΩ + 1
43
+ columnCurveLocations = columnΩ = columnΩ + 1
44
+ if columnΩ != columnsArrayTotal - 1:
45
+ message = f"Please inspect the code above this `if` check. '{columnsArrayTotal = }', therefore '{columnΩ = }' must be '{columnsArrayTotal - 1 = }' due to 'zero-indexing.'"
46
+ raise ValueError(message)
47
+ del columnsArrayTotal, columnΩ
48
+
49
+ groupAlphaLocator: int = 0x55555555555555555555555555555555
50
+ groupAlphaLocator64: int = 0x5555555555555555
51
+ groupZuluLocator: int = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
52
+ groupZuluLocator64: int = 0xaaaaaaaaaaaaaaaa
53
+
54
+ def convertDictionaryCurveLocations2CurveGroups(dictionaryCurveLocations: dict[int, int]) -> dict[tuple[int, int], int]:
55
+ return {(curveLocations & groupAlphaLocator, (curveLocations & groupZuluLocator) >> 1): distinctCrossings
56
+ for curveLocations, distinctCrossings in dictionaryCurveLocations.items()}
57
+
58
+ def count(bridges: int, dictionaryCurveGroups: dict[tuple[int, int], int], bridgesMinimum: int = 0) -> tuple[int, dict[tuple[int, int], int]]:
59
+
60
+ dictionaryCurveLocations: dict[int, int] = {}
61
+ while bridges > bridgesMinimum:
62
+ bridges -= 1
63
+
64
+ curveLocationsMAXIMUM: int = 1 << (2 * bridges + 4)
65
+
66
+ for (groupAlpha, groupZulu), distinctCrossings in dictionaryCurveGroups.items():
67
+ groupAlphaCurves = groupAlpha != 1
68
+ groupZuluCurves = groupZulu != 1
69
+
70
+ # bridgesSimple
71
+ curveLocationAnalysis = ((groupAlpha | (groupZulu << 1)) << 2) | 3
72
+ if curveLocationAnalysis < curveLocationsMAXIMUM:
73
+ dictionaryCurveLocations[curveLocationAnalysis] = dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
74
+
75
+ if groupAlphaCurves:
76
+ curveLocationAnalysis = (groupAlpha >> 2) | (groupZulu << 3) | ((groupAlphaIsEven := 1 - (groupAlpha & 0b1)) << 1)
77
+ if curveLocationAnalysis < curveLocationsMAXIMUM:
78
+ dictionaryCurveLocations[curveLocationAnalysis] = dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
79
+
80
+ if groupZuluCurves:
81
+ curveLocationAnalysis = (groupZulu >> 1) | (groupAlpha << 2) | (groupZuluIsEven := 1 - (groupZulu & 1))
82
+ if curveLocationAnalysis < curveLocationsMAXIMUM:
83
+ dictionaryCurveLocations[curveLocationAnalysis] = dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
84
+
85
+ # bridgesAligned
86
+ if groupZuluCurves and groupAlphaCurves:
87
+ # One Truth-check to select a code path
88
+ groupsCanBePairedTogether = (groupZuluIsEven << 1) | groupAlphaIsEven # pyright: ignore[reportPossiblyUnboundVariable]
89
+
90
+ if groupsCanBePairedTogether != 0: # Case 0 (False, False)
91
+ XOrHere2makePair = 0b1
92
+ findUnpaired_0b1 = 0
93
+
94
+ if groupsCanBePairedTogether == 1: # Case 1: (False, True)
95
+ while findUnpaired_0b1 >= 0:
96
+ XOrHere2makePair <<= 2
97
+ findUnpaired_0b1 += 1 if (groupAlpha & XOrHere2makePair) == 0 else -1
98
+ groupAlpha ^= XOrHere2makePair # noqa: PLW2901
99
+ elif groupsCanBePairedTogether == 2: # Case 2: (True, False)
100
+ while findUnpaired_0b1 >= 0:
101
+ XOrHere2makePair <<= 2
102
+ findUnpaired_0b1 += 1 if (groupZulu & XOrHere2makePair) == 0 else -1
103
+ groupZulu ^= XOrHere2makePair # noqa: PLW2901
104
+
105
+ # Cases 1, 2, and 3 all compute curveLocationAnalysis
106
+ curveLocationAnalysis = ((groupZulu >> 2) << 1) | (groupAlpha >> 2)
107
+ if curveLocationAnalysis < curveLocationsMAXIMUM:
108
+ dictionaryCurveLocations[curveLocationAnalysis] = dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
109
+
110
+ dictionaryCurveGroups = convertDictionaryCurveLocations2CurveGroups(dictionaryCurveLocations)
111
+ dictionaryCurveLocations = {}
112
+
113
+ return (bridges, dictionaryCurveGroups)
114
+
115
+ @cache
116
+ def walkDyckPath(intWithExtra_0b1: int) -> int:
117
+ findTheExtra_0b1: int = 0
118
+ flipExtra_0b1_Here: int = 1
119
+ while True:
120
+ flipExtra_0b1_Here <<= 2
121
+ if (intWithExtra_0b1 & flipExtra_0b1_Here) == 0:
122
+ findTheExtra_0b1 += 1
123
+ else:
124
+ findTheExtra_0b1 -= 1
125
+ if findTheExtra_0b1 < 0:
126
+ break
127
+ return flipExtra_0b1_Here
128
+
129
+ @cache
130
+ def _flipTheExtra_0b1(avoidingLookupsInPerRowLoop: int) -> numpy.uint64:
131
+ """Be a docstring."""
132
+ return numpy.uint64(avoidingLookupsInPerRowLoop ^ walkDyckPath(avoidingLookupsInPerRowLoop))
133
+
134
+ # TODO there is a better way to do this.
135
+ flipTheExtra_0b1 = numpy.vectorize(_flipTheExtra_0b1, otypes=[numpy.uint64])
136
+ """The vectorize function is provided primarily for convenience, not for performance. The implementation is essentially a for loop."""
137
+
138
+ def aggregateCurveLocations(arrayCurveLocations: DataArray2columns) -> DataArray3columns:
139
+ arrayCurveGroups: DataArray3columns = numpy.tile(
140
+ A=numpy.unique(arrayCurveLocations[:, columnCurveLocations])
141
+ , reps=(columnsArrayCurveGroups, 1)
142
+ ).T
143
+ arrayCurveGroups[:, columnDistinctCrossings] = 0
144
+ numpy.add.at(
145
+ arrayCurveGroups[:, columnDistinctCrossings]
146
+ , numpy.searchsorted(
147
+ a=arrayCurveGroups[:, columnCurveLocations]
148
+ , v=arrayCurveLocations[:, columnCurveLocations])
149
+ , arrayCurveLocations[:, columnDistinctCrossings]
150
+ )
151
+ # I'm computing groupZulu from curveLocations that are physically in `arrayCurveGroups`, so I'm using `columnCurveLocations`.
152
+ numpy.bitwise_and(arrayCurveGroups[:, columnCurveLocations], numpy.uint64(groupZuluLocator64), out=arrayCurveGroups[:, columnGroupZulu])
153
+ numpy.right_shift(arrayCurveGroups[:, columnGroupZulu], 1, out=arrayCurveGroups[:, columnGroupZulu])
154
+ # NOTE Do not alphabetize these operations. This column has curveLocations data that groupZulu needs.
155
+ arrayCurveGroups[:, columnGroupAlpha] &= groupAlphaLocator64
156
+ return arrayCurveGroups
157
+
158
def convertDictionaryCurveGroups2array(dictionaryCurveGroups: dict[tuple[int, int], int]) -> DataArray3columns:
	"""Pack {(groupAlpha, groupZulu): distinctCrossings} into a 3-column uint64 array."""
	countGroups: int = len(dictionaryCurveGroups)
	arrayCurveGroups: DataArray3columns = numpy.empty((countGroups, columnsArrayCurveGroups), dtype=numpy.uint64)
	arrayCurveGroups[:, columnDistinctCrossings] = numpy.fromiter(dictionaryCurveGroups.values(), dtype=numpy.uint64, count=countGroups)
	arrayKeys: DataArray2columns = numpy.array(list(dictionaryCurveGroups.keys()), dtype=numpy.uint64)
	arrayCurveGroups[:, columnGroupAlpha] = arrayKeys[:, 0]
	arrayCurveGroups[:, columnGroupZulu] = arrayKeys[:, 1]
	return arrayCurveGroups
164
+
165
def count64(bridges: int, arrayCurveGroups: DataArray3columns, bridgesMinimum: int = 0) -> tuple[int, DataArray3columns]:
	"""Run the transfer-matrix sweep with NumPy uint64 arithmetic.

	Each iteration consumes one bridge: it derives candidate curveLocations from the current
	(groupAlpha, groupZulu) pairs, keeps only candidates below a per-iteration maximum, and
	re-aggregates duplicates via `aggregateCurveLocations`.

	Parameters
	----------
	bridges : int
		Remaining bridge count; decremented once per loop iteration.
	arrayCurveGroups : DataArray3columns
		Columns groupAlpha, groupZulu, distinctCrossings, all uint64.
	bridgesMinimum : int = 0
		Stop early when `bridges` reaches this value, so the caller can finish with
		arbitrary-precision Python `int` before uint64 subtotals could overflow.

	Returns
	-------
	bridgesAndCurveGroups : tuple[int, DataArray3columns]
		The remaining `bridges` (0 when complete) and the current grouped data.
	"""
	# Bail out before a distinctCrossings subtotal can exceed the uint64 safety limit.
	while bridges > bridgesMinimum and int(arrayCurveGroups[:, columnDistinctCrossings].max()).bit_length() < Z0Z_bit_lengthSafetyLimit:
		bridges -= 1
		curveLocationsMAXIMUM: numpy.uint64 = numpy.uint64(1 << (2 * bridges + 4))

		selectGroupAlphaCurves: SelectorBoolean = arrayCurveGroups[:, columnGroupAlpha] > numpy.uint64(1)
		curveLocationsGroupAlpha: DataArray1D = ((arrayCurveGroups[selectGroupAlphaCurves, columnGroupAlpha] >> 2)
			| (arrayCurveGroups[selectGroupAlphaCurves, columnGroupZulu] << 3)
			| ((numpy.uint64(1) - (arrayCurveGroups[selectGroupAlphaCurves, columnGroupAlpha] & 1)) << 1)
		)
		selectGroupAlphaCurvesLessThanMaximum: SelectorIndices = numpy.flatnonzero(selectGroupAlphaCurves)[numpy.flatnonzero(curveLocationsGroupAlpha < curveLocationsMAXIMUM)]

		selectGroupZuluCurves: SelectorBoolean = arrayCurveGroups[:, columnGroupZulu] > numpy.uint64(1)
		curveLocationsGroupZulu: DataArray1D = (arrayCurveGroups[selectGroupZuluCurves, columnGroupZulu] >> 1
			| arrayCurveGroups[selectGroupZuluCurves, columnGroupAlpha] << 2
			| (numpy.uint64(1) - (arrayCurveGroups[selectGroupZuluCurves, columnGroupZulu] & 1))
		)
		selectGroupZuluCurvesLessThanMaximum: SelectorIndices = numpy.flatnonzero(selectGroupZuluCurves)[numpy.flatnonzero(curveLocationsGroupZulu < curveLocationsMAXIMUM)]

		selectBridgesSimpleLessThanMaximum: SelectorIndices = numpy.flatnonzero(
			((arrayCurveGroups[:, columnGroupAlpha] << 2) | (arrayCurveGroups[:, columnGroupZulu] << 3) | 3) < curveLocationsMAXIMUM
		) # Computation, but including `< curveLocationsMAXIMUM` is ~2% of total time.

		# Selectors for bridgesAligned -------------------------------------------------
		selectGroupAlphaAtEven: SelectorBoolean = (arrayCurveGroups[:, columnGroupAlpha] & 1) == numpy.uint64(0)
		selectGroupZuluAtEven: SelectorBoolean = (arrayCurveGroups[:, columnGroupZulu] & 1) == numpy.uint64(0)
		selectBridgesAligned: SelectorBoolean = selectGroupAlphaCurves & selectGroupZuluCurves & (selectGroupAlphaAtEven | selectGroupZuluAtEven)

		# Each slice records where one category's rows land in `arrayCurveLocations`;
		# `SliceΩ` tracks the running end of the allocation.
		SliceΩ: slice[int, int, Literal[1]] = slice(0,0)
		sliceAllocateGroupAlpha = SliceΩ = slice(SliceΩ.stop, SliceΩ.stop + selectGroupAlphaCurvesLessThanMaximum.size)
		sliceAllocateGroupZulu = SliceΩ = slice(SliceΩ.stop, SliceΩ.stop + selectGroupZuluCurvesLessThanMaximum.size)
		sliceAllocateBridgesSimple = SliceΩ = slice(SliceΩ.stop, SliceΩ.stop + selectBridgesSimpleLessThanMaximum.size)
		# bridgesAligned is allocated at its maximum possible size; it is trimmed below
		# once the `< curveLocationsMAXIMUM` filter is known.
		sliceAllocateBridgesAligned = SliceΩ = slice(SliceΩ.stop, SliceΩ.stop + selectBridgesAligned.size)

		arrayCurveLocations: DataArray2columns = numpy.zeros((SliceΩ.stop, columnsArrayCurveLocations), dtype=arrayCurveGroups.dtype)

		arrayCurveLocations[sliceAllocateGroupAlpha, columnCurveLocations] = curveLocationsGroupAlpha[numpy.flatnonzero(curveLocationsGroupAlpha < curveLocationsMAXIMUM)]
		arrayCurveLocations[sliceAllocateGroupAlpha, columnDistinctCrossings] = arrayCurveGroups[selectGroupAlphaCurvesLessThanMaximum, columnDistinctCrossings]

		arrayCurveLocations[sliceAllocateGroupZulu, columnCurveLocations] = curveLocationsGroupZulu[numpy.flatnonzero(curveLocationsGroupZulu < curveLocationsMAXIMUM)]
		arrayCurveLocations[sliceAllocateGroupZulu, columnDistinctCrossings] = arrayCurveGroups[selectGroupZuluCurvesLessThanMaximum, columnDistinctCrossings]

		# TODO Uh, it sure looks like I am doing this computation twice. Computation (without assignment) ~ 1.5% of total time.
		arrayCurveLocations[sliceAllocateBridgesSimple, columnCurveLocations] = (
			(arrayCurveGroups[selectBridgesSimpleLessThanMaximum, columnGroupAlpha] << 2)
			| (arrayCurveGroups[selectBridgesSimpleLessThanMaximum, columnGroupZulu] << 3)
			| 3
		)
		arrayCurveLocations[sliceAllocateBridgesSimple, columnDistinctCrossings] = arrayCurveGroups[selectBridgesSimpleLessThanMaximum, columnDistinctCrossings]

		# Rebinding to None before `del` satisfies pyright; the goal is to release large
		# intermediates as early as possible to lower peak memory.
		curveLocationsGroupAlpha = None; del curveLocationsGroupAlpha # pyright: ignore[reportAssignmentType] # noqa: E702
		curveLocationsGroupZulu = None; del curveLocationsGroupZulu # pyright: ignore[reportAssignmentType] # noqa: E702
		selectBridgesSimpleLessThanMaximum = None; del selectBridgesSimpleLessThanMaximum # pyright: ignore[reportAssignmentType] # noqa: E702
		selectGroupAlphaCurvesLessThanMaximum = None; del selectGroupAlphaCurvesLessThanMaximum # pyright: ignore[reportAssignmentType] # noqa: E702
		selectGroupZuluCurvesLessThanMaximum = None; del selectGroupZuluCurvesLessThanMaximum # pyright: ignore[reportAssignmentType] # noqa: E702
		goByeBye()  # NOTE(review): defined elsewhere; presumably triggers collection/trim — confirm.

		# NOTE this MODIFIES `arrayCurveGroups` for bridgesPairedToOdd ---------------------------------------------------------------------------------------
		selectBridgesGroupAlphaPairedToOdd: SelectorIndices = numpy.flatnonzero(selectBridgesAligned & selectGroupAlphaAtEven & (~selectGroupZuluAtEven))
		arrayCurveGroups[selectBridgesGroupAlphaPairedToOdd, columnGroupAlpha] = flipTheExtra_0b1(
			arrayCurveGroups[selectBridgesGroupAlphaPairedToOdd, columnGroupAlpha]
		)

		selectBridgesGroupZuluPairedToOdd: SelectorIndices = numpy.flatnonzero(selectBridgesAligned & (~selectGroupAlphaAtEven) & selectGroupZuluAtEven)
		arrayCurveGroups[selectBridgesGroupZuluPairedToOdd, columnGroupZulu] = flipTheExtra_0b1(
			arrayCurveGroups[selectBridgesGroupZuluPairedToOdd, columnGroupZulu]
		)

		selectBridgesGroupAlphaPairedToOdd = None; del selectBridgesGroupAlphaPairedToOdd # pyright: ignore[reportAssignmentType] # noqa: E702
		selectBridgesGroupZuluPairedToOdd = None; del selectBridgesGroupZuluPairedToOdd # pyright: ignore[reportAssignmentType] # noqa: E702
		selectGroupAlphaAtEven = None; del selectGroupAlphaAtEven # pyright: ignore[reportAssignmentType] # noqa: E702
		selectGroupAlphaCurves = None; del selectGroupAlphaCurves # pyright: ignore[reportAssignmentType] # noqa: E702
		selectGroupZuluAtEven = None; del selectGroupZuluAtEven # pyright: ignore[reportAssignmentType] # noqa: E702
		selectGroupZuluCurves = None; del selectGroupZuluCurves # pyright: ignore[reportAssignmentType] # noqa: E702
		goByeBye()

		# bridgesAligned; bridgesAlignedAtEven, bridgesGroupAlphaPairedToOdd, bridgesGroupZuluPairedToOdd ------------------------------------------------------------------
		curveLocationsBridgesAligned: DataArray1D = (((arrayCurveGroups[selectBridgesAligned, columnGroupZulu] >> 2) << 1)
			| (arrayCurveGroups[selectBridgesAligned, columnGroupAlpha] >> 2)
		)
		selectBridgesAlignedLessThanMaximum: SelectorIndices = numpy.flatnonzero(selectBridgesAligned)[numpy.flatnonzero(curveLocationsBridgesAligned < curveLocationsMAXIMUM)]

		# Shrink the bridgesAligned slice from its maximum allocation to the filtered count.
		sliceAllocateBridgesAligned = SliceΩ = slice(sliceAllocateBridgesAligned.start, sliceAllocateBridgesAligned.stop - selectBridgesAligned.size + selectBridgesAlignedLessThanMaximum.size)
		arrayCurveLocations[sliceAllocateBridgesAligned, columnDistinctCrossings] = arrayCurveGroups[selectBridgesAlignedLessThanMaximum, columnDistinctCrossings]
		arrayCurveLocations[sliceAllocateBridgesAligned, columnCurveLocations] = curveLocationsBridgesAligned[numpy.flatnonzero(curveLocationsBridgesAligned < curveLocationsMAXIMUM)]

		arrayCurveGroups = None; del arrayCurveGroups # pyright: ignore[reportAssignmentType] # noqa: E702
		curveLocationsBridgesAligned = None; del curveLocationsBridgesAligned # pyright: ignore[reportAssignmentType] # noqa: E702
		del curveLocationsMAXIMUM
		selectBridgesAligned = None; del selectBridgesAligned # pyright: ignore[reportAssignmentType] # noqa: E702
		selectBridgesAlignedLessThanMaximum = None; del selectBridgesAlignedLessThanMaximum # pyright: ignore[reportAssignmentType] # noqa: E702
		goByeBye()

		# Trim the over-allocated tail (bridgesAligned rows that failed the maximum filter),
		# then merge duplicate curveLocations for the next iteration.
		arrayCurveLocations.resize((SliceΩ.stop, columnsArrayCurveLocations))
		arrayCurveGroups = aggregateCurveLocations(arrayCurveLocations)

		arrayCurveLocations = None; del arrayCurveLocations # pyright: ignore[reportAssignmentType] # noqa: E702
		del sliceAllocateBridgesAligned
		del sliceAllocateBridgesSimple
		del sliceAllocateGroupAlpha
		del sliceAllocateGroupZulu
		del SliceΩ
		goByeBye()

	return (bridges, arrayCurveGroups)
271
+
272
def convertArrayCurveGroups2dictionaryCurveGroups(arrayCurveGroups: DataArray3columns) -> dict[tuple[int, int], int]:
	"""Repackage the grouped array as {(groupAlpha, groupZulu): distinctCrossings} with Python `int` values."""
	dictionaryCurveGroups: dict[tuple[int, int], int] = {}
	for groupAlpha, groupZulu, distinctCrossings in zip(
		arrayCurveGroups[:, columnGroupAlpha].tolist()
		, arrayCurveGroups[:, columnGroupZulu].tolist()
		, arrayCurveGroups[:, columnDistinctCrossings].tolist()
	):
		dictionaryCurveGroups[(groupAlpha, groupZulu)] = distinctCrossings
	return dictionaryCurveGroups
274
+
275
def doTheNeedful(n: int, dictionaryCurveLocations: dict[int, int]) -> int:
	"""Compute a(n) meanders with the transfer matrix algorithm.

	Parameters
	----------
	n : int
		The index in the OEIS ID sequence.
	dictionaryCurveLocations : dict[int, int]
		A dictionary mapping curve locations to their counts.

	Returns
	-------
	a(n) : int
		The computed value of a(n).

	Making sausage
	--------------

	As first computed by Iwan Jensen in 2000, A000682(41) = 6664356253639465480.
	Citation: https://github.com/hunterhogan/mapFolding/blob/main/citations/Jensen.bibtex
	See also https://oeis.org/A000682

	I'm sure you instantly observed that A000682(41) = (6664356253639465480).bit_length() = 63 bits. And A005316(44) =
	(18276178714484582264).bit_length() = 64 bits.

	If you ask NumPy 2.3, "What is your relationship with integers with more than 64 bits?"
	NumPy will say, "It's complicated."

	Therefore, to take advantage of the computational excellence of NumPy when computing A000682(n) for n > 41, I must make some
	adjustments at the total count approaches 64 bits.

	The second complication is bit-packed integers. I use a loop that starts at `bridges = n` and decrements (`bridges -= 1`)
	`until bridges = 0`. If `bridges > 29`, some of the bit-packed integers have more than 64 bits. "Hey NumPy, can I use
	bit-packed integers with more than 64 bits?" NumPy: "It's complicated." Therefore, while `bridges` is decrementing, I don't
	use NumPy until I believe the bit-packed integers will be less than 64 bits.

	A third factor that works in my favor is that peak memory usage occurs when all types of integers are well under 64-bits wide.

	In total, to compute a(n) for "large" n, I use three-stages.
	1. I use Python primitive `int` contained in a Python primitive `dict`.
	2. When the bit width of the bit-packed integers connected to `bridges` is small enough to use `numpy.uint64`, I switch to NumPy for the heavy lifting.
	3. When `distinctCrossings` subtotals might exceed 64 bits, I must switch back to Python primitives.
	"""
	# NOTE '29' is based on two things. 1) `bridges = 29`, groupZuluLocator = 0xaaaaaaaaaaaaaaaa.bit_length() = 64. 2) If `bridges =
	# 30` or a larger number, `OverflowError: int too big to convert`. Conclusion: '29' isn't necessarily correct or the best value:
	# it merely fits within my limited ability to assess the correct value.
	# NOTE the above was written when I had the `bridges >= bridgesMinimum` bug. So, apply '-1' to everything.
	# NOTE This default value is necessary: it prevents `count64` from returning an incomplete dictionary when that is not necessary.
	# TODO `count64_bridgesMaximum` might be a VERY good idea as a second safeguard against overflowing distinctCrossingsTotal. But
	# I'm pretty sure I should use an actual check on maximum bit-width in arrayCurveGroups[:, columnDistinctCrossings] at the start
	# of each while loop. Tests on A000682 showed that the max bit-width of arrayCurveGroups[:, columnDistinctCrossings] always
	# increased by 1 or 2 bits on each iteration: never 0 and never 3. I did not test A005316. And I do not have a mathematical proof of the limit.

	count64_bridgesMaximum = 28
	bridgesMinimum = 0
	distinctCrossings64bitLimitAsValueOf_n = 41
	distinctCrossingsSubtotal64bitLimitAsValueOf_n_WAG = distinctCrossings64bitLimitAsValueOf_n - 3
	distinctCrossings64bitLimitSafetyMargin = 4

	dictionaryCurveGroups: dict[tuple[int, int], int] = convertDictionaryCurveLocations2CurveGroups(dictionaryCurveLocations)

	# Stage 1: pure-Python `int`/`dict` while bit-packed values may exceed 64 bits.
	if n >= count64_bridgesMaximum:
		if n >= distinctCrossingsSubtotal64bitLimitAsValueOf_n_WAG:
			bridgesMinimum = n - distinctCrossingsSubtotal64bitLimitAsValueOf_n_WAG + distinctCrossings64bitLimitSafetyMargin
		n, dictionaryCurveGroups = count(n, dictionaryCurveGroups, count64_bridgesMaximum)
		gc.collect()
	# Stage 2: NumPy uint64 heavy lifting; stops early at `bridgesMinimum` to avoid overflow.
	n, arrayCurveGroups = count64(n, convertDictionaryCurveGroups2array(dictionaryCurveGroups), bridgesMinimum)
	# Stage 3: back to Python primitives if `count64` stopped before `bridges` reached 0.
	if n > 0:
		gc.collect()
		n, dictionaryCurveGroups = count(n, convertArrayCurveGroups2dictionaryCurveGroups(arrayCurveGroups), bridgesMinimum=0)
		distinctCrossingsTotal = sum(dictionaryCurveGroups.values())
	else:
		distinctCrossingsTotal = int(arrayCurveGroups[0, columnDistinctCrossings])
	return distinctCrossingsTotal