mapFolding 0.15.3__py3-none-any.whl → 0.15.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/__init__.py +15 -11
- mapFolding/_theSSOT.py +55 -62
- mapFolding/_theTypes.py +66 -4
- mapFolding/algorithms/__init__.py +1 -0
- mapFolding/algorithms/matrixMeanders.py +348 -0
- mapFolding/algorithms/oeisIDbyFormula.py +113 -0
- mapFolding/basecamp.py +55 -3
- mapFolding/oeis.py +40 -54
- mapFolding/{_oeisFormulas/matrixMeanders64.py → reference/meandersDumpingGround/matrixMeanders64retired.py} +37 -29
- mapFolding/someAssemblyRequired/makeAllModules.py +5 -5
- mapFolding/syntheticModules/algorithmA007822Numba.py +4 -2
- mapFolding/tests/conftest.py +28 -9
- mapFolding/tests/test_computations.py +31 -9
- mapFolding/tests/test_oeis.py +2 -20
- {mapfolding-0.15.3.dist-info → mapfolding-0.15.4.dist-info}/METADATA +1 -1
- {mapfolding-0.15.3.dist-info → mapfolding-0.15.4.dist-info}/RECORD +33 -47
- {mapfolding-0.15.3.dist-info → mapfolding-0.15.4.dist-info}/entry_points.txt +0 -1
- mapFolding/_oeisFormulas/A000136.py +0 -4
- mapFolding/_oeisFormulas/A000560.py +0 -4
- mapFolding/_oeisFormulas/A000682.py +0 -85
- mapFolding/_oeisFormulas/A001010.py +0 -19
- mapFolding/_oeisFormulas/A001011.py +0 -5
- mapFolding/_oeisFormulas/A005315.py +0 -4
- mapFolding/_oeisFormulas/A005316.py +0 -10
- mapFolding/_oeisFormulas/A223094.py +0 -7
- mapFolding/_oeisFormulas/A259702.py +0 -4
- mapFolding/_oeisFormulas/A301620.py +0 -6
- mapFolding/_oeisFormulas/Z0Z_aOFn.py +0 -34
- mapFolding/_oeisFormulas/Z0Z_notes.py +0 -16
- mapFolding/_oeisFormulas/Z0Z_oeisMeanders.py +0 -74
- mapFolding/_oeisFormulas/Z0Z_symmetry.py +0 -131
- mapFolding/_oeisFormulas/__init__.py +0 -1
- mapFolding/_oeisFormulas/matrixMeanders.py +0 -134
- mapFolding/_oeisFormulas/matrixMeandersAnnex.py +0 -84
- /mapFolding/{daoOfMapFolding.py → algorithms/daoOfMapFolding.py} +0 -0
- /mapFolding/reference/{A005316JavaPort.py → meandersDumpingGround/A005316JavaPort.py} +0 -0
- /mapFolding/reference/{A005316imperative.py → meandersDumpingGround/A005316imperative.py} +0 -0
- /mapFolding/reference/{A005316intOptimized.py → meandersDumpingGround/A005316intOptimized.py} +0 -0
- /mapFolding/reference/{A005316optimized128bit.py → meandersDumpingGround/A005316optimized128bit.py} +0 -0
- /mapFolding/reference/{A005316primitiveOptimized.py → meandersDumpingGround/A005316primitiveOptimized.py} +0 -0
- /mapFolding/reference/{A005316redis.py → meandersDumpingGround/A005316redis.py} +0 -0
- /mapFolding/reference/{A005316write2disk.py → meandersDumpingGround/A005316write2disk.py} +0 -0
- /mapFolding/reference/{matrixMeandersBaseline.py → meandersDumpingGround/matrixMeandersBaseline.py} +0 -0
- /mapFolding/reference/{matrixMeandersBaselineAnnex.py → meandersDumpingGround/matrixMeandersBaselineAnnex.py} +0 -0
- /mapFolding/reference/{matrixMeandersBaselineV2.py → meandersDumpingGround/matrixMeandersBaselineV2.py} +0 -0
- /mapFolding/reference/{matrixMeandersSimpleQueue.py → meandersDumpingGround/matrixMeandersSimpleQueue.py} +0 -0
- /mapFolding/reference/{matrixMeandersSlicePop.py → meandersDumpingGround/matrixMeandersSlicePop.py} +0 -0
- {mapfolding-0.15.3.dist-info → mapfolding-0.15.4.dist-info}/WHEEL +0 -0
- {mapfolding-0.15.3.dist-info → mapfolding-0.15.4.dist-info}/licenses/LICENSE +0 -0
- {mapfolding-0.15.3.dist-info → mapfolding-0.15.4.dist-info}/top_level.txt +0 -0
mapFolding/__init__.py
CHANGED

@@ -15,17 +15,17 @@ The computational framework integrates type safety, persistent result storage,
 and mathematical validation through OEIS sequence integration.

 Core Transformation Tools:
-
-
-
-
-
+    countFolds: Primary interface for computing folding pattern counts
+    MapFoldingState: Computational state management for recursive analysis
+    Connection graph generation: Mathematical foundation for folding relationships
+    Task division utilities: Experimental parallel computation options
+    OEIS integration: Mathematical validation and sequence discovery

 Primary Use Cases:
-
-
-
-
+    Mathematical research into folding pattern properties and relationships
+    Educational exploration of combinatorial mathematics concepts
+    Computational validation of theoretical results
+    Extension of known mathematical sequences through new discoveries

 The package handles the full spectrum of map folding analysis, from simple
 educational examples to research-grade computations requiring multi-day processing

@@ -50,6 +50,10 @@ from mapFolding._theTypes import (
     DatatypeElephino as DatatypeElephino,
     DatatypeFoldsTotal as DatatypeFoldsTotal,
     DatatypeLeavesTotal as DatatypeLeavesTotal,
+    MetadataOEISidMapFolding as MetadataOEISidMapFolding,
+    MetadataOEISidMapFoldingManuallySet as MetadataOEISidMapFoldingManuallySet,
+    MetadataOEISidMeanders as MetadataOEISidMeanders,
+    MetadataOEISidMeandersManuallySet as MetadataOEISidMeandersManuallySet,
     NumPyElephino as NumPyElephino,
     NumPyFoldsTotal as NumPyFoldsTotal,
     NumPyIntegerType as NumPyIntegerType,

@@ -80,10 +84,10 @@ from mapFolding.filesystemToolkit import (
 from mapFolding.basecamp import countFolds as countFolds

 from mapFolding.oeis import (
-
+    dictionaryOEISMapFolding as dictionaryOEISMapFolding,
+    dictionaryOEISMeanders as dictionaryOEISMeanders,
     getFoldsTotalKnown as getFoldsTotalKnown,
     getOEISids as getOEISids,
     OEIS_for_n as OEIS_for_n,
     oeisIDfor_n as oeisIDfor_n,
-    dictionaryOEIS as dictionaryOEIS,
 )
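For orientation, a minimal sketch of the new public surface re-exported above. It assumes only what this diff shows: the single `dictionaryOEIS` export was replaced by the pair `dictionaryOEISMapFolding` and `dictionaryOEISMeanders`, and, by analogy with the settings dictionaries later in this diff, that both are keyed by OEIS ID strings.

```python
# Sketch only: exercises names re-exported by mapFolding/__init__.py in 0.15.4.
# The keyed-by-OEIS-ID structure is an assumption drawn from the settings
# dictionaries shown later in this diff, not from documented API.
from mapFolding import dictionaryOEISMapFolding, dictionaryOEISMeanders

print(sorted(dictionaryOEISMapFolding))  # map-folding sequences, e.g. 'A000136'
print(sorted(dictionaryOEISMeanders))    # meander sequences, e.g. 'A005316'
```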
mapFolding/_theSSOT.py
CHANGED

@@ -1,80 +1,50 @@
-"""
+"""Access and configure package settings and metadata."""

-Secondary goal: store hardcoded values until I implement a dynamic solution.
-"""
-
-from collections.abc import Callable
 from hunterMakesPy import PackageSettings
+from mapFolding._theTypes import MetadataOEISidMapFoldingManuallySet, MetadataOEISidMeandersManuallySet
 from pathlib import Path
-from typing import TypedDict
 import dataclasses
 import random

-# TODO eliminate hardcoding
-concurrencyPackageHARDCODED = 'multiprocessing'
-"""Default package identifier for concurrent execution operations."""
-
-class MetadataOEISidManuallySet(TypedDict):
-    """Settings that are best selected by a human instead of algorithmically."""
-
-    getMapShape: Callable[[int], tuple[int, ...]]
-    """Function to convert the OEIS sequence index, 'n', to its `mapShape` tuple."""
-    valuesBenchmark: list[int]
-    """List of index values, 'n', to use when benchmarking the algorithm performance."""
-    valuesTestParallelization: list[int]
-    """List of index values, 'n', to use when testing parallelization performance."""
-    valuesTestValidation: list[int]
-    """List of index values, 'n', to use when testing validation performance."""
-
 @dataclasses.dataclass
 class mapFoldingPackageSettings(PackageSettings):
-    """
-
-    (AI generated docstring)
-
-    This dataclass serves as the single source of truth for package configuration,
-    providing both static and dynamically-resolved values needed throughout the
-    package lifecycle. The metadata on each field indicates when that value is
-    determined - either during packaging or at installation/runtime.
-
-    The design supports different evaluation phases to optimize performance and
-    reliability. Packaging-time values can be determined during package creation,
-    while installing-time values require filesystem access or module introspection.
+    """Widely used settings that are especially useful for map folding algorithms.

     Attributes
     ----------
+    identifierPackageFALLBACK : str = ''
+        Fallback package identifier used only during initialization when automatic discovery fails.
+    pathPackage : Path = Path()
+        Absolute path to the installed package directory. Automatically resolved from `identifierPackage` if not provided.
+    identifierPackage : str = ''
+        Canonical name of the package. Automatically extracted from `pyproject.toml`.
     fileExtension : str = '.py'
-
-    packageName : str
-        Canonical name of the package as defined in project configuration.
-    pathPackage : Path
-        Absolute filesystem path to the installed package directory.
-    concurrencyPackage : str | None = None
-        Package identifier for concurrent execution operations.
+        Default file extension.

+    cacheDays : int = 30
+        Number of days to retain cached OEIS data before refreshing from the online source.
+    concurrencyPackage : str = 'multiprocessing'
+        Package identifier for concurrent execution operations.
+    OEISidMapFoldingManuallySet : dict[str, MetadataOEISidMapFoldingManuallySet]
+        Settings that are best selected by a human instead of algorithmically.
+    OEISidMeandersManuallySet : dict[str, MetadataOEISidMeandersManuallySet]
+        Settings that are best selected by a human instead of algorithmically for meander sequences.
     """

-
-    """
-    Package identifier for concurrent execution operations.
-
-    (AI generated docstring)
+    OEISidMapFoldingManuallySet: dict[str, MetadataOEISidMapFoldingManuallySet] = dataclasses.field(default_factory=dict[str, MetadataOEISidMapFoldingManuallySet])
+    """Settings that are best selected by a human instead of algorithmically."""

-
-
-    package specified in the module constants is used. Accepted values include
-    'multiprocessing' for standard parallel processing and 'numba' for
-    specialized numerical computations.
-    """
+    OEISidMeandersManuallySet: dict[str, MetadataOEISidMeandersManuallySet] = dataclasses.field(default_factory=dict[str, MetadataOEISidMeandersManuallySet])
+    """Settings that are best selected by a human instead of algorithmically for meander sequences."""

-
-    """
+    cacheDays: int = 30
+    """Number of days to retain cached OEIS data before refreshing from the online source."""

-    concurrencyPackage =
-    """
+    concurrencyPackage: str = 'multiprocessing'
+    """Package identifier for concurrent execution operations."""

 # TODO I made a `TypedDict` before I knew how to make dataclasses and classes. Think about other data structures.
-
+OEISidMapFoldingManuallySet: dict[str, MetadataOEISidMapFoldingManuallySet] = {
     'A000136': {
         'getMapShape': lambda n: (1, n),
         'valuesBenchmark': [14],

@@ -119,11 +89,34 @@ settingsOEISManuallySelected: dict[str, MetadataOEISidManuallySet] = {
     },
 }

-
-
-
+identifierPackageFALLBACK = "mapFolding"
+"""Manually entered package name used as fallback when dynamic resolution fails."""
+
+packageSettings = mapFoldingPackageSettings(identifierPackageFALLBACK=identifierPackageFALLBACK, OEISidMapFoldingManuallySet=OEISidMapFoldingManuallySet)
 """Global package settings."""
-
-
+
+# TODO integrate into packageSettings
 pathCache: Path = packageSettings.pathPackage / ".cache"
 """Local directory path for storing cached OEIS sequence data and metadata."""
+OEISidMeandersManuallySet: dict[str, MetadataOEISidMeandersManuallySet] = {
+    'A000560': {'valuesTestValidation': [*range(3, 12)]},
+    'A000682': {'valuesTestValidation': [*range(3, 12)]},
+    'A001010': {'valuesTestValidation': [*range(3, 11)]},
+    'A001011': {'valuesTestValidation': [*range(3, 7)]},
+    'A005315': {'valuesTestValidation': [*range(3, 9)]},
+    'A005316': {'valuesTestValidation': [*range(3, 13)]},
+    'A060206': {'valuesTestValidation': [*range(3, 9)]},
+    'A077460': {'valuesTestValidation': [*range(3, 8)]},
+    'A078591': {'valuesTestValidation': [*range(3, 10)]},
+    'A223094': {'valuesTestValidation': [*range(3, 11)]},
+    'A259702': {'valuesTestValidation': [*range(3, 13)]},
+    'A301620': {'valuesTestValidation': [*range(3, 11)]},
+}
+
+# Recreate packageSettings with meanders settings included
+packageSettings = mapFoldingPackageSettings(
+    identifierPackageFALLBACK=identifierPackageFALLBACK,
+    OEISidMapFoldingManuallySet=OEISidMapFoldingManuallySet,
+    OEISidMeandersManuallySet=OEISidMeandersManuallySet,
+)
+"""Global package settings."""
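A short sketch of reading the rebuilt `packageSettings` object above. The attribute and key names come straight from this diff; the expected values are the literals shown in `OEISidMeandersManuallySet` and in the `'A000136'` entry.

```python
# Sketch only: reading the settings assembled at the end of mapFolding/_theSSOT.py.
from mapFolding._theSSOT import packageSettings

# 'A005316' maps to {'valuesTestValidation': [*range(3, 13)]} in the diff above.
print(packageSettings.OEISidMeandersManuallySet['A005316']['valuesTestValidation'])  # [3, 4, ..., 12]

# The 'A000136' entry defines 'getMapShape' as lambda n: (1, n).
getMapShape = packageSettings.OEISidMapFoldingManuallySet['A000136']['getMapShape']
print(getMapShape(6))  # (1, 6)
```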
mapFolding/_theTypes.py
CHANGED

@@ -1,7 +1,8 @@
 """Types for defensive coding and for computation optimization."""

+from collections.abc import Callable
 from numpy import dtype, int_ as numpy_int, integer, ndarray, uint64 as numpy_uint64
-from typing import Any, TypeAlias, TypeVar
+from typing import Any, TypeAlias, TypedDict, TypeVar

 NumPyIntegerType = TypeVar('NumPyIntegerType', bound=integer[Any], covariant=True)
 """Any NumPy integer type, which is usually between 8-bit signed and 64-bit unsigned."""

@@ -19,9 +20,9 @@ Note well
 ---------
 Colossal values are found with the cross humpy inequality:

-
-
-
+    ⎡ el  ⎤     ⎡     ⎤
+    ⎢ eph ⎥  X  ⎢ rhi ⎥  <=  elephino
+    ⎣ ant ⎦     ⎣ no  ⎦

 """

@@ -50,3 +51,64 @@ Array1DElephino: TypeAlias = ndarray[tuple[int], dtype[NumPyElephino]]  # noqa:

 Array1DFoldsTotal: TypeAlias = ndarray[tuple[int], dtype[NumPyFoldsTotal]]  # noqa: UP040 The TypeAlias may be used to construct ("cast") a value to the type. And the identifier may be changed to a different type.
 """A `numpy.ndarray` with one axis and elements of type `NumPyFoldsTotal`."""
+
+class MetadataOEISidMapFoldingManuallySet(TypedDict):
+    """Settings that are best selected by a human instead of algorithmically."""
+
+    getMapShape: Callable[[int], tuple[int, ...]]
+    """Function to convert the OEIS sequence index, 'n', to its `mapShape` tuple."""
+    valuesBenchmark: list[int]
+    """List of index values, 'n', to use when benchmarking the algorithm performance."""
+    valuesTestParallelization: list[int]
+    """List of index values, 'n', to use when testing parallelization performance."""
+    valuesTestValidation: list[int]
+    """List of index values, 'n', to use when testing validation performance."""
+
+class MetadataOEISidMapFolding(TypedDict):
+    """Settings for an implemented OEIS sequence."""
+
+    description: str
+    """The OEIS.org description of the integer sequence."""
+    getMapShape: Callable[[int], tuple[int, ...]]
+    """Function to convert the OEIS sequence index, 'n', to its `mapShape` tuple."""
+    offset: int
+    """The starting index, 'n', of the sequence, typically 0 or 1."""
+    valuesBenchmark: list[int]
+    """List of index values, 'n', to use when benchmarking the algorithm performance."""
+    valuesKnown: dict[int, int]
+    """Dictionary of sequence indices, 'n', to their known values, `foldsTotal`."""
+    valuesTestParallelization: list[int]
+    """List of index values, 'n', to use when testing parallelization performance."""
+    valuesTestValidation: list[int]
+    """List of index values, 'n', to use when testing validation performance."""
+    valueUnknown: int
+    """The smallest value of 'n' for which `foldsTotal` is unknown."""
+
+# ruff: noqa: ERA001
+class MetadataOEISidMeandersManuallySet(TypedDict):
+    """Settings that are best selected by a human instead of algorithmically."""
+
+    # valuesBenchmark: list[int]
+    """List of index values, 'n', to use when benchmarking the algorithm performance."""
+    # valuesTestParallelization: list[int]
+    """List of index values, 'n', to use when testing parallelization performance."""
+    valuesTestValidation: list[int]
+    """List of index values, 'n', to use when testing validation performance."""
+
+class MetadataOEISidMeanders(TypedDict):
+    """Settings for an implemented OEIS sequence."""
+
+    description: str
+    """The OEIS.org description of the integer sequence."""
+    offset: int
+    """The starting index, 'n', of the sequence, typically 0 or 1."""
+    # valuesBenchmark: list[int]
+    """List of index values, 'n', to use when benchmarking the algorithm performance."""
+    valuesKnown: dict[int, int]
+    """Dictionary of sequence indices, 'n', to their known values, `foldsTotal`."""
+    # valuesTestParallelization: list[int]
+    """List of index values, 'n', to use when testing parallelization performance."""
+    valuesTestValidation: list[int]
+    """List of index values, 'n', to use when testing validation performance."""
+    valueUnknown: int
+    """The smallest value of 'n' for which `foldsTotal` is unknown."""
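Since these metadata types are `TypedDict`s, conforming values are ordinary dictionaries. A small sketch follows, mirroring literals that appear elsewhere in this diff; the two empty lists are hypothetical placeholders because the corresponding 'A000136' values are not captured here.

```python
# Sketch only: values conforming to the new TypedDicts in mapFolding/_theTypes.py.
from mapFolding._theTypes import MetadataOEISidMapFoldingManuallySet, MetadataOEISidMeandersManuallySet

meandersEntry: MetadataOEISidMeandersManuallySet = {
    'valuesTestValidation': list(range(3, 13)),  # matches the 'A005316' literal in _theSSOT.py
}

mapFoldingEntry: MetadataOEISidMapFoldingManuallySet = {
    'getMapShape': lambda n: (1, n),   # matches the 'A000136' literal in _theSSOT.py
    'valuesBenchmark': [14],           # matches the 'A000136' literal in _theSSOT.py
    'valuesTestParallelization': [],   # hypothetical placeholder; not shown in this diff
    'valuesTestValidation': [],        # hypothetical placeholder; not shown in this diff
}
```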
mapFolding/algorithms/__init__.py
ADDED

@@ -0,0 +1 @@
+"""Hand-made algorithms."""
mapFolding/algorithms/matrixMeanders.py
ADDED

@@ -0,0 +1,348 @@
+# ruff: noqa: D100 D103
+from functools import cache
+from gc import collect as goByeBye, set_threshold
+from typing import Any, Literal
+import gc
+import numpy
+
+# DEVELOPMENT INSTRUCTIONS FOR THIS MODULE
+#
+# Avoid early-return guard clauses, short-circuit returns, and multiple exit points. This codebase enforces a
+# single-return-per-function pattern with stable shapes/dtypes due to AST transforms. An empty input is a problem, so allow it to
+# fail early.
+#
+# If an algorithm has potential for infinite loops, fix the root cause: do NOT add artificial safety limits (e.g., maxIterations
+# counters) to prevent infinite loops.
+#
+# Always use semantic column, index, or slice identifiers: Never hardcode the locations.
+
+# TODO `set_threshold`: I know 0 means disabled, but I don't even understand if 1 means "as frequently as possible" or "almost never".
+set_threshold(1, 1, 1)
+Z0Z_bit_lengthSafetyLimit: int = 61
+
+type DataArray1D = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.uint64 | numpy.signedinteger[Any]]]
+type DataArray2columns = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.uint64]]
+type DataArray3columns = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.uint64]]
+type SelectorBoolean = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.bool_]]
+type SelectorIndices = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.intp]]
+
+# NOTE This code block enables semantic references to your data.
+columnsArrayCurveGroups = columnsArrayTotal = 3
+columnΩ: int = (columnsArrayTotal - columnsArrayTotal) - 1  # Something _feels_ right about this instead of `= -1`.
+columnDistinctCrossings = columnΩ = columnΩ + 1
+columnGroupAlpha = columnΩ = columnΩ + 1
+columnGroupZulu = columnΩ = columnΩ + 1
+if columnΩ != columnsArrayTotal - 1:
+    message = f"Please inspect the code above this `if` check. '{columnsArrayTotal = }', therefore '{columnΩ = }' must be '{columnsArrayTotal - 1 = }' due to 'zero-indexing.'"
+    raise ValueError(message)
+del columnsArrayTotal, columnΩ
+
+columnsArrayCurveLocations = columnsArrayTotal = 2
+columnΩ: int = (columnsArrayTotal - columnsArrayTotal) - 1
+columnDistinctCrossings = columnΩ = columnΩ + 1
+columnCurveLocations = columnΩ = columnΩ + 1
+if columnΩ != columnsArrayTotal - 1:
+    message = f"Please inspect the code above this `if` check. '{columnsArrayTotal = }', therefore '{columnΩ = }' must be '{columnsArrayTotal - 1 = }' due to 'zero-indexing.'"
+    raise ValueError(message)
+del columnsArrayTotal, columnΩ
+
+groupAlphaLocator: int = 0x55555555555555555555555555555555
+groupAlphaLocator64: int = 0x5555555555555555
+groupZuluLocator: int = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+groupZuluLocator64: int = 0xaaaaaaaaaaaaaaaa
+
+def convertDictionaryCurveLocations2CurveGroups(dictionaryCurveLocations: dict[int, int]) -> dict[tuple[int, int], int]:
+    return {(curveLocations & groupAlphaLocator, (curveLocations & groupZuluLocator) >> 1): distinctCrossings
+        for curveLocations, distinctCrossings in dictionaryCurveLocations.items()}
+
+def count(bridges: int, dictionaryCurveGroups: dict[tuple[int, int], int], bridgesMinimum: int = 0) -> tuple[int, dict[tuple[int, int], int]]:
+
+    dictionaryCurveLocations: dict[int, int] = {}
+    while bridges > bridgesMinimum:
+        bridges -= 1
+
+        curveLocationsMAXIMUM: int = 1 << (2 * bridges + 4)
+
+        for (groupAlpha, groupZulu), distinctCrossings in dictionaryCurveGroups.items():
+            groupAlphaCurves = groupAlpha != 1
+            groupZuluCurves = groupZulu != 1
+
+            # bridgesSimple
+            curveLocationAnalysis = ((groupAlpha | (groupZulu << 1)) << 2) | 3
+            if curveLocationAnalysis < curveLocationsMAXIMUM:
+                dictionaryCurveLocations[curveLocationAnalysis] = dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
+
+            if groupAlphaCurves:
+                curveLocationAnalysis = (groupAlpha >> 2) | (groupZulu << 3) | ((groupAlphaIsEven := 1 - (groupAlpha & 0b1)) << 1)
+                if curveLocationAnalysis < curveLocationsMAXIMUM:
+                    dictionaryCurveLocations[curveLocationAnalysis] = dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
+
+            if groupZuluCurves:
+                curveLocationAnalysis = (groupZulu >> 1) | (groupAlpha << 2) | (groupZuluIsEven := 1 - (groupZulu & 1))
+                if curveLocationAnalysis < curveLocationsMAXIMUM:
+                    dictionaryCurveLocations[curveLocationAnalysis] = dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
+
+            # bridgesAligned
+            if groupZuluCurves and groupAlphaCurves:
+                # One Truth-check to select a code path
+                groupsCanBePairedTogether = (groupZuluIsEven << 1) | groupAlphaIsEven  # pyright: ignore[reportPossiblyUnboundVariable]
+
+                if groupsCanBePairedTogether != 0:  # Case 0 (False, False)
+                    XOrHere2makePair = 0b1
+                    findUnpaired_0b1 = 0
+
+                    if groupsCanBePairedTogether == 1:  # Case 1: (False, True)
+                        while findUnpaired_0b1 >= 0:
+                            XOrHere2makePair <<= 2
+                            findUnpaired_0b1 += 1 if (groupAlpha & XOrHere2makePair) == 0 else -1
+                        groupAlpha ^= XOrHere2makePair  # noqa: PLW2901
+                    elif groupsCanBePairedTogether == 2:  # Case 2: (True, False)
+                        while findUnpaired_0b1 >= 0:
+                            XOrHere2makePair <<= 2
+                            findUnpaired_0b1 += 1 if (groupZulu & XOrHere2makePair) == 0 else -1
+                        groupZulu ^= XOrHere2makePair  # noqa: PLW2901
+
+                    # Cases 1, 2, and 3 all compute curveLocationAnalysis
+                    curveLocationAnalysis = ((groupZulu >> 2) << 1) | (groupAlpha >> 2)
+                    if curveLocationAnalysis < curveLocationsMAXIMUM:
+                        dictionaryCurveLocations[curveLocationAnalysis] = dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
+
+        dictionaryCurveGroups = convertDictionaryCurveLocations2CurveGroups(dictionaryCurveLocations)
+        dictionaryCurveLocations = {}
+
+    return (bridges, dictionaryCurveGroups)
+
+@cache
+def walkDyckPath(intWithExtra_0b1: int) -> int:
+    findTheExtra_0b1: int = 0
+    flipExtra_0b1_Here: int = 1
+    while True:
+        flipExtra_0b1_Here <<= 2
+        if (intWithExtra_0b1 & flipExtra_0b1_Here) == 0:
+            findTheExtra_0b1 += 1
+        else:
+            findTheExtra_0b1 -= 1
+            if findTheExtra_0b1 < 0:
+                break
+    return flipExtra_0b1_Here
+
+@cache
+def _flipTheExtra_0b1(avoidingLookupsInPerRowLoop: int) -> numpy.uint64:
+    """Be a docstring."""
+    return numpy.uint64(avoidingLookupsInPerRowLoop ^ walkDyckPath(avoidingLookupsInPerRowLoop))
+
+# TODO there is a better way to do this.
+flipTheExtra_0b1 = numpy.vectorize(_flipTheExtra_0b1, otypes=[numpy.uint64])
+"""The vectorize function is provided primarily for convenience, not for performance. The implementation is essentially a for loop."""
+
+def aggregateCurveLocations(arrayCurveLocations: DataArray2columns) -> DataArray3columns:
+    arrayCurveGroups: DataArray3columns = numpy.tile(
+        A=numpy.unique(arrayCurveLocations[:, columnCurveLocations])
+        , reps=(columnsArrayCurveGroups, 1)
+    ).T
+    arrayCurveGroups[:, columnDistinctCrossings] = 0
+    numpy.add.at(
+        arrayCurveGroups[:, columnDistinctCrossings]
+        , numpy.searchsorted(
+            a=arrayCurveGroups[:, columnCurveLocations]
+            , v=arrayCurveLocations[:, columnCurveLocations])
+        , arrayCurveLocations[:, columnDistinctCrossings]
+    )
+    # I'm computing groupZulu from curveLocations that are physically in `arrayCurveGroups`, so I'm using `columnCurveLocations`.
+    numpy.bitwise_and(arrayCurveGroups[:, columnCurveLocations], numpy.uint64(groupZuluLocator64), out=arrayCurveGroups[:, columnGroupZulu])
+    numpy.right_shift(arrayCurveGroups[:, columnGroupZulu], 1, out=arrayCurveGroups[:, columnGroupZulu])
+    # NOTE Do not alphabetize these operations. This column has curveLocations data that groupZulu needs.
+    arrayCurveGroups[:, columnGroupAlpha] &= groupAlphaLocator64
+    return arrayCurveGroups
+
+def convertDictionaryCurveGroups2array(dictionaryCurveGroups: dict[tuple[int, int], int]) -> DataArray3columns:
+    arrayCurveGroups: DataArray3columns = numpy.tile(numpy.fromiter(dictionaryCurveGroups.values(), dtype=numpy.uint64), (columnsArrayCurveGroups, 1)).T
+    arrayKeys: DataArray2columns = numpy.array(list(dictionaryCurveGroups.keys()), dtype=numpy.uint64)
+    arrayCurveGroups[:, columnGroupAlpha] = arrayKeys[:, 0]
+    arrayCurveGroups[:, columnGroupZulu] = arrayKeys[:, 1]
+    return arrayCurveGroups
+
+def count64(bridges: int, arrayCurveGroups: DataArray3columns, bridgesMinimum: int = 0) -> tuple[int, DataArray3columns]:
+
+    while bridges > bridgesMinimum and int(arrayCurveGroups[:, columnDistinctCrossings].max()).bit_length() < Z0Z_bit_lengthSafetyLimit:
+        bridges -= 1
+        curveLocationsMAXIMUM: numpy.uint64 = numpy.uint64(1 << (2 * bridges + 4))
+
+        selectGroupAlphaCurves: SelectorBoolean = arrayCurveGroups[:, columnGroupAlpha] > numpy.uint64(1)
+        curveLocationsGroupAlpha: DataArray1D = ((arrayCurveGroups[selectGroupAlphaCurves, columnGroupAlpha] >> 2)
+            | (arrayCurveGroups[selectGroupAlphaCurves, columnGroupZulu] << 3)
+            | ((numpy.uint64(1) - (arrayCurveGroups[selectGroupAlphaCurves, columnGroupAlpha] & 1)) << 1)
+        )
+        selectGroupAlphaCurvesLessThanMaximum: SelectorIndices = numpy.flatnonzero(selectGroupAlphaCurves)[numpy.flatnonzero(curveLocationsGroupAlpha < curveLocationsMAXIMUM)]
+
+        selectGroupZuluCurves: SelectorBoolean = arrayCurveGroups[:, columnGroupZulu] > numpy.uint64(1)
+        curveLocationsGroupZulu: DataArray1D = (arrayCurveGroups[selectGroupZuluCurves, columnGroupZulu] >> 1
+            | arrayCurveGroups[selectGroupZuluCurves, columnGroupAlpha] << 2
+            | (numpy.uint64(1) - (arrayCurveGroups[selectGroupZuluCurves, columnGroupZulu] & 1))
+        )
+        selectGroupZuluCurvesLessThanMaximum: SelectorIndices = numpy.flatnonzero(selectGroupZuluCurves)[numpy.flatnonzero(curveLocationsGroupZulu < curveLocationsMAXIMUM)]
+
+        selectBridgesSimpleLessThanMaximum: SelectorIndices = numpy.flatnonzero(
+            ((arrayCurveGroups[:, columnGroupAlpha] << 2) | (arrayCurveGroups[:, columnGroupZulu] << 3) | 3) < curveLocationsMAXIMUM
+        )  # Computation, but including `< curveLocationsMAXIMUM` is ~2% of total time.
+
+        # Selectors for bridgesAligned -------------------------------------------------
+        selectGroupAlphaAtEven: SelectorBoolean = (arrayCurveGroups[:, columnGroupAlpha] & 1) == numpy.uint64(0)
+        selectGroupZuluAtEven: SelectorBoolean = (arrayCurveGroups[:, columnGroupZulu] & 1) == numpy.uint64(0)
+        selectBridgesAligned: SelectorBoolean = selectGroupAlphaCurves & selectGroupZuluCurves & (selectGroupAlphaAtEven | selectGroupZuluAtEven)
+
+        SliceΩ: slice[int, int, Literal[1]] = slice(0,0)
+        sliceAllocateGroupAlpha = SliceΩ = slice(SliceΩ.stop, SliceΩ.stop + selectGroupAlphaCurvesLessThanMaximum.size)
+        sliceAllocateGroupZulu = SliceΩ = slice(SliceΩ.stop, SliceΩ.stop + selectGroupZuluCurvesLessThanMaximum.size)
+        sliceAllocateBridgesSimple = SliceΩ = slice(SliceΩ.stop, SliceΩ.stop + selectBridgesSimpleLessThanMaximum.size)
+        sliceAllocateBridgesAligned = SliceΩ = slice(SliceΩ.stop, SliceΩ.stop + selectBridgesAligned.size)
+
+        arrayCurveLocations: DataArray2columns = numpy.zeros((SliceΩ.stop, columnsArrayCurveLocations), dtype=arrayCurveGroups.dtype)
+
+        arrayCurveLocations[sliceAllocateGroupAlpha, columnCurveLocations] = curveLocationsGroupAlpha[numpy.flatnonzero(curveLocationsGroupAlpha < curveLocationsMAXIMUM)]
+        arrayCurveLocations[sliceAllocateGroupAlpha, columnDistinctCrossings] = arrayCurveGroups[selectGroupAlphaCurvesLessThanMaximum, columnDistinctCrossings]
+
+        arrayCurveLocations[sliceAllocateGroupZulu, columnCurveLocations] = curveLocationsGroupZulu[numpy.flatnonzero(curveLocationsGroupZulu < curveLocationsMAXIMUM)]
+        arrayCurveLocations[sliceAllocateGroupZulu, columnDistinctCrossings] = arrayCurveGroups[selectGroupZuluCurvesLessThanMaximum, columnDistinctCrossings]
+
+        # TODO Uh, it sure looks like I am doing this computation twice. Computation (without assignment) ~ 1.5% of total time.
+        arrayCurveLocations[sliceAllocateBridgesSimple, columnCurveLocations] = (
+            (arrayCurveGroups[selectBridgesSimpleLessThanMaximum, columnGroupAlpha] << 2)
+            | (arrayCurveGroups[selectBridgesSimpleLessThanMaximum, columnGroupZulu] << 3)
+            | 3
+        )
+        arrayCurveLocations[sliceAllocateBridgesSimple, columnDistinctCrossings] = arrayCurveGroups[selectBridgesSimpleLessThanMaximum, columnDistinctCrossings]
+
+        curveLocationsGroupAlpha = None; del curveLocationsGroupAlpha  # pyright: ignore[reportAssignmentType] # noqa: E702
+        curveLocationsGroupZulu = None; del curveLocationsGroupZulu  # pyright: ignore[reportAssignmentType] # noqa: E702
+        selectBridgesSimpleLessThanMaximum = None; del selectBridgesSimpleLessThanMaximum  # pyright: ignore[reportAssignmentType] # noqa: E702
+        selectGroupAlphaCurvesLessThanMaximum = None; del selectGroupAlphaCurvesLessThanMaximum  # pyright: ignore[reportAssignmentType] # noqa: E702
+        selectGroupZuluCurvesLessThanMaximum = None; del selectGroupZuluCurvesLessThanMaximum  # pyright: ignore[reportAssignmentType] # noqa: E702
+        goByeBye()
+
+        # NOTE this MODIFIES `arrayCurveGroups` for bridgesPairedToOdd ---------------------------------------------------------------------------------------
+        selectBridgesGroupAlphaPairedToOdd: SelectorIndices = numpy.flatnonzero(selectBridgesAligned & selectGroupAlphaAtEven & (~selectGroupZuluAtEven))
+        arrayCurveGroups[selectBridgesGroupAlphaPairedToOdd, columnGroupAlpha] = flipTheExtra_0b1(
+            arrayCurveGroups[selectBridgesGroupAlphaPairedToOdd, columnGroupAlpha]
+        )
+
+        selectBridgesGroupZuluPairedToOdd: SelectorIndices = numpy.flatnonzero(selectBridgesAligned & (~selectGroupAlphaAtEven) & selectGroupZuluAtEven)
+        arrayCurveGroups[selectBridgesGroupZuluPairedToOdd, columnGroupZulu] = flipTheExtra_0b1(
+            arrayCurveGroups[selectBridgesGroupZuluPairedToOdd, columnGroupZulu]
+        )
+
+        selectBridgesGroupAlphaPairedToOdd = None; del selectBridgesGroupAlphaPairedToOdd  # pyright: ignore[reportAssignmentType] # noqa: E702
+        selectBridgesGroupZuluPairedToOdd = None; del selectBridgesGroupZuluPairedToOdd  # pyright: ignore[reportAssignmentType] # noqa: E702
+        selectGroupAlphaAtEven = None; del selectGroupAlphaAtEven  # pyright: ignore[reportAssignmentType] # noqa: E702
+        selectGroupAlphaCurves = None; del selectGroupAlphaCurves  # pyright: ignore[reportAssignmentType] # noqa: E702
+        selectGroupZuluAtEven = None; del selectGroupZuluAtEven  # pyright: ignore[reportAssignmentType] # noqa: E702
+        selectGroupZuluCurves = None; del selectGroupZuluCurves  # pyright: ignore[reportAssignmentType] # noqa: E702
+        goByeBye()
+
+        # bridgesAligned; bridgesAlignedAtEven, bridgesGroupAlphaPairedToOdd, bridgesGroupZuluPairedToOdd ------------------------------------------------------------------
+        curveLocationsBridgesAligned: DataArray1D = (((arrayCurveGroups[selectBridgesAligned, columnGroupZulu] >> 2) << 1)
+            | (arrayCurveGroups[selectBridgesAligned, columnGroupAlpha] >> 2)
+        )
+        selectBridgesAlignedLessThanMaximum: SelectorIndices = numpy.flatnonzero(selectBridgesAligned)[numpy.flatnonzero(curveLocationsBridgesAligned < curveLocationsMAXIMUM)]
+
+        sliceAllocateBridgesAligned = SliceΩ = slice(sliceAllocateBridgesAligned.start, sliceAllocateBridgesAligned.stop - selectBridgesAligned.size + selectBridgesAlignedLessThanMaximum.size)
+        arrayCurveLocations[sliceAllocateBridgesAligned, columnDistinctCrossings] = arrayCurveGroups[selectBridgesAlignedLessThanMaximum, columnDistinctCrossings]
+        arrayCurveLocations[sliceAllocateBridgesAligned, columnCurveLocations] = curveLocationsBridgesAligned[numpy.flatnonzero(curveLocationsBridgesAligned < curveLocationsMAXIMUM)]
+
+        arrayCurveGroups = None; del arrayCurveGroups  # pyright: ignore[reportAssignmentType] # noqa: E702
+        curveLocationsBridgesAligned = None; del curveLocationsBridgesAligned  # pyright: ignore[reportAssignmentType] # noqa: E702
+        del curveLocationsMAXIMUM
+        selectBridgesAligned = None; del selectBridgesAligned  # pyright: ignore[reportAssignmentType] # noqa: E702
+        selectBridgesAlignedLessThanMaximum = None; del selectBridgesAlignedLessThanMaximum  # pyright: ignore[reportAssignmentType] # noqa: E702
+        goByeBye()
+
+        arrayCurveLocations.resize((SliceΩ.stop, columnsArrayCurveLocations))
+        arrayCurveGroups = aggregateCurveLocations(arrayCurveLocations)
+
+        arrayCurveLocations = None; del arrayCurveLocations  # pyright: ignore[reportAssignmentType] # noqa: E702
+        del sliceAllocateBridgesAligned
+        del sliceAllocateBridgesSimple
+        del sliceAllocateGroupAlpha
+        del sliceAllocateGroupZulu
+        del SliceΩ
+        goByeBye()
+
+    return (bridges, arrayCurveGroups)
+
+def convertArrayCurveGroups2dictionaryCurveGroups(arrayCurveGroups: DataArray3columns) -> dict[tuple[int, int], int]:
+    return {(int(row[columnGroupAlpha]), int(row[columnGroupZulu])): int(row[columnDistinctCrossings]) for row in arrayCurveGroups}
+
+def doTheNeedful(n: int, dictionaryCurveLocations: dict[int, int]) -> int:
+    """Compute a(n) meanders with the transfer matrix algorithm.
+
+    Parameters
+    ----------
+    n : int
+        The index in the OEIS ID sequence.
+    dictionaryCurveLocations : dict[int, int]
+        A dictionary mapping curve locations to their counts.
+
+    Returns
+    -------
+    a(n) : int
+        The computed value of a(n).
+
+    Making sausage
+    --------------
+
+    As first computed by Iwan Jensen in 2000, A000682(41) = 6664356253639465480.
+    Citation: https://github.com/hunterhogan/mapFolding/blob/main/citations/Jensen.bibtex
+    See also https://oeis.org/A000682
+
+    I'm sure you instantly observed that A000682(41) = (6664356253639465480).bit_length() = 63 bits. And A005316(44) =
+    (18276178714484582264).bit_length() = 64 bits.
+
+    If you ask NumPy 2.3, "What is your relationship with integers with more than 64 bits?"
+    NumPy will say, "It's complicated."
+
+    Therefore, to take advantage of the computational excellence of NumPy when computing A000682(n) for n > 41, I must make some
+    adjustments as the total count approaches 64 bits.
+
+    The second complication is bit-packed integers. I use a loop that starts at `bridges = n` and decrements (`bridges -= 1`)
+    until `bridges = 0`. If `bridges > 29`, some of the bit-packed integers have more than 64 bits. "Hey NumPy, can I use
+    bit-packed integers with more than 64 bits?" NumPy: "It's complicated." Therefore, while `bridges` is decrementing, I don't
+    use NumPy until I believe the bit-packed integers will be less than 64 bits.
+
+    A third factor that works in my favor is that peak memory usage occurs when all types of integers are well under 64-bits wide.
+
+    In total, to compute a(n) for "large" n, I use three stages.
+    1. I use Python primitive `int` contained in a Python primitive `dict`.
+    2. When the bit width of the bit-packed integers connected to `bridges` is small enough to use `numpy.uint64`, I switch to NumPy for the heavy lifting.
+    3. When `distinctCrossings` subtotals might exceed 64 bits, I must switch back to Python primitives.
+    """
+    # NOTE '29' is based on two things. 1) `bridges = 29`, groupZuluLocator = 0xaaaaaaaaaaaaaaaa.bit_length() = 64. 2) If `bridges =
+    # 30` or a larger number, `OverflowError: int too big to convert`. Conclusion: '29' isn't necessarily correct or the best value:
+    # it merely fits within my limited ability to assess the correct value.
+    # NOTE the above was written when I had the `bridges >= bridgesMinimum` bug. So, apply '-1' to everything.
+    # NOTE This default value is necessary: it prevents `count64` from returning an incomplete dictionary when that is not necessary.
+    # TODO `count64_bridgesMaximum` might be a VERY good idea as a second safeguard against overflowing distinctCrossingsTotal. But
+    # I'm pretty sure I should use an actual check on maximum bit-width in arrayCurveGroups[:, columnDistinctCrossings] at the start
+    # of each while loop. Tests on A000682 showed that the max bit-width of arrayCurveGroups[:, columnDistinctCrossings] always
+    # increased by 1 or 2 bits on each iteration: never 0 and never 3. I did not test A005316. And I do not have a mathematical proof of the limit.
+
+    count64_bridgesMaximum = 28
+    bridgesMinimum = 0
+    distinctCrossings64bitLimitAsValueOf_n = 41
+    distinctCrossingsSubtotal64bitLimitAsValueOf_n_WAG = distinctCrossings64bitLimitAsValueOf_n - 3
+    distinctCrossings64bitLimitSafetyMargin = 4
+
+    dictionaryCurveGroups: dict[tuple[int, int], int] = convertDictionaryCurveLocations2CurveGroups(dictionaryCurveLocations)
+
+    if n >= count64_bridgesMaximum:
+        if n >= distinctCrossingsSubtotal64bitLimitAsValueOf_n_WAG:
+            bridgesMinimum = n - distinctCrossingsSubtotal64bitLimitAsValueOf_n_WAG + distinctCrossings64bitLimitSafetyMargin
+        n, dictionaryCurveGroups = count(n, dictionaryCurveGroups, count64_bridgesMaximum)
+        gc.collect()
+    n, arrayCurveGroups = count64(n, convertDictionaryCurveGroups2array(dictionaryCurveGroups), bridgesMinimum)
+    if n > 0:
+        gc.collect()
+        n, dictionaryCurveGroups = count(n, convertArrayCurveGroups2dictionaryCurveGroups(arrayCurveGroups), bridgesMinimum=0)
+        distinctCrossingsTotal = sum(dictionaryCurveGroups.values())
+    else:
+        distinctCrossingsTotal = int(arrayCurveGroups[0, columnDistinctCrossings])
+    return distinctCrossingsTotal
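A minimal sketch of the bit-packing convention the module above relies on: even bit positions of a packed `curveLocations` value carry `groupAlpha`, odd bit positions carry `groupZulu`, and the 64-bit twin masks (`groupAlphaLocator64`, `groupZuluLocator64`) do the same job inside `count64`. The packed value below is arbitrary, chosen only to illustrate the masks; it is not a real seed state for any sequence. The two `bit_length()` assertions restate figures from the `doTheNeedful` docstring.

```python
# Sketch only: how the alternating-bit masks in matrixMeanders.py split one packed
# curveLocations integer into (groupAlpha, groupZulu).
groupAlphaLocator = 0x55555555555555555555555555555555  # selects even bit positions
groupZuluLocator = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa   # selects odd bit positions

curveLocations = 0b110110  # arbitrary illustrative value, not a real seed state
groupAlpha = curveLocations & groupAlphaLocator          # 0b010100
groupZulu = (curveLocations & groupZuluLocator) >> 1     # 0b010001
assert (groupAlpha, groupZulu) == (0b010100, 0b010001)

# The staging thresholds in doTheNeedful come from values like these (cited in its docstring):
assert (6664356253639465480).bit_length() == 63   # A000682(41)
assert (18276178714484582264).bit_length() == 64  # A005316(44)
```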