mapFolding 0.15.4__py3-none-any.whl → 0.16.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapFolding/__init__.py +7 -9
- mapFolding/_theSSOT.py +1 -0
- mapFolding/algorithms/daoOfMapFolding.py +1 -2
- mapFolding/algorithms/getBucketsTotal.py +137 -0
- mapFolding/algorithms/matrixMeanders.py +457 -286
- mapFolding/algorithms/oeisIDbyFormula.py +310 -76
- mapFolding/algorithms/zCuzDocStoopidoeisIDbyFormula.py +84 -0
- mapFolding/basecamp.py +99 -14
- mapFolding/dataBaskets.py +74 -0
- mapFolding/oeis.py +3 -2
- mapFolding/reference/A000682facts.py +662 -0
- mapFolding/reference/A005316facts.py +62 -0
- mapFolding/reference/matrixMeandersAnalysis/__init__.py +1 -0
- mapFolding/reference/matrixMeandersAnalysis/evenEven.py +144 -0
- mapFolding/reference/matrixMeandersAnalysis/oddEven.py +54 -0
- mapFolding/someAssemblyRequired/A007822/A007822rawMaterials.py +55 -0
- mapFolding/someAssemblyRequired/A007822/__init__.py +0 -0
- mapFolding/someAssemblyRequired/A007822/makeA007822AsynchronousModules.py +185 -0
- mapFolding/someAssemblyRequired/A007822/makeA007822Modules.py +71 -0
- mapFolding/someAssemblyRequired/RecipeJob.py +2 -2
- mapFolding/someAssemblyRequired/__init__.py +9 -2
- mapFolding/someAssemblyRequired/_toolIfThis.py +4 -3
- mapFolding/someAssemblyRequired/_toolkitContainers.py +8 -8
- mapFolding/someAssemblyRequired/infoBooth.py +27 -30
- mapFolding/someAssemblyRequired/makeJobTheorem2Numba.py +1 -1
- mapFolding/someAssemblyRequired/makeJobTheorem2codon.py +5 -2
- mapFolding/someAssemblyRequired/makingModules_count.py +301 -0
- mapFolding/someAssemblyRequired/makingModules_doTheNeedful.py +120 -0
- mapFolding/someAssemblyRequired/mapFolding/__init__.py +0 -0
- mapFolding/someAssemblyRequired/mapFolding/makeMapFoldingModules.py +220 -0
- mapFolding/someAssemblyRequired/toolkitMakeModules.py +152 -0
- mapFolding/someAssemblyRequired/toolkitNumba.py +1 -1
- mapFolding/someAssemblyRequired/transformationTools.py +1 -0
- mapFolding/syntheticModules/A007822/__init__.py +1 -0
- mapFolding/syntheticModules/{algorithmA007822Numba.py → A007822/algorithmNumba.py} +2 -4
- mapFolding/syntheticModules/A007822/asynchronous.py +148 -0
- mapFolding/syntheticModules/A007822/asynchronousAnnex.py +68 -0
- mapFolding/syntheticModules/A007822/asynchronousTheorem2.py +53 -0
- mapFolding/syntheticModules/A007822/asynchronousTrimmed.py +47 -0
- mapFolding/syntheticModules/dataPackingA007822.py +1 -1
- mapFolding/tests/test_computations.py +2 -2
- mapFolding/trim_memory.py +62 -0
- mapFolding/zCuzDocStoopid/__init__.py +1 -0
- mapFolding/zCuzDocStoopid/makeDocstrings.py +63 -0
- {mapfolding-0.15.4.dist-info → mapfolding-0.16.0.dist-info}/METADATA +9 -2
- mapfolding-0.16.0.dist-info/RECORD +100 -0
- mapFolding/someAssemblyRequired/A007822rawMaterials.py +0 -46
- mapFolding/someAssemblyRequired/makeAllModules.py +0 -764
- mapfolding-0.15.4.dist-info/RECORD +0 -78
- /mapFolding/syntheticModules/{algorithmA007822.py → A007822/algorithm.py} +0 -0
- /mapFolding/syntheticModules/{initializeStateA007822.py → A007822/initializeState.py} +0 -0
- /mapFolding/syntheticModules/{theorem2A007822.py → A007822/theorem2.py} +0 -0
- /mapFolding/syntheticModules/{theorem2A007822Numba.py → A007822/theorem2Numba.py} +0 -0
- /mapFolding/syntheticModules/{theorem2A007822Trimmed.py → A007822/theorem2Trimmed.py} +0 -0
- {mapfolding-0.15.4.dist-info → mapfolding-0.16.0.dist-info}/WHEEL +0 -0
- {mapfolding-0.15.4.dist-info → mapfolding-0.16.0.dist-info}/entry_points.txt +0 -0
- {mapfolding-0.15.4.dist-info → mapfolding-0.16.0.dist-info}/licenses/LICENSE +0 -0
- {mapfolding-0.15.4.dist-info → mapfolding-0.16.0.dist-info}/top_level.txt +0 -0
mapFolding/algorithms/matrixMeanders.py

@@ -1,119 +1,72 @@
-
+"""Count meanders with matrix transfer algorithm.
+
+Notes
+-----
+- Odd/even of `groupAlpha` == the odd/even of `curveLocations`. Proof: `groupAlphaIsEven = curveLocations & 1 & 1 ^ 1`.
+- Odd/even of `groupZulu` == `curveLocations` second-least significant bit. So `groupZuluIsEven = bool(curveLocations & 2 ^ 2)`.
+"""
 from functools import cache
-from gc import collect as goByeBye
-from
-import
+from gc import collect as goByeBye
+from hunterMakesPy import raiseIfNone
+from mapFolding import MatrixMeandersState
+from mapFolding.algorithms.getBucketsTotal import getBucketsTotal
+from pathlib import Path
+from warnings import warn
 import numpy
+import pandas

-
-
-# Avoid early-return guard clauses, short-circuit returns, and multiple exit points. This codebase enforces a
-# single-return-per-function pattern with stable shapes/dtypes due to AST transforms. An empty input is a problem, so allow it to
-# fail early.
-#
-# If an algorithm has potential for infinite loops, fix the root cause: do NOT add artificial safety limits (e.g., maxIterations
-# counters) to prevent infinite loops.
-#
-# Always use semantic column, index, or slice identifiers: Never hardcode the locations.
-
-# TODO `set_threshold`: I know 0 means disabled, but I don't even understand if 1 means "as frequently as possible" or "almost never".
-set_threshold(1, 1, 1)
-Z0Z_bit_lengthSafetyLimit: int = 61
-
-type DataArray1D = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.uint64 | numpy.signedinteger[Any]]]
-type DataArray2columns = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.uint64]]
-type DataArray3columns = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.uint64]]
-type SelectorBoolean = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.bool_]]
-type SelectorIndices = numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.intp]]
-
-# NOTE This code blocks enables semantic references to your data.
-columnsArrayCurveGroups = columnsArrayTotal = 3
-columnΩ: int = (columnsArrayTotal - columnsArrayTotal) - 1 # Something _feels_ right about this instead of `= -1`.
-columnDistinctCrossings = columnΩ = columnΩ + 1
-columnGroupAlpha = columnΩ = columnΩ + 1
-columnGroupZulu = columnΩ = columnΩ + 1
-if columnΩ != columnsArrayTotal - 1:
-message = f"Please inspect the code above this `if` check. '{columnsArrayTotal = }', therefore '{columnΩ = }' must be '{columnsArrayTotal - 1 = }' due to 'zero-indexing.'"
-raise ValueError(message)
-del columnsArrayTotal, columnΩ
-
-columnsArrayCurveLocations = columnsArrayTotal = 2
-columnΩ: int = (columnsArrayTotal - columnsArrayTotal) - 1
-columnDistinctCrossings = columnΩ = columnΩ + 1
-columnCurveLocations = columnΩ = columnΩ + 1
-if columnΩ != columnsArrayTotal - 1:
-message = f"Please inspect the code above this `if` check. '{columnsArrayTotal = }', therefore '{columnΩ = }' must be '{columnsArrayTotal - 1 = }' due to 'zero-indexing.'"
-raise ValueError(message)
-del columnsArrayTotal, columnΩ
-
-groupAlphaLocator: int = 0x55555555555555555555555555555555
-groupAlphaLocator64: int = 0x5555555555555555
-groupZuluLocator: int = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
-groupZuluLocator64: int = 0xaaaaaaaaaaaaaaaa
-
-def convertDictionaryCurveLocations2CurveGroups(dictionaryCurveLocations: dict[int, int]) -> dict[tuple[int, int], int]:
-return {(curveLocations & groupAlphaLocator, (curveLocations & groupZuluLocator) >> 1): distinctCrossings
-for curveLocations, distinctCrossings in dictionaryCurveLocations.items()}
-
-def count(bridges: int, dictionaryCurveGroups: dict[tuple[int, int], int], bridgesMinimum: int = 0) -> tuple[int, dict[tuple[int, int], int]]:
-
-dictionaryCurveLocations: dict[int, int] = {}
-while bridges > bridgesMinimum:
-bridges -= 1
-
-curveLocationsMAXIMUM: int = 1 << (2 * bridges + 4)
+pathRoot: Path = Path.cwd() / 'curves'
+pathRoot.mkdir(exist_ok=True, parents=True)

-
-
-
+@cache
+def _flipTheExtra_0b1(intWithExtra_0b1: numpy.uint64) -> numpy.uint64:
+return numpy.uint64(intWithExtra_0b1 ^ walkDyckPath(int(intWithExtra_0b1)))

-
-curveLocationAnalysis = ((groupAlpha | (groupZulu << 1)) << 2) | 3
-if curveLocationAnalysis < curveLocationsMAXIMUM:
-dictionaryCurveLocations[curveLocationAnalysis] = dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
+flipTheExtra_0b1AsUfunc = numpy.frompyfunc(_flipTheExtra_0b1, 1, 1)

-
-
-if curveLocationAnalysis < curveLocationsMAXIMUM:
-dictionaryCurveLocations[curveLocationAnalysis] = dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
+def outfitDictionaryCurveGroups(state: MatrixMeandersState) -> dict[tuple[int, int], int]:
+"""Outfit `dictionaryCurveGroups` so it may manage the computations for one iteration of the transfer matrix.

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-if groupsCanBePairedTogether == 1: # Case 1: (False, True)
-while findUnpaired_0b1 >= 0:
-XOrHere2makePair <<= 2
-findUnpaired_0b1 += 1 if (groupAlpha & XOrHere2makePair) == 0 else -1
-groupAlpha ^= XOrHere2makePair # noqa: PLW2901
-elif groupsCanBePairedTogether == 2: # Case 2: (True, False)
-while findUnpaired_0b1 >= 0:
-XOrHere2makePair <<= 2
-findUnpaired_0b1 += 1 if (groupZulu & XOrHere2makePair) == 0 else -1
-groupZulu ^= XOrHere2makePair # noqa: PLW2901
-
-# Cases 1, 2, and 3 all compute curveLocationAnalysis
-curveLocationAnalysis = ((groupZulu >> 2) << 1) | (groupAlpha >> 2)
-if curveLocationAnalysis < curveLocationsMAXIMUM:
-dictionaryCurveLocations[curveLocationAnalysis] = dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
-
-dictionaryCurveGroups = convertDictionaryCurveLocations2CurveGroups(dictionaryCurveLocations)
-dictionaryCurveLocations = {}
-
-return (bridges, dictionaryCurveGroups)
+Parameters
+----------
+state : MatrixMeandersState
+The current state of the computation, including `dictionaryCurveLocations`.
+
+Returns
+-------
+dictionaryCurveGroups : dict[tuple[int, int], int]
+A dictionary of `(groupAlpha, groupZulu)` to `distinctCrossings`.
+"""
+state.bitWidth = max(state.dictionaryCurveLocations.keys()).bit_length()
+return {(curveLocations & state.locatorGroupAlpha, (curveLocations & state.locatorGroupZulu) >> 1): distinctCrossings
+for curveLocations, distinctCrossings in state.dictionaryCurveLocations.items()}

 @cache
 def walkDyckPath(intWithExtra_0b1: int) -> int:
+"""Find the bit position for flipping paired curve endpoints in meander transfer matrices.
+
+Parameters
+----------
+intWithExtra_0b1 : int
+Binary representation of curve locations with an extra bit encoding parity information.
+
+Returns
+-------
+flipExtra_0b1_Here : int
+Bit mask indicating the position where the balance condition fails, formatted as 2^(2k).
+
+3L33T H@X0R
+------------
+Binary search for first negative balance in shifted bit pairs. Returns 2^(2k) mask for
+bit position k where cumulative balance counter transitions from non-negative to negative.
+
+Mathematics
+-----------
+Implements the Dyck path balance verification algorithm from Jensen's transfer matrix
+enumeration. Computes the position where ∑(i=0 to k) (-1)^b_i < 0 for the first time,
+where b_i are the bits of the input at positions 2i.
+
+"""
 findTheExtra_0b1: int = 0
 flipExtra_0b1_Here: int = 1
 while True:
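Aside (reviewer note, not part of the diff): the two parity identities quoted in the new module docstring can be checked exhaustively with a short, package-independent sketch. The locator masks below are the 0x5555…/0xaaaa… constants the 0.15.4 module defined and that the new code reads from `state.locatorGroupAlpha` / `state.locatorGroupZulu`.

```python
# Illustrative check of the docstring's parity claims; not part of the package.
locatorGroupAlpha = 0x55555555555555555555555555555555  # selects the groupAlpha bits (even positions)
locatorGroupZulu = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa   # selects the groupZulu bits (odd positions)

for curveLocations in range(1, 1 << 12):
    groupAlpha = curveLocations & locatorGroupAlpha
    groupZulu = (curveLocations & locatorGroupZulu) >> 1
    # groupAlpha parity equals bit 0 of curveLocations.
    assert (groupAlpha % 2 == 0) == bool(curveLocations & 1 & 1 ^ 1)
    # groupZulu parity equals bit 1 of curveLocations.
    assert (groupZulu % 2 == 0) == bool(curveLocations & 2 ^ 2)
```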
@@ -126,223 +79,441 @@ def walkDyckPath(intWithExtra_0b1: int) -> int:
 break
 return flipExtra_0b1_Here

-
-
-"""Be a docstring."""
-return numpy.uint64(avoidingLookupsInPerRowLoop ^ walkDyckPath(avoidingLookupsInPerRowLoop))
-
-# TODO there is a better way to do this.
-flipTheExtra_0b1 = numpy.vectorize(_flipTheExtra_0b1, otypes=[numpy.uint64])
-"""The vectorize function is provided primarily for convenience, not for performance. The implementation is essentially a for loop."""
-
-def aggregateCurveLocations(arrayCurveLocations: DataArray2columns) -> DataArray3columns:
-arrayCurveGroups: DataArray3columns = numpy.tile(
-A=numpy.unique(arrayCurveLocations[:, columnCurveLocations])
-, reps=(columnsArrayCurveGroups, 1)
-).T
-arrayCurveGroups[:, columnDistinctCrossings] = 0
-numpy.add.at(
-arrayCurveGroups[:, columnDistinctCrossings]
-, numpy.searchsorted(
-a=arrayCurveGroups[:, columnCurveLocations]
-, v=arrayCurveLocations[:, columnCurveLocations])
-, arrayCurveLocations[:, columnDistinctCrossings]
-)
-# I'm computing groupZulu from curveLocations that are physically in `arrayCurveGroups`, so I'm using `columnCurveLocations`.
-numpy.bitwise_and(arrayCurveGroups[:, columnCurveLocations], numpy.uint64(groupZuluLocator64), out=arrayCurveGroups[:, columnGroupZulu])
-numpy.right_shift(arrayCurveGroups[:, columnGroupZulu], 1, out=arrayCurveGroups[:, columnGroupZulu])
-# NOTE Do not alphabetize these operations. This column has curveLocations data that groupZulu needs.
-arrayCurveGroups[:, columnGroupAlpha] &= groupAlphaLocator64
-return arrayCurveGroups
-
-def convertDictionaryCurveGroups2array(dictionaryCurveGroups: dict[tuple[int, int], int]) -> DataArray3columns:
-arrayCurveGroups: DataArray3columns = numpy.tile(numpy.fromiter(dictionaryCurveGroups.values(), dtype=numpy.uint64), (columnsArrayCurveGroups, 1)).T
-arrayKeys: DataArray2columns = numpy.array(list(dictionaryCurveGroups.keys()), dtype=numpy.uint64)
-arrayCurveGroups[:, columnGroupAlpha] = arrayKeys[:, 0]
-arrayCurveGroups[:, columnGroupZulu] = arrayKeys[:, 1]
-return arrayCurveGroups
-
-def count64(bridges: int, arrayCurveGroups: DataArray3columns, bridgesMinimum: int = 0) -> tuple[int, DataArray3columns]:
-
-while bridges > bridgesMinimum and int(arrayCurveGroups[:, columnDistinctCrossings].max()).bit_length() < Z0Z_bit_lengthSafetyLimit:
-bridges -= 1
-curveLocationsMAXIMUM: numpy.uint64 = numpy.uint64(1 << (2 * bridges + 4))
-
-selectGroupAlphaCurves: SelectorBoolean = arrayCurveGroups[:, columnGroupAlpha] > numpy.uint64(1)
-curveLocationsGroupAlpha: DataArray1D = ((arrayCurveGroups[selectGroupAlphaCurves, columnGroupAlpha] >> 2)
-| (arrayCurveGroups[selectGroupAlphaCurves, columnGroupZulu] << 3)
-| ((numpy.uint64(1) - (arrayCurveGroups[selectGroupAlphaCurves, columnGroupAlpha] & 1)) << 1)
-)
-selectGroupAlphaCurvesLessThanMaximum: SelectorIndices = numpy.flatnonzero(selectGroupAlphaCurves)[numpy.flatnonzero(curveLocationsGroupAlpha < curveLocationsMAXIMUM)]
+def areIntegersWide(state: MatrixMeandersState, dataframe: pandas.DataFrame | None = None, *, fixedSizeMAXIMUMcurveLocations: bool = False) -> bool:
+"""Check if the largest values are wider than the maximum limits.

-
-
-
-
-
-
+Parameters
+----------
+state : MatrixMeandersState
+The current state of the computation, including `dictionaryCurveLocations`.
+dataframe : pandas.DataFrame | None = None
+Optional DataFrame containing 'analyzed' and 'distinctCrossings' columns. If provided, use this instead of `state.dictionaryCurveLocations`.
+fixedSizeMAXIMUMcurveLocations : bool = False
+Set this to `True` if you cast `state.MAXIMUMcurveLocations` to the same fixed size integer type as `state.datatypeCurveLocations`.

-
-
-
+Returns
+-------
+wider : bool
+True if at least one integer is too wide.
+
+Notes
+-----
+Casting `state.MAXIMUMcurveLocations` to a fixed-size 64-bit unsigned integer might cause the flow to be a little more
+complicated because `MAXIMUMcurveLocations` is usually 1-bit larger than the `max(curveLocations)` value.
+
+If you start the algorithm with very large `curveLocations` in your `dictionaryCurveLocations` (*i.e.,* A000682), then the
+flow will go to a function that does not use fixed size integers. When the integers are below the limits (*e.g.,*
+`bitWidthCurveLocationsMaximum`), the flow will go to a function with fixed size integers. In that case, casting
+`MAXIMUMcurveLocations` to a fixed size merely delays the transition from one function to the other by one iteration.
+
+If you start with small values in `dictionaryCurveLocations`, however, then the flow goes to the function with fixed size
+integers and usually stays there until `distinctCrossings` is huge, which is near the end of the computation. If you cast
+`MAXIMUMcurveLocations` into a 64-bit unsigned integer, however, then around `state.kOfMatrix == 28`, the bit width of
+`MAXIMUMcurveLocations` might exceed the limit. That will cause the flow to go to the function that does not have fixed size
+integers for a few iterations before returning to the function with fixed size integers.
+"""
+if dataframe is None:
+curveLocationsWidest: int = max(state.dictionaryCurveLocations.keys()).bit_length()
+distinctCrossingsWidest: int = max(state.dictionaryCurveLocations.values()).bit_length()
+else:
+curveLocationsWidest = int(dataframe['analyzed'].max()).bit_length()
+distinctCrossingsWidest = int(dataframe['distinctCrossings'].max()).bit_length()

-
-
-
-selectBridgesAligned: SelectorBoolean = selectGroupAlphaCurves & selectGroupZuluCurves & (selectGroupAlphaAtEven | selectGroupZuluAtEven)
+MAXIMUMcurveLocations: int = 0
+if fixedSizeMAXIMUMcurveLocations:
+MAXIMUMcurveLocations = state.MAXIMUMcurveLocations

-
-
-
-
-sliceAllocateBridgesAligned = SliceΩ = slice(SliceΩ.stop, SliceΩ.stop + selectBridgesAligned.size)
+return (curveLocationsWidest > raiseIfNone(state.bitWidthCurveLocationsMaximum)
+or distinctCrossingsWidest > raiseIfNone(state.bitWidthDistinctCrossingsMaximum)
+or MAXIMUMcurveLocations > raiseIfNone(state.bitWidthCurveLocationsMaximum)
+)

-
+def countBigInt(state: MatrixMeandersState) -> MatrixMeandersState:
+"""Count meanders with matrix transfer algorithm using Python primitive `int` contained in a Python primitive `dict`.

-
-
+Parameters
+----------
+state : MatrixMeandersState
+The algorithm state containing current `kOfMatrix`, `dictionaryCurveLocations`, and thresholds.

-
-
+Notes
+-----
+The algorithm is sophisticated, but this implementation is straightforward. Compute each index one at a time, compute each
+`curveLocations` one at a time, and compute each type of analysis one at a time.
+"""
+dictionaryCurveGroups: dict[tuple[int, int], int] = {}

-
-
-(arrayCurveGroups[selectBridgesSimpleLessThanMaximum, columnGroupAlpha] << 2)
-| (arrayCurveGroups[selectBridgesSimpleLessThanMaximum, columnGroupZulu] << 3)
-| 3
-)
-arrayCurveLocations[sliceAllocateBridgesSimple, columnDistinctCrossings] = arrayCurveGroups[selectBridgesSimpleLessThanMaximum, columnDistinctCrossings]
+while (state.kOfMatrix > 0 and areIntegersWide(state)):
+state.kOfMatrix -= 1

-
-
-selectBridgesSimpleLessThanMaximum = None; del selectBridgesSimpleLessThanMaximum # pyright: ignore[reportAssignmentType] # noqa: E702
-selectGroupAlphaCurvesLessThanMaximum = None; del selectGroupAlphaCurvesLessThanMaximum # pyright: ignore[reportAssignmentType] # noqa: E702
-selectGroupZuluCurvesLessThanMaximum = None; del selectGroupZuluCurvesLessThanMaximum # pyright: ignore[reportAssignmentType] # noqa: E702
+dictionaryCurveGroups = outfitDictionaryCurveGroups(state)
+state.dictionaryCurveLocations.clear()
 goByeBye()

-
-
-
-
-
+for (groupAlpha, groupZulu), distinctCrossings in dictionaryCurveGroups.items():
+groupAlphaCurves: bool = groupAlpha > 1
+groupZuluHasCurves: bool = groupZulu > 1
+groupAlphaIsEven = groupZuluIsEven = 0
+
+curveLocationAnalysis = ((groupAlpha | (groupZulu << 1)) << 2) | 3
+# simple
+if curveLocationAnalysis < state.MAXIMUMcurveLocations:
+state.dictionaryCurveLocations[curveLocationAnalysis] = state.dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
+
+if groupAlphaCurves:
+curveLocationAnalysis = (groupAlpha >> 2) | (groupZulu << 3) | ((groupAlphaIsEven := 1 - (groupAlpha & 1)) << 1)
+if curveLocationAnalysis < state.MAXIMUMcurveLocations:
+state.dictionaryCurveLocations[curveLocationAnalysis] = state.dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
+
+if groupZuluHasCurves:
+curveLocationAnalysis = (groupZulu >> 1) | (groupAlpha << 2) | (groupZuluIsEven := 1 - (groupZulu & 1))
+if curveLocationAnalysis < state.MAXIMUMcurveLocations:
+state.dictionaryCurveLocations[curveLocationAnalysis] = state.dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
+
+if groupAlphaCurves and groupZuluHasCurves and (groupAlphaIsEven or groupZuluIsEven):
+# aligned
+if groupAlphaIsEven and not groupZuluIsEven:
+groupAlpha ^= walkDyckPath(groupAlpha) # noqa: PLW2901
+elif groupZuluIsEven and not groupAlphaIsEven:
+groupZulu ^= walkDyckPath(groupZulu) # noqa: PLW2901
+
+curveLocationAnalysis: int = ((groupZulu >> 2) << 1) | (groupAlpha >> 2)
+if curveLocationAnalysis < state.MAXIMUMcurveLocations:
+state.dictionaryCurveLocations[curveLocationAnalysis] = state.dictionaryCurveLocations.get(curveLocationAnalysis, 0) + distinctCrossings
+
+return state
+
+# ruff: noqa: B023
+
|
+
def countPandas(state: MatrixMeandersState) -> MatrixMeandersState:
|
|
189
|
+
"""Count meanders with matrix transfer algorithm using pandas DataFrame.
|
|
190
|
+
|
|
191
|
+
Parameters
|
|
192
|
+
----------
|
|
193
|
+
state : MatrixMeandersState
|
|
194
|
+
The algorithm state containing current `kOfMatrix`, `dictionaryCurveLocations`, and thresholds.
|
|
195
|
+
|
|
196
|
+
Returns
|
|
197
|
+
-------
|
|
198
|
+
state : MatrixMeandersState
|
|
199
|
+
Updated state with new `kOfMatrix` and `dictionaryCurveLocations`.
|
|
200
|
+
"""
|
|
201
|
+
dataframeAnalyzed = pandas.DataFrame({
|
|
202
|
+
'analyzed': pandas.Series(name='analyzed', data=state.dictionaryCurveLocations.keys(), copy=False, dtype=state.datatypeCurveLocations)
|
|
203
|
+
, 'distinctCrossings': pandas.Series(name='distinctCrossings', data=state.dictionaryCurveLocations.values(), copy=False, dtype=state.datatypeDistinctCrossings)
|
|
204
|
+
}, dtype=state.datatypeCurveLocations
|
|
205
|
+
)
|
|
206
|
+
state.dictionaryCurveLocations.clear()
|
|
207
|
+
|
|
208
|
+
while (state.kOfMatrix > 0 and not areIntegersWide(state, dataframeAnalyzed)):
|
|
209
|
+
|
|
210
|
+
def aggregateCurveLocations() -> None:
|
|
211
|
+
nonlocal dataframeAnalyzed
|
|
212
|
+
dataframeAnalyzed = dataframeAnalyzed.iloc[0:state.indexStartAnalyzed].groupby('analyzed', sort=False)['distinctCrossings'].aggregate('sum').reset_index()
|
|
213
|
+
|
|
214
|
+
def analyzeCurveLocationsAligned() -> None:
|
|
215
|
+
"""Compute `curveLocations` from `groupAlpha` and `groupZulu` if at least one is an even number.
|
|
216
|
+
|
|
217
|
+
Before computing `curveLocations`, some values of `groupAlpha` and `groupZulu` are modified.
|
|
218
|
+
|
|
219
|
+
Warning
|
|
220
|
+
-------
|
|
221
|
+
This function deletes rows from `dataframeCurveLocations`. Always run this analysis last.
|
|
222
|
+
|
|
223
|
+
Formula
|
|
224
|
+
-------
|
|
225
|
+
```python
|
|
226
|
+
if groupAlpha > 1 and groupZulu > 1 and (groupAlphaIsEven or groupZuluIsEven):
|
|
227
|
+
curveLocations = (groupAlpha >> 2) | ((groupZulu >> 2) << 1)
|
|
228
|
+
```
|
|
229
|
+
"""
|
|
230
|
+
nonlocal dataframeCurveLocations
|
|
231
|
+
|
|
232
|
+
# NOTE Step 1 drop unqualified rows
|
|
233
|
+
|
|
234
|
+
ImaGroupZulpha: pandas.Series = dataframeCurveLocations['curveLocations'].copy() # Ima `groupAlpha`.
|
|
235
|
+
ImaGroupZulpha &= state.locatorGroupAlpha # Ima `groupAlpha`.
|
|
236
|
+
|
|
237
|
+
dataframeCurveLocations = dataframeCurveLocations.loc[(ImaGroupZulpha > 1)] # if groupAlphaHasCurves
|
|
238
|
+
|
|
239
|
+
del ImaGroupZulpha
|
|
240
|
+
|
|
241
|
+
ImaGroupZulpha = dataframeCurveLocations['curveLocations'].copy() # Ima `groupZulu`.
|
|
242
|
+
ImaGroupZulpha &= state.locatorGroupZulu # Ima `groupZulu`.
|
|
243
|
+
ImaGroupZulpha //= 2**1 # Ima `groupZulu` (groupZulu >> 1)
|
|
244
|
+
|
|
245
|
+
dataframeCurveLocations = dataframeCurveLocations.loc[(ImaGroupZulpha > 1)] # if groupZuluHasCurves
|
|
228
246
|
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
247
|
+
del ImaGroupZulpha
|
|
248
|
+
|
|
249
|
+
ImaGroupZulpha = dataframeCurveLocations['curveLocations'].copy() # Ima `groupZulu`.
|
|
250
|
+
ImaGroupZulpha &= 0b10 # Ima `groupZulu`.
|
|
251
|
+
ImaGroupZulpha //= 2**1 # Ima `groupZulu` (groupZulu >> 1)
|
|
252
|
+
ImaGroupZulpha &= 1 # (groupZulu & 1)
|
|
253
|
+
ImaGroupZulpha ^= 1 # (1 - (groupZulu ...))
|
|
254
|
+
dataframeCurveLocations.loc[:, 'analyzed'] = ImaGroupZulpha # selectorGroupZuluAtEven
|
|
255
|
+
|
|
256
|
+
del ImaGroupZulpha
|
|
257
|
+
|
|
258
|
+
ImaGroupZulpha = dataframeCurveLocations['curveLocations'].copy() # Ima `groupAlpha`.
|
|
259
|
+
ImaGroupZulpha &= 1 # (groupAlpha & 1)
|
|
260
|
+
ImaGroupZulpha ^= 1 # (1 - (groupAlpha ...))
|
|
261
|
+
ImaGroupZulpha = ImaGroupZulpha.astype(bool) # selectorGroupAlphaAtODD
|
|
262
|
+
|
|
263
|
+
dataframeCurveLocations = dataframeCurveLocations.loc[(ImaGroupZulpha) | (dataframeCurveLocations.loc[:, 'analyzed'])] # if (groupAlphaIsEven or groupZuluIsEven)
|
|
264
|
+
|
|
265
|
+
del ImaGroupZulpha
|
|
266
|
+
|
|
267
|
+
# NOTE Step 2 modify rows
|
|
268
|
+
|
|
269
|
+
# Make a selector for groupZuluAtEven, so you can modify groupAlpha
|
|
270
|
+
ImaGroupZulpha = dataframeCurveLocations['curveLocations'].copy() # Ima `groupZulu`.
|
|
271
|
+
ImaGroupZulpha &= 0b10 # Ima `groupZulu`.
|
|
272
|
+
ImaGroupZulpha //= 2**1 # Ima `groupZulu` (groupZulu >> 1)
|
|
273
|
+
ImaGroupZulpha &= 1 # (groupZulu & 1)
|
|
274
|
+
ImaGroupZulpha ^= 1 # (1 - (groupZulu ...))
|
|
275
|
+
ImaGroupZulpha = ImaGroupZulpha.astype(bool) # selectorGroupZuluAtEven
|
|
276
|
+
|
|
277
|
+
dataframeCurveLocations.loc[:, 'analyzed'] = dataframeCurveLocations['curveLocations'] # Ima `groupAlpha`.
|
|
278
|
+
dataframeCurveLocations.loc[:, 'analyzed'] &= state.locatorGroupAlpha # (groupAlpha)
|
|
279
|
+
|
|
280
|
+
# if groupAlphaIsEven and not groupZuluIsEven, modifyGroupAlphaPairedToOdd
|
|
281
|
+
dataframeCurveLocations.loc[(~ImaGroupZulpha), 'analyzed'] = state.datatypeCurveLocations( # pyright: ignore[reportCallIssue, reportArgumentType]
|
|
282
|
+
flipTheExtra_0b1AsUfunc(dataframeCurveLocations.loc[(~ImaGroupZulpha), 'analyzed']))
|
|
283
|
+
|
|
284
|
+
del ImaGroupZulpha
|
|
285
|
+
|
|
286
|
+
# if groupZuluIsEven and not groupAlphaIsEven, modifyGroupZuluPairedToOdd
|
|
287
|
+
ImaGroupZulpha = dataframeCurveLocations['curveLocations'].copy() # Ima `groupZulu`.
|
|
288
|
+
ImaGroupZulpha &= state.locatorGroupZulu # Ima `groupZulu`.
|
|
289
|
+
ImaGroupZulpha //= 2**1 # Ima `groupZulu` (groupZulu >> 1)
|
|
290
|
+
|
|
291
|
+
ImaGroupZulpha.loc[(dataframeCurveLocations.loc[:, 'curveLocations'] & 1).astype(bool)] = state.datatypeCurveLocations( # pyright: ignore[reportArgumentType, reportCallIssue]
|
|
292
|
+
flipTheExtra_0b1AsUfunc(ImaGroupZulpha.loc[(dataframeCurveLocations.loc[:, 'curveLocations'] & 1).astype(bool)])) # pyright: ignore[reportCallIssue, reportUnknownArgumentType, reportArgumentType]
|
|
293
|
+
|
|
294
|
+
# NOTE Step 3 compute curveLocations
|
|
295
|
+
|
|
296
|
+
dataframeCurveLocations.loc[:, 'analyzed'] //= 2**2 # (groupAlpha >> 2)
|
|
297
|
+
|
|
298
|
+
ImaGroupZulpha //= 2**2 # (groupZulu >> 2)
|
|
299
|
+
ImaGroupZulpha *= 2**1 # ((groupZulu ...) << 1)
|
|
300
|
+
|
|
301
|
+
dataframeCurveLocations.loc[:, 'analyzed'] |= ImaGroupZulpha # ... | (groupZulu ...)
|
|
302
|
+
|
|
303
|
+
del ImaGroupZulpha
|
|
304
|
+
|
|
305
|
+
dataframeCurveLocations.loc[dataframeCurveLocations['analyzed'] >= state.MAXIMUMcurveLocations, 'analyzed'] = 0
|
|
306
|
+
|
|
307
|
+
def analyzeCurveLocationsAlpha() -> None:
|
|
308
|
+
"""Compute `curveLocations` from `groupAlpha`.
|
|
309
|
+
|
|
310
|
+
Formula
|
|
311
|
+
-------
|
|
312
|
+
```python
|
|
313
|
+
if groupAlpha > 1:
|
|
314
|
+
curveLocations = ((1 - (groupAlpha & 1)) << 1) | (groupZulu << 3) | (groupAlpha >> 2)
|
|
315
|
+
# `(1 - (groupAlpha & 1)` is an evenness test.
|
|
316
|
+
```
|
|
317
|
+
"""
|
|
318
|
+
nonlocal dataframeCurveLocations
|
|
319
|
+
dataframeCurveLocations['analyzed'] = dataframeCurveLocations['curveLocations']
|
|
320
|
+
dataframeCurveLocations.loc[:, 'analyzed'] &= 1 # (groupAlpha & 1)
|
|
321
|
+
dataframeCurveLocations.loc[:, 'analyzed'] ^= 1 # (1 - (groupAlpha ...))
|
|
322
|
+
|
|
323
|
+
dataframeCurveLocations.loc[:, 'analyzed'] *= 2**1 # ((groupAlpha ...) << 1)
|
|
324
|
+
|
|
325
|
+
ImaGroupZulpha: pandas.Series = dataframeCurveLocations['curveLocations'].copy() # Ima `groupZulu`.
|
|
326
|
+
ImaGroupZulpha &= state.locatorGroupZulu # Ima `groupZulu`.
|
|
327
|
+
ImaGroupZulpha //= 2**1 # Ima `groupZulu` (groupZulu >> 1)
|
|
328
|
+
|
|
329
|
+
ImaGroupZulpha *= 2**3 # (groupZulu << 3)
|
|
330
|
+
dataframeCurveLocations.loc[:, 'analyzed'] |= ImaGroupZulpha # ... | (groupZulu ...)
|
|
331
|
+
|
|
332
|
+
del ImaGroupZulpha
|
|
333
|
+
|
|
334
|
+
dataframeCurveLocations.loc[:, 'analyzed'] *= 2**2 # ... | (groupAlpha >> 2)
|
|
335
|
+
|
|
336
|
+
ImaGroupZulpha = dataframeCurveLocations['curveLocations'].copy() # Ima `groupAlpha`.
|
|
337
|
+
ImaGroupZulpha &= state.locatorGroupAlpha # Ima `groupAlpha`.
|
|
338
|
+
|
|
339
|
+
dataframeCurveLocations.loc[:, 'analyzed'] |= ImaGroupZulpha # ... | (groupAlpha)
|
|
340
|
+
dataframeCurveLocations.loc[:, 'analyzed'] //= 2**2 # (... >> 2)
|
|
341
|
+
|
|
342
|
+
dataframeCurveLocations.loc[(ImaGroupZulpha <= 1), 'analyzed'] = 0 # if groupAlpha > 1
|
|
343
|
+
|
|
344
|
+
del ImaGroupZulpha
|
|
345
|
+
|
|
346
|
+
dataframeCurveLocations.loc[dataframeCurveLocations['analyzed'] >= state.MAXIMUMcurveLocations, 'analyzed'] = 0
|
|
347
|
+
|
|
348
|
+
def analyzeCurveLocationsSimple() -> None:
|
|
349
|
+
"""Compute curveLocations with the 'simple' bridges formula.
|
|
350
|
+
|
|
351
|
+
Formula
|
|
352
|
+
-------
|
|
353
|
+
```python
|
|
354
|
+
curveLocations = ((groupAlpha | (groupZulu << 1)) << 2) | 3
|
|
355
|
+
```
|
|
356
|
+
|
|
357
|
+
Notes
|
|
358
|
+
-----
|
|
359
|
+
Using `+= 3` instead of `|= 3` is valid in this specific case. Left shift by two means the last bits are '0b00'. '0 + 3'
|
|
360
|
+
is '0b11', and '0b00 | 0b11' is also '0b11'.
|
|
361
|
+
|
|
362
|
+
"""
|
|
363
|
+
nonlocal dataframeCurveLocations
|
|
364
|
+
dataframeCurveLocations['analyzed'] = dataframeCurveLocations['curveLocations']
|
|
365
|
+
dataframeCurveLocations.loc[:, 'analyzed'] &= state.locatorGroupAlpha
|
|
366
|
+
|
|
367
|
+
groupZulu: pandas.Series = dataframeCurveLocations['curveLocations'].copy()
|
|
368
|
+
groupZulu &= state.locatorGroupZulu
|
|
369
|
+
groupZulu //= 2**1 # (groupZulu >> 1)
|
|
370
|
+
groupZulu *= 2**1 # (groupZulu << 1)
|
|
371
|
+
|
|
372
|
+
dataframeCurveLocations.loc[:, 'analyzed'] |= groupZulu # ((groupAlpha | (groupZulu ...))
|
|
373
|
+
|
|
374
|
+
del groupZulu
|
|
375
|
+
|
|
376
|
+
dataframeCurveLocations.loc[:, 'analyzed'] *= 2**2 # (... << 2)
|
|
377
|
+
dataframeCurveLocations.loc[:, 'analyzed'] += 3 # (...) | 3
|
|
378
|
+
dataframeCurveLocations.loc[dataframeCurveLocations['analyzed'] >= state.MAXIMUMcurveLocations, 'analyzed'] = 0
|
|
379
|
+
|
|
380
|
+
def analyzeCurveLocationsZulu() -> None:
|
|
381
|
+
"""Compute `curveLocations` from `groupZulu`.
|
|
382
|
+
|
|
383
|
+
Formula
|
|
384
|
+
-------
|
|
385
|
+
```python
|
|
386
|
+
if groupZulu > 1:
|
|
387
|
+
curveLocations = (1 - (groupZulu & 1)) | (groupAlpha << 2) | (groupZulu >> 1)
|
|
388
|
+
```
|
|
389
|
+
"""
|
|
390
|
+
nonlocal dataframeCurveLocations
|
|
391
|
+
dataframeCurveLocations.loc[:, 'analyzed'] = dataframeCurveLocations['curveLocations'] # Ima `groupZulu`.
|
|
392
|
+
dataframeCurveLocations.loc[:, 'analyzed'] &= 0b10 # Ima `groupZulu`.
|
|
393
|
+
dataframeCurveLocations.loc[:, 'analyzed'] //= 2**1 # Ima `groupZulu` (groupZulu >> 1)
|
|
394
|
+
dataframeCurveLocations.loc[:, 'analyzed'] &= 1 # (groupZulu & 1)
|
|
395
|
+
dataframeCurveLocations.loc[:, 'analyzed'] ^= 1 # (1 - (groupZulu ...))
|
|
396
|
+
|
|
397
|
+
ImaGroupZulpha: pandas.Series = dataframeCurveLocations['curveLocations'].copy() # Ima `groupAlpha`.
|
|
398
|
+
ImaGroupZulpha &= state.locatorGroupAlpha # Ima `groupAlpha`.
|
|
399
|
+
|
|
400
|
+
ImaGroupZulpha *= 2**2 # (groupAlpha << 2)
|
|
401
|
+
dataframeCurveLocations.loc[:, 'analyzed'] |= ImaGroupZulpha # ... | (groupAlpha ...)
|
|
402
|
+
|
|
403
|
+
del ImaGroupZulpha
|
|
404
|
+
|
|
405
|
+
ImaGroupZulpha = dataframeCurveLocations['curveLocations'].copy() # Ima `groupZulu`.
|
|
406
|
+
ImaGroupZulpha &= state.locatorGroupZulu # Ima `groupZulu`.
|
|
407
|
+
ImaGroupZulpha //= 2**1 # Ima `groupZulu` (groupZulu >> 1)
|
|
408
|
+
|
|
409
|
+
ImaGroupZulpha //= 2**1 # (groupZulu >> 1)
|
|
410
|
+
|
|
411
|
+
dataframeCurveLocations.loc[:, 'analyzed'] |= ImaGroupZulpha # ... | (groupZulu ...)
|
|
412
|
+
|
|
413
|
+
del ImaGroupZulpha
|
|
414
|
+
|
|
415
|
+
ImaGroupZulpha = dataframeCurveLocations['curveLocations'].copy() # Ima `groupZulu`.
|
|
416
|
+
ImaGroupZulpha &= state.locatorGroupZulu # Ima `groupZulu`.
|
|
417
|
+
ImaGroupZulpha //= 2**1 # Ima `groupZulu` (groupZulu >> 1)
|
|
418
|
+
|
|
419
|
+
dataframeCurveLocations.loc[ImaGroupZulpha <= 1, 'analyzed'] = 0 # if groupZulu > 1
|
|
420
|
+
|
|
421
|
+
del ImaGroupZulpha
|
|
422
|
+
|
|
423
|
+
dataframeCurveLocations.loc[dataframeCurveLocations['analyzed'] >= state.MAXIMUMcurveLocations, 'analyzed'] = 0
|
|
424
|
+
|
|
425
|
+
def recordCurveLocations() -> None:
|
|
426
|
+
nonlocal dataframeAnalyzed
|
|
427
|
+
|
|
428
|
+
indexStopAnalyzed: int = state.indexStartAnalyzed + int((dataframeCurveLocations['analyzed'] > 0).sum()) # pyright: ignore[reportUnknownArgumentType, reportUnknownMemberType]
|
|
429
|
+
|
|
430
|
+
if indexStopAnalyzed > state.indexStartAnalyzed:
|
|
431
|
+
if len(dataframeAnalyzed.index) < indexStopAnalyzed:
|
|
432
|
+
warn(f"Lengthened `dataframeAnalyzed` from {len(dataframeAnalyzed.index)} to {indexStopAnalyzed=}; n={state.n}, {state.kOfMatrix=}.", stacklevel=2)
|
|
433
|
+
dataframeAnalyzed = dataframeAnalyzed.reindex(index=pandas.RangeIndex(indexStopAnalyzed), fill_value=0)
|
|
434
|
+
|
|
435
|
+
dataframeAnalyzed.loc[state.indexStartAnalyzed:indexStopAnalyzed - 1, ['analyzed', 'distinctCrossings']] = (
|
|
436
|
+
dataframeCurveLocations.loc[(dataframeCurveLocations['analyzed'] > 0), ['analyzed', 'distinctCrossings']
|
|
437
|
+
].to_numpy(dtype=state.datatypeCurveLocations, copy=False)
|
|
438
|
+
)
|
|
439
|
+
|
|
440
|
+
state.indexStartAnalyzed = indexStopAnalyzed
|
|
441
|
+
|
|
442
|
+
del indexStopAnalyzed
|
|
443
|
+
|
|
444
|
+
dataframeCurveLocations = pandas.DataFrame({
|
|
445
|
+
'curveLocations': pandas.Series(name='curveLocations', data=dataframeAnalyzed['analyzed'], copy=False, dtype=state.datatypeCurveLocations)
|
|
446
|
+
, 'analyzed': pandas.Series(name='analyzed', data=0, dtype=state.datatypeCurveLocations)
|
|
447
|
+
, 'distinctCrossings': pandas.Series(name='distinctCrossings', data=dataframeAnalyzed['distinctCrossings'], copy=False, dtype=state.datatypeDistinctCrossings)
|
|
448
|
+
} # pyright: ignore[reportUnknownArgumentType]
|
|
232
449
|
)
|
|
233
450
|
|
|
234
|
-
|
|
235
|
-
selectBridgesGroupZuluPairedToOdd = None; del selectBridgesGroupZuluPairedToOdd # pyright: ignore[reportAssignmentType] # noqa: E702
|
|
236
|
-
selectGroupAlphaAtEven = None; del selectGroupAlphaAtEven # pyright: ignore[reportAssignmentType] # noqa: E702
|
|
237
|
-
selectGroupAlphaCurves = None; del selectGroupAlphaCurves # pyright: ignore[reportAssignmentType] # noqa: E702
|
|
238
|
-
selectGroupZuluAtEven = None; del selectGroupZuluAtEven # pyright: ignore[reportAssignmentType] # noqa: E702
|
|
239
|
-
selectGroupZuluCurves = None; del selectGroupZuluCurves # pyright: ignore[reportAssignmentType] # noqa: E702
|
|
451
|
+
del dataframeAnalyzed
|
|
240
452
|
goByeBye()
|
|
241
453
|
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
454
|
+
state.bitWidth = int(dataframeCurveLocations['curveLocations'].max()).bit_length()
|
|
455
|
+
length: int = getBucketsTotal(state)
|
|
456
|
+
dataframeAnalyzed = pandas.DataFrame({
|
|
457
|
+
'analyzed': pandas.Series(0, pandas.RangeIndex(length), dtype=state.datatypeCurveLocations, name='analyzed')
|
|
458
|
+
, 'distinctCrossings': pandas.Series(0, pandas.RangeIndex(length), dtype=state.datatypeDistinctCrossings, name='distinctCrossings')
|
|
459
|
+
}, index=pandas.RangeIndex(length), columns=['analyzed', 'distinctCrossings'], dtype=state.datatypeCurveLocations # pyright: ignore[reportUnknownArgumentType]
|
|
245
460
|
)
|
|
246
|
-
selectBridgesAlignedLessThanMaximum: SelectorIndices = numpy.flatnonzero(selectBridgesAligned)[numpy.flatnonzero(curveLocationsBridgesAligned < curveLocationsMAXIMUM)]
|
|
247
461
|
|
|
248
|
-
|
|
249
|
-
arrayCurveLocations[sliceAllocateBridgesAligned, columnDistinctCrossings] = arrayCurveGroups[selectBridgesAlignedLessThanMaximum, columnDistinctCrossings]
|
|
250
|
-
arrayCurveLocations[sliceAllocateBridgesAligned, columnCurveLocations] = curveLocationsBridgesAligned[numpy.flatnonzero(curveLocationsBridgesAligned < curveLocationsMAXIMUM)]
|
|
462
|
+
state.kOfMatrix -= 1
|
|
251
463
|
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
selectBridgesAlignedLessThanMaximum = None; del selectBridgesAlignedLessThanMaximum # pyright: ignore[reportAssignmentType] # noqa: E702
|
|
257
|
-
goByeBye()
|
|
464
|
+
state.indexStartAnalyzed = 0
|
|
465
|
+
|
|
466
|
+
analyzeCurveLocationsSimple()
|
|
467
|
+
recordCurveLocations()
|
|
258
468
|
|
|
259
|
-
|
|
260
|
-
|
|
469
|
+
analyzeCurveLocationsAlpha()
|
|
470
|
+
recordCurveLocations()
|
|
261
471
|
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
del
|
|
472
|
+
analyzeCurveLocationsZulu()
|
|
473
|
+
recordCurveLocations()
|
|
474
|
+
|
|
475
|
+
analyzeCurveLocationsAligned()
|
|
476
|
+
recordCurveLocations()
|
|
477
|
+
del dataframeCurveLocations
|
|
268
478
|
goByeBye()
|
|
269
479
|
|
|
270
|
-
|
|
480
|
+
aggregateCurveLocations()
|
|
481
|
+
|
|
482
|
+
if state.n >= 45: # for data collection
|
|
483
|
+
print(state.n, state.kOfMatrix+1, state.indexStartAnalyzed, sep=',') # noqa: T201
|
|
271
484
|
|
|
272
|
-
|
|
273
|
-
return
|
|
485
|
+
state.dictionaryCurveLocations = dataframeAnalyzed.set_index('analyzed')['distinctCrossings'].to_dict()
|
|
486
|
+
return state
|
|
274
487
|
|
|
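Aside (reviewer note, not part of the diff): `countPandas` leans on a few pandas Series idioms in place of in-place shift operators: floor-division by `2**k` for a right shift, multiplication by `2**k` for a left shift, and, per the note in `analyzeCurveLocationsSimple`, `+= 3` standing in for `|= 3` once the low two bits are known to be zero. A minimal, package-independent check of those equivalences:

```python
# Illustrative check of the Series arithmetic idioms used in countPandas.
import numpy
import pandas

series = pandas.Series([0b1011, 0b0110, 0b1], dtype='uint64')

# Floor-division / multiplication by powers of two emulate >> and << on the values.
assert ((series // 2**1).to_numpy() == (series.to_numpy() >> 1)).all()
assert ((series * 2**2).to_numpy() == (series.to_numpy() << 2)).all()

# After a two-bit left shift the low bits are 0b00, so adding 3 equals OR-ing 3.
shifted = series * 2**2
assert ((shifted + 3) == (shifted | 3)).all()
```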
-def doTheNeedful(
+def doTheNeedful(state: MatrixMeandersState) -> int:
 """Compute a(n) meanders with the transfer matrix algorithm.

 Parameters
 ----------
-
-The
-dictionaryCurveLocations : dict[int, int]
-A dictionary mapping curve locations to their counts.
+state : MatrixMeandersState
+The algorithm state containing current `kOfMatrix`, `dictionaryCurveLocations`, and thresholds.

 Returns
 -------
 a(n) : int
 The computed value of a(n).

-
-
-
-As first computed by Iwan Jensen in 2000, A000682(41) = 6664356253639465480.
+Notes
+-----
 Citation: https://github.com/hunterhogan/mapFolding/blob/main/citations/Jensen.bibtex
-See also https://oeis.org/A000682
-
-I'm sure you instantly observed that A000682(41) = (6664356253639465480).bit_length() = 63 bits. And A005316(44) =
-(18276178714484582264).bit_length() = 64 bits.

-
-
-
-
-
+See Also
+--------
+https://oeis.org/A000682
+https://oeis.org/A005316
+"""
+while state.kOfMatrix > 0:
+goByeBye()

-
-
-
-
+if areIntegersWide(state):
+state = countBigInt(state)
+else:
+state = countPandas(state)

-
+return sum(state.dictionaryCurveLocations.values())

-In total, to compute a(n) for "large" n, I use three-stages.
-1. I use Python primitive `int` contained in a Python primitive `dict`.
-2. When the bit width of the bit-packed integers connected to `bridges` is small enough to use `numpy.uint64`, I switch to NumPy for the heavy lifting.
-3. When `distinctCrossings` subtotals might exceed 64 bits, I must switch back to Python primitives.
-"""
-# NOTE '29' is based on two things. 1) `bridges = 29`, groupZuluLocator = 0xaaaaaaaaaaaaaaaa.bit_length() = 64. 2) If `bridges =
-# 30` or a larger number, `OverflowError: int too big to convert`. Conclusion: '29' isn't necessarily correct or the best value:
-# it merely fits within my limited ability to assess the correct value.
-# NOTE the above was written when I had the `bridges >= bridgesMinimum` bug. So, apply '-1' to everything.
-# NOTE This default value is necessary: it prevents `count64` from returning an incomplete dictionary when that is not necessary.
-# TODO `count64_bridgesMaximum` might be a VERY good idea as a second safeguard against overflowing distinctCrossingsTotal. But
-# I'm pretty sure I should use an actual check on maximum bit-width in arrayCurveGroups[:, columnDistinctCrossings] at the start
-# of each while loop. Tests on A000682 showed that the max bit-width of arrayCurveGroups[:, columnDistinctCrossings] always
-# increased by 1 or 2 bits on each iteration: never 0 and never 3. I did not test A005316. And I do not have a mathematical proof of the limit.
-
-count64_bridgesMaximum = 28
-bridgesMinimum = 0
-distinctCrossings64bitLimitAsValueOf_n = 41
-distinctCrossingsSubtotal64bitLimitAsValueOf_n_WAG = distinctCrossings64bitLimitAsValueOf_n - 3
-distinctCrossings64bitLimitSafetyMargin = 4
-
-dictionaryCurveGroups: dict[tuple[int, int], int] = convertDictionaryCurveLocations2CurveGroups(dictionaryCurveLocations)
-
-if n >= count64_bridgesMaximum:
-if n >= distinctCrossingsSubtotal64bitLimitAsValueOf_n_WAG:
-bridgesMinimum = n - distinctCrossingsSubtotal64bitLimitAsValueOf_n_WAG + distinctCrossings64bitLimitSafetyMargin
-n, dictionaryCurveGroups = count(n, dictionaryCurveGroups, count64_bridgesMaximum)
-gc.collect()
-n, arrayCurveGroups = count64(n, convertDictionaryCurveGroups2array(dictionaryCurveGroups), bridgesMinimum)
-if n > 0:
-gc.collect()
-n, dictionaryCurveGroups = count(n, convertArrayCurveGroups2dictionaryCurveGroups(arrayCurveGroups), bridgesMinimum=0)
-distinctCrossingsTotal = sum(dictionaryCurveGroups.values())
-else:
-distinctCrossingsTotal = int(arrayCurveGroups[0, columnDistinctCrossings])
-return distinctCrossingsTotal