mapFolding 0.16.2-py3-none-any.whl → 0.17.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- easyRun/A000682.py +2 -2
- easyRun/NOTcountingFolds.py +16 -8
- easyRun/countFolds.py +9 -2
- easyRun/generateAllModules.py +14 -0
- easyRun/meanders.py +4 -4
- mapFolding/__init__.py +1 -0
- mapFolding/_theSSOT.py +3 -2
- mapFolding/_theTypes.py +3 -0
- mapFolding/algorithms/A000136constraintPropagation.py +95 -0
- mapFolding/algorithms/A000136elimination.py +163 -0
- mapFolding/algorithms/A000136eliminationParallel.py +77 -0
- mapFolding/algorithms/A086345.py +75 -0
- mapFolding/algorithms/matrixMeanders.py +59 -18
- mapFolding/algorithms/matrixMeandersNumPyndas.py +841 -0
- mapFolding/algorithms/oeisIDbyFormula.py +2 -2
- mapFolding/algorithms/symmetricFolds.py +35 -0
- mapFolding/basecamp.py +100 -153
- mapFolding/dataBaskets.py +142 -65
- mapFolding/filesystemToolkit.py +4 -32
- mapFolding/oeis.py +5 -12
- mapFolding/reference/A086345Wu.py +25 -0
- mapFolding/reference/irvineJavaPort.py +3 -3
- mapFolding/reference/matrixMeandersAnalysis/signatures.py +3 -0
- mapFolding/reference/meandersDumpingGround/matrixMeandersNumPyV1finalForm.py +1 -1
- mapFolding/someAssemblyRequired/A007822/A007822rawMaterials.py +10 -45
- mapFolding/someAssemblyRequired/A007822/_asynchronousAnnex.py +51 -0
- mapFolding/someAssemblyRequired/A007822/makeA007822AsynchronousModules.py +39 -196
- mapFolding/someAssemblyRequired/A007822/makeA007822Modules.py +57 -43
- mapFolding/someAssemblyRequired/RecipeJob.py +84 -34
- mapFolding/someAssemblyRequired/__init__.py +4 -8
- mapFolding/someAssemblyRequired/_toolkitContainers.py +38 -7
- mapFolding/someAssemblyRequired/infoBooth.py +41 -23
- mapFolding/someAssemblyRequired/makeJobTheorem2Numba.py +140 -164
- mapFolding/someAssemblyRequired/makeJobTheorem2codon.py +63 -96
- mapFolding/someAssemblyRequired/makingModules_count.py +26 -30
- mapFolding/someAssemblyRequired/makingModules_doTheNeedful.py +10 -72
- mapFolding/someAssemblyRequired/{mapFolding → mapFoldingModules}/makeMapFoldingModules.py +30 -35
- mapFolding/someAssemblyRequired/meanders/makeMeandersModules.py +13 -11
- mapFolding/someAssemblyRequired/toolkitMakeModules.py +5 -31
- mapFolding/someAssemblyRequired/toolkitNumba.py +3 -2
- mapFolding/someAssemblyRequired/transformationTools.py +12 -15
- mapFolding/syntheticModules/A007822/algorithm.py +45 -50
- mapFolding/syntheticModules/A007822/asynchronous.py +92 -36
- mapFolding/syntheticModules/A007822/initializeState.py +19 -23
- mapFolding/syntheticModules/A007822/theorem2.py +20 -24
- mapFolding/syntheticModules/A007822/theorem2Numba.py +23 -25
- mapFolding/syntheticModules/A007822/theorem2Trimmed.py +19 -23
- mapFolding/syntheticModules/countParallelNumba.py +1 -2
- mapFolding/syntheticModules/daoOfMapFoldingNumba.py +5 -4
- mapFolding/syntheticModules/initializeState.py +1 -1
- mapFolding/syntheticModules/meanders/bigInt.py +59 -22
- mapFolding/syntheticModules/theorem2.py +1 -1
- mapFolding/syntheticModules/theorem2Numba.py +30 -9
- mapFolding/syntheticModules/theorem2Trimmed.py +2 -2
- mapFolding/tests/test_computations.py +29 -3
- {mapfolding-0.16.2.dist-info → mapfolding-0.17.0.dist-info}/METADATA +11 -8
- mapfolding-0.17.0.dist-info/RECORD +107 -0
- mapFolding/_dataPacking.py +0 -68
- mapFolding/algorithms/matrixMeandersBeDry.py +0 -182
- mapFolding/algorithms/matrixMeandersNumPy.py +0 -333
- mapFolding/algorithms/matrixMeandersPandas.py +0 -334
- mapFolding/reference/meandersDumpingGround/A005316intOptimized.py +0 -122
- mapFolding/reference/meandersDumpingGround/A005316optimized128bit.py +0 -79
- mapFolding/reference/meandersDumpingGround/matrixMeandersBaseline.py +0 -65
- mapFolding/reference/meandersDumpingGround/matrixMeandersBaselineAnnex.py +0 -84
- mapFolding/reference/meandersDumpingGround/matrixMeandersSimpleQueue.py +0 -90
- mapFolding/syntheticModules/A007822/algorithmNumba.py +0 -94
- mapFolding/syntheticModules/A007822/asynchronousAnnex.py +0 -66
- mapFolding/syntheticModules/A007822/asynchronousAnnexNumba.py +0 -70
- mapFolding/syntheticModules/A007822/asynchronousNumba.py +0 -79
- mapFolding/syntheticModules/A007822/asynchronousTheorem2.py +0 -65
- mapFolding/syntheticModules/A007822/asynchronousTrimmed.py +0 -56
- mapFolding/syntheticModules/dataPacking.py +0 -26
- mapFolding/syntheticModules/dataPackingA007822.py +0 -92
- mapfolding-0.16.2.dist-info/RECORD +0 -115
- /mapFolding/someAssemblyRequired/{mapFolding → mapFoldingModules}/__init__.py +0 -0
- {mapfolding-0.16.2.dist-info → mapfolding-0.17.0.dist-info}/WHEEL +0 -0
- {mapfolding-0.16.2.dist-info → mapfolding-0.17.0.dist-info}/entry_points.txt +0 -0
- {mapfolding-0.16.2.dist-info → mapfolding-0.17.0.dist-info}/licenses/LICENSE +0 -0
- {mapfolding-0.16.2.dist-info → mapfolding-0.17.0.dist-info}/top_level.txt +0 -0
mapFolding/algorithms/matrixMeandersNumPyndas.py
@@ -0,0 +1,841 @@
"""Transfer matrix algorithm implementations in NumPy (*Num*erical *Py*thon) and pandas.

Citations
---------
- https://github.com/hunterhogan/mapFolding/blob/main/citations/Jensen.bib
- https://github.com/hunterhogan/mapFolding/blob/main/citations/Howroyd.bib

See Also
--------
`matrixMeanders`: transfer matrix algorithm implementation in pure Python with `int` (*int*eger) contained in a `dict` (*dict*ionary).
https://oeis.org/A000682
https://oeis.org/A005316
https://github.com/archmageirvine/joeis/blob/5dc2148344bff42182e2128a6c99df78044558c5/src/irvine/oeis/a005/A005316.java
"""
from functools import cache
from gc import collect as goByeBye
from hunterMakesPy import raiseIfNone
from mapFolding import ShapeArray, ShapeSlicer
from mapFolding.algorithms.matrixMeanders import walkDyckPath
from mapFolding.dataBaskets import MatrixMeandersState
from mapFolding.reference.A000682facts import A000682_n_boundary_buckets
from mapFolding.reference.A005316facts import A005316_n_boundary_buckets
from numpy import (
	bitwise_and, bitwise_left_shift, bitwise_or, bitwise_right_shift, bitwise_xor, greater, less_equal, multiply, subtract)
from numpy.typing import NDArray
from typing import Any, TYPE_CHECKING, TypeAlias
from warnings import warn
import dataclasses
import numpy
import pandas

# TODO Don't require pandas to run NumPy version. The "challenge" is that `areIntegersWide` can take a DataFrame.
if TYPE_CHECKING:
	from numpy.lib._arraysetops_impl import UniqueInverseResult

"""Goals:
- Extreme abstraction.
- Find operations with latent intermediate arrays and make the intermediate array explicit.
- Reduce or eliminate intermediate arrays and selector arrays.
- Write formulas in prefix notation.
- For each formula, find an equivalent prefix notation formula that never uses the same variable as input more than once: that
	would allow the evaluation of the expression with only a single stack, which saves memory.
- Standardize code as much as possible to create duplicate code.
- Convert duplicate code to procedures.
"""
# TODO Ideally, all of the hardcoded `numpy.uint64` would be abstracted to match the `datatypeArcCode` and `datatypeCrossings`
# fields of `MatrixMeandersNumPyState`, which probably means defining those datatypes outside of `MatrixMeandersNumPyState`.

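The "Goals" note above calls for rewriting each formula in prefix notation so that every operand is consumed exactly once, which lets an expression be evaluated with a single stack. A minimal sketch of that single-stack idea (illustrative only, not part of the packaged module; the token list and evaluator are hypothetical):

```python
# Evaluate a prefix expression right to left with one stack; each operand is used once.
# ['|', '<<', 'bitsZulu', 1, 'bitsAlpha'] encodes `(bitsZulu << 1) | bitsAlpha`.
from operator import lshift, or_

operators = {'<<': lshift, '|': or_}

def evaluatePrefix(tokens: list, values: dict[str, int]) -> int:
    stack: list[int] = []
    for token in reversed(tokens):
        if token in operators:
            stack.append(operators[token](stack.pop(), stack.pop()))
        else:
            stack.append(values[token] if isinstance(token, str) else token)
    return stack.pop()

assert evaluatePrefix(['|', '<<', 'bitsZulu', 1, 'bitsAlpha'], {'bitsZulu': 0b10, 'bitsAlpha': 0b1}) == 0b101
```
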
@dataclasses.dataclass(slots=True)
class MatrixMeandersNumPyState(MatrixMeandersState):
	"""Hold the state of a meanders transfer matrix algorithm computation implemented in NumPy (*Num*erical *Py*thon) or pandas."""

	arrayArcCodes: NDArray[numpy.uint64] = dataclasses.field(default_factory=lambda: numpy.empty((0,), dtype=numpy.uint64))
	arrayCrossings: NDArray[numpy.uint64] = dataclasses.field(default_factory=lambda: numpy.empty((0,), dtype=numpy.uint64))

	bitWidthLimitArcCode: int | None = None
	bitWidthLimitCrossings: int | None = None

	datatypeArcCode: TypeAlias = numpy.uint64 # noqa: UP040
	"""The fixed-size integer type used to store `arcCode`."""
	datatypeCrossings: TypeAlias = numpy.uint64 # noqa: UP040
	"""The fixed-size integer type used to store `crossings`."""
	# Hypothetically, the above datatypes could be different from each other, especially in pandas.

	indexTarget: int = 0
	"""What is being indexed depends on the algorithm flavor."""

	def __post_init__(self) -> None:
		"""Post init."""
		if self.bitWidthLimitArcCode is None:
			_bitWidthOfFixedSizeInteger: int = numpy.dtype(self.datatypeArcCode).itemsize * 8 # bits

			_offsetNecessary: int = 3 # For example, `bitsZulu << 3`.
			_offsetSafety: int = 1 # I don't have mathematical proof of how many extra bits I need.
			_offset: int = _offsetNecessary + _offsetSafety

			self.bitWidthLimitArcCode = _bitWidthOfFixedSizeInteger - _offset

			del _bitWidthOfFixedSizeInteger, _offsetNecessary, _offsetSafety, _offset

		if self.bitWidthLimitCrossings is None:
			_bitWidthOfFixedSizeInteger: int = numpy.dtype(self.datatypeCrossings).itemsize * 8 # bits

			_offsetNecessary: int = 0 # I don't know of any.
			_offsetEstimation: int = 3 # See 'reference' directory.
			_offsetSafety: int = 1
			_offset: int = _offsetNecessary + _offsetEstimation + _offsetSafety

			self.bitWidthLimitCrossings = _bitWidthOfFixedSizeInteger - _offset

			del _bitWidthOfFixedSizeInteger, _offsetNecessary, _offsetEstimation, _offsetSafety, _offset

	def makeDictionary(self) -> None:
		"""Convert from NumPy `ndarray` (*Num*erical *Py*thon *n-d*imensional array) to Python `dict` (*dict*ionary)."""
		self.dictionaryMeanders = {int(key): int(value) for key, value in zip(self.arrayArcCodes, self.arrayCrossings, strict=True)}
		self.arrayArcCodes = numpy.empty((0,), dtype=self.datatypeArcCode)
		self.arrayCrossings = numpy.empty((0,), dtype=self.datatypeCrossings)

	def makeArray(self) -> None:
		"""Convert from Python `dict` (*dict*ionary) to NumPy `ndarray` (*Num*erical *Py*thon *n-d*imensional array)."""
		self.arrayArcCodes = numpy.array(list(self.dictionaryMeanders.keys()), dtype=self.datatypeArcCode)
		self.arrayCrossings = numpy.array(list(self.dictionaryMeanders.values()), dtype=self.datatypeCrossings)
		self.bitWidth = int(self.arrayArcCodes.max()).bit_length()
		self.dictionaryMeanders = {}

	def setBitWidthNumPy(self) -> None:
		"""Set `bitWidth` from the current `arrayArcCodes`."""
		self.bitWidth = int(self.arrayArcCodes.max()).bit_length()

def areIntegersWide(state: MatrixMeandersNumPyState, *, dataframe: pandas.DataFrame | None = None, fixedSizeMAXIMUMarcCode: bool = False) -> bool:
	"""Check if the largest values are wider than the maximum limits.

	Parameters
	----------
	state : MatrixMeandersState
		The current state of the computation, including `dictionaryMeanders`.
	dataframe : pandas.DataFrame | None = None
		DataFrame containing 'analyzed' and 'crossings' columns. If provided, use this instead of `state.dictionaryMeanders`.
	fixedSizeMAXIMUMarcCode : bool = False
		Set this to `True` if you cast `state.MAXIMUMarcCode` to the same fixed size integer type as `state.datatypeArcCode`.

	Returns
	-------
	wider : bool
		True if at least one integer is wider than the fixed-size integers.

	Notes
	-----
	Casting `state.MAXIMUMarcCode` to a fixed-size 64-bit unsigned integer might cause the flow to be a little more
	complicated because `MAXIMUMarcCode` is usually 1-bit larger than the `max(arcCode)` value.

	If you start the algorithm with very large `arcCode` in your `dictionaryMeanders` (*i.e.,* A000682), then the
	flow will go to a function that does not use fixed size integers. When the integers are below the limits (*e.g.,*
	`bitWidthArcCodeMaximum`), the flow will go to a function with fixed size integers. In that case, casting
	`MAXIMUMarcCode` to a fixed size merely delays the transition from one function to the other by one iteration.

	If you start with small values in `dictionaryMeanders`, however, then the flow goes to the function with fixed size
	integers and usually stays there until `crossings` is huge, which is near the end of the computation. If you cast
	`MAXIMUMarcCode` into a 64-bit unsigned integer, however, then around `state.boundary == 28`, the bit width of
	`MAXIMUMarcCode` might exceed the limit. That will cause the flow to go to the function that does not have fixed size
	integers for a few iterations before returning to the function with fixed size integers.
	"""
	if dataframe is not None:
		arcCodeWidest = int(dataframe['analyzed'].max()).bit_length()
		crossingsWidest = int(dataframe['crossings'].max()).bit_length()
	elif not state.dictionaryMeanders:
		arcCodeWidest = int(state.arrayArcCodes.max()).bit_length()
		crossingsWidest = int(state.arrayCrossings.max()).bit_length()
	else:
		arcCodeWidest: int = max(state.dictionaryMeanders.keys()).bit_length()
		crossingsWidest: int = max(state.dictionaryMeanders.values()).bit_length()

	MAXIMUMarcCode: int = 0
	if fixedSizeMAXIMUMarcCode:
		MAXIMUMarcCode = state.MAXIMUMarcCode

	return (arcCodeWidest > raiseIfNone(state.bitWidthLimitArcCode)
		or crossingsWidest > raiseIfNone(state.bitWidthLimitCrossings)
		or MAXIMUMarcCode > raiseIfNone(state.bitWidthLimitArcCode)
	)

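With the default `numpy.uint64` datatypes, `__post_init__` above sets both bit-width limits to 60 (64 bits minus a 4-bit offset), so this check asks whether any `arcCode` or `crossings` value needs more than 60 bits; if so, the callers route the work to the arbitrary-precision path (`countBigInt`). A small self-contained illustration of the same decision on plain Python integers (the sample values are made up):

```python
import numpy

bitWidthLimit = numpy.dtype(numpy.uint64).itemsize * 8 - 4  # 60, matching `__post_init__` above

dictionaryMeanders = {0b1101: 7, 2**61 + 5: 3}  # hypothetical arcCode -> crossings pairs
arcCodeWidest = max(dictionaryMeanders.keys()).bit_length()  # 62
crossingsWidest = max(dictionaryMeanders.values()).bit_length()  # 3

# True: at least one value is too wide for the fixed-size path.
print(arcCodeWidest > bitWidthLimit or crossingsWidest > bitWidthLimit)
```
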
@cache
def _flipTheExtra_0b1(intWithExtra_0b1: numpy.uint64) -> numpy.uint64:
	return numpy.uint64(intWithExtra_0b1 ^ walkDyckPath(int(intWithExtra_0b1)))

flipTheExtra_0b1AsUfunc = numpy.frompyfunc(_flipTheExtra_0b1, 1, 1)
"""Flip a bit based on Dyck path: element-wise ufunc (*u*niversal *func*tion) for a NumPy `ndarray` (*Num*erical *Py*thon *n-d*imensional array).

Warning
-------
The function will loop infinitely if *any* element does not have a bit that needs flipping.

Parameters
----------
arrayTarget : numpy.ndarray[tuple[int], numpy.dtype[numpy.unsignedinteger[Any]]]
	An array with one axis of unsigned integers and unbalanced closures.

Returns
-------
arrayFlipped : numpy.ndarray[tuple[int], numpy.dtype[numpy.unsignedinteger[Any]]]
	An array with the same shape as `arrayTarget` but with one bit flipped in each element.
"""

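`numpy.frompyfunc` wraps the cached scalar function so it is called once per element, which is why the module only applies `flipTheExtra_0b1AsUfunc` to elements chosen by a selector (see the Warning above). An illustrative sketch of that selective-application pattern with a stand-in scalar function (hypothetical; it is not `walkDyckPath`):

```python
import numpy

def _clearLowestSetBit(value: numpy.uint64) -> numpy.uint64:
    # Stand-in scalar function; numpy.frompyfunc accepts any Python callable.
    return numpy.uint64(int(value) & (int(value) - 1))

clearLowestSetBitAsUfunc = numpy.frompyfunc(_clearLowestSetBit, 1, 1)

array = numpy.array([0b1011, 0b1000, 0b0110], dtype=numpy.uint64)
selector = numpy.flatnonzero(array & numpy.uint64(1))  # apply only where the element needs the change
array[selector] = clearLowestSetBitAsUfunc(array[selector])  # object-dtype result cast back on assignment
print(array)  # [10  8  6]
```
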
def getBucketsTotal(state: MatrixMeandersNumPyState, safetyMultiplicand: float = 1.2) -> int: # noqa: ARG001
	"""Under renovation: Estimate the total number of non-unique arcCode that will be computed from the existing arcCode.

	Warning
	-------
	Because `countPandas` does not store anything in `state.arrayArcCodes`, if `countPandas` requests
	bucketsTotal for a value not in the dictionary, the returned value will be 0. But `countPandas` should have a safety
	check that will allocate more space.

	Notes
	-----
	TODO remake this function from scratch.

	Factors:
	- The starting quantity of `arcCode`.
	- The value(s) of the starting `arcCode`.
	- n
	- boundary
	- Whether this bucketsTotal is increasing, as compared to all of the prior bucketsTotal.
	- If increasing, is it exponential or logarithmic?
	- The maximum value.
	- If decreasing, I don't really know the factors.
	- If I know the actual value or if I must estimate it.

	Figure out an intelligent flow for so many factors.
	"""
	theDictionary: dict[str, dict[int, dict[int, int]]] = {'A005316': A005316_n_boundary_buckets, 'A000682': A000682_n_boundary_buckets}
	bucketsTotal: int = theDictionary.get(state.oeisID, {}).get(state.n, {}).get(state.boundary, 0)
	if bucketsTotal <= 0:
		bucketsTotal = int(3.55 * len(state.arrayArcCodes))

	return bucketsTotal

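The lookup above chains `.get()` calls through the precomputed `{oeisID: {n: {boundary: buckets}}}` tables and falls back to a multiple of the current arc-code count when no entry exists. A toy illustration of that fallback behavior (the table contents are hypothetical):

```python
# Hypothetical table shaped like A005316_n_boundary_buckets: {n: {boundary: buckets}}.
theDictionary = {'A005316': {45: {12: 123456}}}

print(theDictionary.get('A005316', {}).get(45, {}).get(12, 0))  # 123456: a known bucket count
print(theDictionary.get('A005316', {}).get(46, {}).get(12, 0))  # 0: caller falls back to 3.55 * len(arrayArcCodes)
```
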
def countNumPy(state: MatrixMeandersNumPyState) -> MatrixMeandersNumPyState:
	"""Count crossings with transfer matrix algorithm implemented in NumPy (*Num*erical *Py*thon).

	Parameters
	----------
	state : MatrixMeandersState
		The algorithm state.

	Returns
	-------
	state : MatrixMeandersState
		Updated state, including `boundary`, `arrayArcCodes`, and `arrayCrossings`.

	Notes
	-----
	This version is *relatively* slow for small values of `n` (*e.g.*, 3 seconds vs. 3 milliseconds) because of my aggressive use
	of garbage collection: I don't really know how to manage memory. On the other hand, it uses less memory for extreme
	values of `n`, which makes it faster due to less disk swapping--as compared to the pandas implementation and other NumPy
	implementations I tried.
	"""
	indicesPrepArea: int = 1
	indexAnalysis = 0
	slicerAnalysis: ShapeSlicer = ShapeSlicer(length=..., indices=indexAnalysis)

	indicesAnalyzed: int = 2
	indexArcCode, indexCrossings = range(indicesAnalyzed)
	slicerArcCode: ShapeSlicer = ShapeSlicer(length=..., indices=indexArcCode)
	slicerCrossings: ShapeSlicer = ShapeSlicer(length=..., indices=indexCrossings)

	while state.boundary > 0 and not areIntegersWide(state):
		def aggregateAnalyzed(arrayAnalyzed: NDArray[numpy.uint64], state: MatrixMeandersNumPyState) -> MatrixMeandersNumPyState:
			"""Create new `arrayMeanders` by deduplicating `arcCode` and summing `crossings`."""
			unique: UniqueInverseResult[numpy.uint64] = numpy.unique_inverse(arrayAnalyzed[slicerArcCode])

			state.arrayArcCodes = unique.values # noqa: PD011
			state.arrayCrossings = numpy.zeros_like(state.arrayArcCodes, dtype=state.datatypeCrossings)
			numpy.add.at(state.arrayCrossings, unique.inverse_indices, arrayAnalyzed[slicerCrossings])
			del unique

			return state

		def makeStorage[个: numpy.integer[Any]](dataTarget: NDArray[个], state: MatrixMeandersNumPyState, storageTarget: NDArray[numpy.uint64], indexAssignment: int = indexArcCode) -> NDArray[个]:
			"""Store `dataTarget` in `storageTarget` on `indexAssignment` if there is enough space, otherwise allocate a new array."""
			lengthStorageTarget: int = len(storageTarget)
			storageAvailable: int = lengthStorageTarget - state.indexTarget
			lengthDataTarget: int = len(dataTarget)

			if storageAvailable >= lengthDataTarget:
				indexStart: int = lengthStorageTarget - lengthDataTarget
				sliceStorage: slice = slice(indexStart, lengthStorageTarget)
				del indexStart
				slicerStorageAtIndex: ShapeSlicer = ShapeSlicer(length=sliceStorage, indices=indexAssignment)
				del sliceStorage
				storageTarget[slicerStorageAtIndex] = dataTarget.copy()
				arrayStorage = storageTarget[slicerStorageAtIndex].view() # pyright: ignore[reportAssignmentType]
				del slicerStorageAtIndex
			else:
				arrayStorage: NDArray[个] = dataTarget.copy()

			del storageAvailable, lengthDataTarget, lengthStorageTarget

			return arrayStorage

		def recordAnalysis(arrayAnalyzed: NDArray[numpy.uint64], state: MatrixMeandersNumPyState, arcCode: NDArray[numpy.uint64]) -> MatrixMeandersNumPyState:
			"""Record valid `arcCode` and corresponding `crossings` in `arrayAnalyzed`.

			This abstraction makes it easier to implement `numpy.memmap` or other options.
			"""
			selectorOverLimit = arcCode > state.MAXIMUMarcCode
			arcCode[selectorOverLimit] = 0
			del selectorOverLimit

			selectorAnalysis: NDArray[numpy.intp] = numpy.flatnonzero(arcCode)

			indexStop: int = state.indexTarget + len(selectorAnalysis)
			sliceAnalysis: slice = slice(state.indexTarget, indexStop)
			state.indexTarget = indexStop
			del indexStop

			slicerArcCodeAnalysis = ShapeSlicer(length=sliceAnalysis, indices=indexArcCode)
			slicerCrossingsAnalysis = ShapeSlicer(length=sliceAnalysis, indices=indexCrossings)
			del sliceAnalysis

			arrayAnalyzed[slicerArcCodeAnalysis] = arcCode[selectorAnalysis]
			del slicerArcCodeAnalysis

			arrayAnalyzed[slicerCrossingsAnalysis] = state.arrayCrossings[selectorAnalysis]
			del slicerCrossingsAnalysis, selectorAnalysis
			goByeBye()
			return state

		state.setBitWidthNumPy()
		state.setBitsLocator()

		lengthArrayAnalyzed: int = getBucketsTotal(state, 1.2)
		shape = ShapeArray(length=lengthArrayAnalyzed, indices=indicesAnalyzed)
		del lengthArrayAnalyzed
		goByeBye()

		arrayAnalyzed: NDArray[numpy.uint64] = numpy.zeros(shape, dtype=state.datatypeArcCode)
		del shape

		shape = ShapeArray(length=len(state.arrayArcCodes), indices=indicesPrepArea)
		arrayPrepArea: NDArray[numpy.uint64] = numpy.zeros(shape, dtype=state.datatypeArcCode)
		del shape

		prepArea: NDArray[numpy.uint64] = arrayPrepArea[slicerAnalysis].view()

		state.indexTarget = 0

		state.boundary -= 1
		state.setMAXIMUMarcCode()

		# =============== analyze aligned ===== if bitsAlpha > 1 and bitsZulu > 1 =============================================
		# NOTE In other versions, this analysis step is last because I modify the data. In this version, I don't modify the data.
		arrayBitsAlpha: NDArray[numpy.uint64] = bitwise_and(state.arrayArcCodes, state.bitsLocator) # NOTE extra array
		# ======= > * > bitsAlpha 1 bitsZulu 1 ====================
		greater(arrayBitsAlpha, 1, out=prepArea)
		bitsZuluStack: NDArray[numpy.uint64] = makeStorage(state.arrayArcCodes, state, arrayAnalyzed, indexCrossings)
		bitwise_right_shift(bitsZuluStack, 1, out=bitsZuluStack) # O indexArcCode X indexCrossings
		bitwise_and(bitsZuluStack, state.bitsLocator, out=bitsZuluStack)
		multiply(bitsZuluStack, prepArea, out=prepArea)
		greater(prepArea, 1, out=prepArea)
		selectorGreaterThan1: NDArray[numpy.uint64] = makeStorage(prepArea, state, arrayAnalyzed, indexArcCode)
		# X indexArcCode X indexCrossings
		# ======= if bitsAlphaAtEven and not bitsZuluAtEven ======= # ======= ^ & | ^ & bitsZulu 1 1 bitsAlpha 1 1 ============
		bitwise_and(bitsZuluStack, 1, out=prepArea)
		del bitsZuluStack # X indexArcCode O indexCrossings
		bitwise_xor(prepArea, 1, out=prepArea)
		bitwise_or(arrayBitsAlpha, prepArea, out=prepArea)
		bitwise_and(prepArea, 1, out=prepArea)
		bitwise_xor(prepArea, 1, out=prepArea)

		bitwise_and(selectorGreaterThan1, prepArea, out=prepArea)
		selectorAlignAlpha: NDArray[numpy.intp] = makeStorage(numpy.flatnonzero(prepArea), state, arrayAnalyzed, indexCrossings)
		# X indexArcCode X indexCrossings
		arrayBitsAlpha[selectorAlignAlpha] = flipTheExtra_0b1AsUfunc(arrayBitsAlpha[selectorAlignAlpha])
		del selectorAlignAlpha # X indexArcCode O indexCrossings

		# ======= if bitsZuluAtEven and not bitsAlphaAtEven ======= # ======= ^ & | ^ & bitsAlpha 1 1 bitsZulu 1 1 ============
		bitsAlphaStack: NDArray[numpy.uint64] = makeStorage(state.arrayArcCodes, state, arrayAnalyzed, indexCrossings)
		bitwise_and(bitsAlphaStack, state.bitsLocator, out=bitsAlphaStack)
		bitwise_and(bitsAlphaStack, 1, out=prepArea)
		del bitsAlphaStack # X indexArcCode O indexCrossings
		bitwise_xor(prepArea, 1, out=prepArea)
		bitsZuluStack: NDArray[numpy.uint64] = makeStorage(state.arrayArcCodes, state, arrayAnalyzed, indexCrossings)
		bitwise_right_shift(bitsZuluStack, 1, out=bitsZuluStack)
		bitwise_and(bitsZuluStack, state.bitsLocator, out=bitsZuluStack)
		bitwise_or(bitsZuluStack, prepArea, out=prepArea)
		del bitsZuluStack # X indexArcCode O indexCrossings
		bitwise_and(prepArea, 1, out=prepArea)
		bitwise_xor(prepArea, 1, out=prepArea)

		bitwise_and(selectorGreaterThan1, prepArea, out=prepArea)
		selectorAlignZulu: NDArray[numpy.intp] = makeStorage(numpy.flatnonzero(prepArea), state, arrayAnalyzed, indexCrossings)
		# X indexArcCode X indexCrossings
		# ======= bitsAlphaAtEven or bitsZuluAtEven =============== # ======= ^ & & bitsAlpha 1 bitsZulu 1 ====================
		bitwise_and(state.arrayArcCodes, state.bitsLocator, out=prepArea)
		bitwise_and(prepArea, 1, out=prepArea)
		sherpaBitsZulu: NDArray[numpy.uint64] = bitwise_right_shift(state.arrayArcCodes, 1) # NOTE 2° extra array
		bitwise_and(sherpaBitsZulu, state.bitsLocator, out=sherpaBitsZulu)
		bitwise_and(sherpaBitsZulu, prepArea, out=prepArea)
		del sherpaBitsZulu # NOTE del 2° extra array
		bitwise_xor(prepArea, 1, out=prepArea)

		bitwise_and(selectorGreaterThan1, prepArea, out=prepArea) # `selectorBitsAtEven`
		del selectorGreaterThan1 # O indexArcCode X indexCrossings
		bitwise_xor(prepArea, 1, out=prepArea)
		selectorDisqualified: NDArray[numpy.intp] = makeStorage(numpy.flatnonzero(prepArea), state, arrayAnalyzed, indexArcCode)
		# X indexArcCode X indexCrossings
		bitwise_right_shift(state.arrayArcCodes, 1, out=prepArea)
		bitwise_and(prepArea, state.bitsLocator, out=prepArea)

		prepArea[selectorAlignZulu] = flipTheExtra_0b1AsUfunc(prepArea[selectorAlignZulu])
		del selectorAlignZulu # X indexArcCode O indexCrossings

		bitsZuluStack: NDArray[numpy.uint64] = makeStorage(prepArea, state, arrayAnalyzed, indexCrossings)

		# ======= (bitsZulu >> 2 << 3 | bitsAlpha) >> 2 =========== # ======= >> | << >> bitsZulu 2 3 bitsAlpha 2 =============
		bitwise_right_shift(bitsZuluStack, 2, out=prepArea)
		del bitsZuluStack # X indexArcCode O indexCrossings
		bitwise_left_shift(prepArea, 3, out=prepArea)
		bitwise_or(arrayBitsAlpha, prepArea, out=prepArea)
		del arrayBitsAlpha # NOTE del extra array
		bitwise_right_shift(prepArea, 2, out=prepArea)

		prepArea[selectorDisqualified] = 0
		del selectorDisqualified # O indexArcCode O indexCrossings

		state = recordAnalysis(arrayAnalyzed, state, prepArea)

		# ----------------- analyze bitsAlpha ------- (1 - (bitsAlpha & 1)) << 1 | bitsAlpha >> 2 | bitsZulu << 3 ---------
		bitsAlphaStack: NDArray[numpy.uint64] = makeStorage(state.arrayArcCodes, state, arrayAnalyzed, indexArcCode)
		bitwise_and(bitsAlphaStack, state.bitsLocator, out=bitsAlphaStack) # X indexArcCode O indexCrossings
		# ------- >> | << | (<< - 1 & bitsAlpha 1 1) << bitsZulu 3 2 bitsAlpha 2 ----------
		bitwise_and(bitsAlphaStack, 1, out=bitsAlphaStack)
		subtract(1, bitsAlphaStack, out=bitsAlphaStack)
		bitwise_left_shift(bitsAlphaStack, 1, out=bitsAlphaStack)
		bitsZuluStack: NDArray[numpy.uint64] = makeStorage(state.arrayArcCodes, state, arrayAnalyzed, indexCrossings)
		bitwise_right_shift(bitsZuluStack, 1, out=bitsZuluStack)
		bitwise_and(bitsZuluStack, state.bitsLocator, out=bitsZuluStack)
		bitwise_left_shift(bitsZuluStack, 3, out=prepArea)
		del bitsZuluStack # X indexArcCode O indexCrossings
		bitwise_or(bitsAlphaStack, prepArea, out=prepArea)
		del bitsAlphaStack # O indexArcCode O indexCrossings
		bitwise_left_shift(prepArea, 2, out=prepArea)
		bitsAlphaStack: NDArray[numpy.uint64] = makeStorage(state.arrayArcCodes, state, arrayAnalyzed, indexCrossings)
		bitwise_and(bitsAlphaStack, state.bitsLocator, out=bitsAlphaStack) # O indexArcCode X indexCrossings
		bitwise_or(bitsAlphaStack, prepArea, out=prepArea)
		bitwise_right_shift(prepArea, 2, out=prepArea)

		# ------- if bitsAlpha > 1 ------------ > bitsAlpha 1 -----
		less_equal(bitsAlphaStack, 1, out=bitsAlphaStack)
		selectorUnderLimit: NDArray[numpy.intp] = makeStorage(numpy.flatnonzero(bitsAlphaStack), state, arrayAnalyzed, indexArcCode)
		del bitsAlphaStack # X indexArcCode O indexCrossings
		prepArea[selectorUnderLimit] = 0
		del selectorUnderLimit # O indexArcCode O indexCrossings

		state = recordAnalysis(arrayAnalyzed, state, prepArea)

		# ----------------- analyze bitsZulu ---------- (1 - (bitsZulu & 1)) | bitsAlpha << 2 | bitsZulu >> 1 -------------
		arrayBitsZulu: NDArray[numpy.uint64] = makeStorage(state.arrayArcCodes, state, arrayAnalyzed, indexCrossings)
		arrayBitsZulu = bitwise_right_shift(arrayBitsZulu, 1) # O indexArcCode X indexCrossings
		arrayBitsZulu = bitwise_and(arrayBitsZulu, state.bitsLocator)
		# ------- >> | << | (- 1 & bitsZulu 1) << bitsAlpha 2 1 bitsZulu 1 ----------
		bitwise_and(arrayBitsZulu, 1, out=arrayBitsZulu)
		subtract(1, arrayBitsZulu, out=arrayBitsZulu)
		bitsAlphaStack: NDArray[numpy.uint64] = makeStorage(state.arrayArcCodes, state, arrayAnalyzed, indexArcCode)
		bitwise_and(bitsAlphaStack, state.bitsLocator, out=bitsAlphaStack) # X indexArcCode X indexCrossings
		bitwise_left_shift(bitsAlphaStack, 2, out=prepArea)
		del bitsAlphaStack # O indexArcCode X indexCrossings
		bitwise_or(arrayBitsZulu, prepArea, out=prepArea)
		del arrayBitsZulu # O indexArcCode O indexCrossings
		bitwise_left_shift(prepArea, 1, out=prepArea)
		bitsZuluStack: NDArray[numpy.uint64] = makeStorage(state.arrayArcCodes, state, arrayAnalyzed, indexCrossings)
		bitwise_right_shift(bitsZuluStack, 1, out=bitsZuluStack) # O indexArcCode X indexCrossings
		bitwise_and(bitsZuluStack, state.bitsLocator, out=bitsZuluStack)
		bitwise_or(bitsZuluStack, prepArea, out=prepArea)
		bitwise_right_shift(prepArea, 1, out=prepArea)

		# ------- if bitsZulu > 1 ------------- > bitsZulu 1 ------
		less_equal(bitsZuluStack, 1, out=bitsZuluStack)
		selectorUnderLimit = makeStorage(numpy.flatnonzero(bitsZuluStack), state, arrayAnalyzed, indexArcCode)
		del bitsZuluStack # X indexArcCode O indexCrossings
		prepArea[selectorUnderLimit] = 0
		del selectorUnderLimit # O indexArcCode O indexCrossings

		state = recordAnalysis(arrayAnalyzed, state, prepArea)

		# ----------------- analyze simple ------------------------ (bitsZulu << 1 | bitsAlpha) << 2 | 3 ------------------
		bitsZuluStack: NDArray[numpy.uint64] = makeStorage(state.arrayArcCodes, state, arrayAnalyzed, indexCrossings)
		bitwise_right_shift(bitsZuluStack, 1, out=bitsZuluStack) # O indexArcCode X indexCrossings
		bitwise_and(bitsZuluStack, state.bitsLocator, out=bitsZuluStack)
		# ------- | << | bitsAlpha << bitsZulu 1 2 3 --------------
		bitwise_left_shift(bitsZuluStack, 1, out=prepArea)
		del bitsZuluStack # O indexArcCode O indexCrossings
		bitsAlphaStack: NDArray[numpy.uint64] = makeStorage(state.arrayArcCodes, state, arrayAnalyzed, indexArcCode)
		bitwise_and(bitsAlphaStack, state.bitsLocator, out=bitsAlphaStack) # X indexArcCode O indexCrossings
		bitwise_or(bitsAlphaStack, prepArea, out=prepArea)
		del bitsAlphaStack # O indexArcCode O indexCrossings
		bitwise_left_shift(prepArea, 2, out=prepArea)
		bitwise_or(prepArea, 3, out=prepArea)

		state = recordAnalysis(arrayAnalyzed, state, prepArea)

		del prepArea, arrayPrepArea
		# ----------------------------------------------- aggregation ---------------------------------------------------------
		state.arrayArcCodes = numpy.zeros((0,), dtype=state.datatypeArcCode)
		arrayAnalyzed.resize((state.indexTarget, indicesAnalyzed))

		goByeBye()
		state = aggregateAnalyzed(arrayAnalyzed, state)

		del arrayAnalyzed

		if state.n >= 45: # Data collection for 'reference' directory.
			# oeisID,n,boundary,buckets,arcCodes,arcCodeBitWidth,crossingsBitWidth
			print(state.oeisID, state.n, state.boundary+1, state.indexTarget, len(state.arrayArcCodes), int(state.arrayArcCodes.max()).bit_length(), int(state.arrayCrossings.max()).bit_length(), sep=',') # noqa: T201
	return state

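The `aggregateAnalyzed` closure above deduplicates arc codes and sums their crossings with `numpy.unique_inverse` plus an unbuffered `numpy.add.at`. A self-contained illustration of that aggregation step on made-up values (`numpy.unique_inverse` requires NumPy 2.0 or later; `numpy.unique(..., return_inverse=True)` is the older spelling):

```python
import numpy

arcCodes = numpy.array([6, 3, 6, 9, 3], dtype=numpy.uint64)
crossings = numpy.array([1, 2, 4, 8, 16], dtype=numpy.uint64)

unique = numpy.unique_inverse(arcCodes)
summed = numpy.zeros_like(unique.values, dtype=numpy.uint64)
numpy.add.at(summed, unique.inverse_indices, crossings)  # scatter-add handles repeated indices

print(unique.values)  # [3 6 9]
print(summed)         # [18  5  8]
```
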
def countPandas(state: MatrixMeandersNumPyState) -> MatrixMeandersNumPyState:
	"""Count meanders with the transfer matrix algorithm using a pandas DataFrame.

	Parameters
	----------
	state : MatrixMeandersState
		The algorithm state containing current `boundary`, `dictionaryMeanders`, and thresholds.

	Returns
	-------
	state : MatrixMeandersState
		Updated state with new `boundary` and `dictionaryMeanders`.
	"""
	dataframeAnalyzed = pandas.DataFrame({
		'analyzed': pandas.Series(name='analyzed', data=state.dictionaryMeanders.keys(), copy=False, dtype=state.datatypeArcCode)
		, 'crossings': pandas.Series(name='crossings', data=state.dictionaryMeanders.values(), copy=False, dtype=state.datatypeCrossings)
		}
	)
	state.dictionaryMeanders.clear()

	while (state.boundary > 0 and not areIntegersWide(state, dataframe=dataframeAnalyzed)):

		def aggregateArcCodes() -> None:
			nonlocal dataframeAnalyzed
			dataframeAnalyzed = dataframeAnalyzed.iloc[0:state.indexTarget].groupby('analyzed', sort=False)['crossings'].aggregate('sum').reset_index()

		def analyzeArcCodesAligned(dataframeMeanders: pandas.DataFrame) -> pandas.DataFrame:
			"""Compute `arcCode` from `bitsAlpha` and `bitsZulu` if at least one is an even number.

			Before computing `arcCode`, some values of `bitsAlpha` and `bitsZulu` are modified.

			Warning
			-------
			This function deletes rows from `dataframeMeanders`. Always run this analysis last.

			Formula
			-------
			```python
			if bitsAlpha > 1 and bitsZulu > 1 and (bitsAlphaIsEven or bitsZuluIsEven):
				arcCode = (bitsAlpha >> 2) | ((bitsZulu >> 2) << 1)
			```
			"""
			# -------- Step 1 drop unqualified rows ---------------------------
			dataframeMeanders['analyzed'] = dataframeMeanders['arcCode'].copy()
			dataframeMeanders['analyzed'] &= state.bitsLocator # `bitsAlpha`

			dataframeMeanders['analyzed'] = dataframeMeanders['analyzed'].gt(1) # `if bitsAlphaHasArcs`

			bitsTarget: pandas.Series = dataframeMeanders['arcCode'].copy()
			bitsTarget //= 2**1
			bitsTarget &= state.bitsLocator # `bitsZulu`

			dataframeMeanders['analyzed'] *= bitsTarget
			del bitsTarget
			dataframeMeanders = dataframeMeanders.loc[(dataframeMeanders['analyzed'] > 1)] # `if (bitsAlphaHasArcs and bitsZuluHasArcs)`

			dataframeMeanders.loc[:, 'analyzed'] = dataframeMeanders['arcCode'].copy()
			dataframeMeanders.loc[:, 'analyzed'] &= state.bitsLocator # `bitsAlpha`

			dataframeMeanders.loc[:, 'analyzed'] &= 1 # One step of `bitsAlphaAtEven`.

			bitsTarget: pandas.Series = dataframeMeanders['arcCode'].copy()
			bitsTarget //= 2**1
			bitsTarget &= state.bitsLocator # `bitsZulu`

			dataframeMeanders.loc[:, 'analyzed'] &= bitsTarget # One step of `bitsZuluAtEven`.
			del bitsTarget
			dataframeMeanders.loc[:, 'analyzed'] ^= 1 # Combined second step for `bitsAlphaAtEven` and `bitsZuluAtEven`.

			dataframeMeanders = dataframeMeanders.loc[(dataframeMeanders['analyzed'] > 0)] # `if (bitsAlphaIsEven or bitsZuluIsEven)`

			# ------- Step 2 modify rows --------------------------------------
			# Make a selector for bitsZuluAtOdd, so you can modify bitsAlpha
			dataframeMeanders.loc[:, 'analyzed'] = dataframeMeanders['arcCode'].copy()
			dataframeMeanders.loc[:, 'analyzed'] //= 2**1 # Truncated conversion to `bitsZulu`
			dataframeMeanders.loc[:, 'analyzed'] &= 1 # `selectorBitsZuluAtOdd`

			bitsTarget = dataframeMeanders['arcCode'].copy()
			bitsTarget &= state.bitsLocator # `bitsAlpha`

			# `if bitsAlphaAtEven and not bitsZuluAtEven`, modify `bitsAlphaPairedToOdd`
			bitsTarget.loc[(dataframeMeanders['analyzed'] > 0)] = state.datatypeArcCode(
				flipTheExtra_0b1AsUfunc(bitsTarget.loc[(dataframeMeanders['analyzed'] > 0)]))

			dataframeMeanders.loc[:, 'analyzed'] = dataframeMeanders['arcCode'].copy()
			dataframeMeanders.loc[:, 'analyzed'] //= 2**1
			dataframeMeanders.loc[:, 'analyzed'] &= state.bitsLocator # `bitsZulu`

			# `if bitsZuluAtEven and not bitsAlphaAtEven`, modify `bitsZuluPairedToOdd`
			dataframeMeanders.loc[((dataframeMeanders.loc[:, 'arcCode'] & 1) > 0), 'analyzed'] = state.datatypeArcCode(
				flipTheExtra_0b1AsUfunc(dataframeMeanders.loc[((dataframeMeanders.loc[:, 'arcCode'] & 1) > 0), 'analyzed']))

			# -------- Step 3 compute `arcCode` -------------------------------
			dataframeMeanders.loc[:, 'analyzed'] //= 2**2 # (bitsZulu >> 2)
			dataframeMeanders.loc[:, 'analyzed'] *= 2**3 # (... << 3)
			dataframeMeanders.loc[:, 'analyzed'] |= bitsTarget # (... | bitsAlpha)
			del bitsTarget
			dataframeMeanders.loc[:, 'analyzed'] //= 2**2 # ... >> 2

			dataframeMeanders.loc[dataframeMeanders['analyzed'] >= state.MAXIMUMarcCode, 'analyzed'] = 0

			return dataframeMeanders

		def analyzeArcCodesSimple(dataframeMeanders: pandas.DataFrame) -> pandas.DataFrame:
			"""Compute arcCode with the 'simple' formula.

			Formula
			-------
			```python
			arcCode = ((bitsAlpha | (bitsZulu << 1)) << 2) | 3
			```

			Notes
			-----
			Using `+= 3` instead of `|= 3` is valid in this specific case. Left shift by two means the last bits are '0b00'. '0 + 3'
			is '0b11', and '0b00 | 0b11' is also '0b11'.

			"""
			dataframeMeanders['analyzed'] = dataframeMeanders['arcCode']
			dataframeMeanders.loc[:, 'analyzed'] &= state.bitsLocator

			bitsZulu: pandas.Series = dataframeMeanders['arcCode'].copy()
			bitsZulu //= 2**1
			bitsZulu &= state.bitsLocator # `bitsZulu`

			bitsZulu *= 2**1 # (bitsZulu << 1)

			dataframeMeanders.loc[:, 'analyzed'] |= bitsZulu # ((bitsAlpha | (bitsZulu ...))

			del bitsZulu

			dataframeMeanders.loc[:, 'analyzed'] *= 2**2 # (... << 2)
			dataframeMeanders.loc[:, 'analyzed'] += 3 # (...) | 3
			dataframeMeanders.loc[dataframeMeanders['analyzed'] >= state.MAXIMUMarcCode, 'analyzed'] = 0

			return dataframeMeanders

		def analyzeBitsAlpha(dataframeMeanders: pandas.DataFrame) -> pandas.DataFrame:
			"""Compute `arcCode` from `bitsAlpha`.

			Formula
			-------
			```python
			if bitsAlpha > 1:
				arcCode = ((1 - (bitsAlpha & 1)) << 1) | (bitsZulu << 3) | (bitsAlpha >> 2)
				# `(1 - (bitsAlpha & 1))` is an evenness test.
			```
			"""
			dataframeMeanders['analyzed'] = dataframeMeanders['arcCode'] # Truncated creation of `bitsAlpha`
			dataframeMeanders.loc[:, 'analyzed'] &= 1 # (bitsAlpha & 1)
			dataframeMeanders.loc[:, 'analyzed'] = 1 - dataframeMeanders.loc[:, 'analyzed'] # (1 - (bitsAlpha ...))

			dataframeMeanders.loc[:, 'analyzed'] *= 2**1 # ((bitsAlpha ...) << 1)

			bitsTarget: pandas.Series = dataframeMeanders['arcCode'].copy()
			bitsTarget //= 2**1
			bitsTarget &= state.bitsLocator # `bitsZulu`

			bitsTarget *= 2**3 # (bitsZulu << 3)
			dataframeMeanders.loc[:, 'analyzed'] |= bitsTarget # ... | (bitsZulu ...)

			del bitsTarget
			# TODO clarify the note.
			"""NOTE In this code block, I rearranged the "formula" to use `bitsTarget` for two goals.
			1. `(bitsAlpha >> 2)`.
			2. `if bitsAlpha > 1`. The trick is in the equivalence of v1 and v2.

			v1: BITScow | (BITSwalk >> 2)
			v2: ((BITScow << 2) | BITSwalk) >> 2

			The "formula" calls for v1, but by using v2, `bitsTarget` is not changed. Therefore, because `bitsTarget` is
			`bitsAlpha`, I can use `bitsTarget` for goal 2, `if bitsAlpha > 1`.
			"""
			dataframeMeanders.loc[:, 'analyzed'] *= 2**2 # ... | (bitsAlpha >> 2)

			bitsTarget = dataframeMeanders['arcCode'].copy()
			bitsTarget &= state.bitsLocator # `bitsAlpha`

			dataframeMeanders.loc[:, 'analyzed'] |= bitsTarget # ... | (bitsAlpha)
			dataframeMeanders.loc[:, 'analyzed'] //= 2**2 # (... >> 2)

			dataframeMeanders.loc[(bitsTarget <= 1), 'analyzed'] = 0 # if bitsAlpha > 1

			del bitsTarget

			dataframeMeanders.loc[dataframeMeanders['analyzed'] >= state.MAXIMUMarcCode, 'analyzed'] = 0

			return dataframeMeanders

		def analyzeBitsZulu(dataframeMeanders: pandas.DataFrame) -> pandas.DataFrame:
			"""Compute `arcCode` from `bitsZulu`.

			Formula
			-------
			```python
			if bitsZulu > 1:
				arcCode = (1 - (bitsZulu & 1)) | (bitsAlpha << 2) | (bitsZulu >> 1)
			```
			"""
			# NOTE `(1 - (bitsZulu & 1))` is an evenness test: we want a single bit as the answer.
			dataframeMeanders.loc[:, 'analyzed'] = dataframeMeanders['arcCode']
			dataframeMeanders.loc[:, 'analyzed'] //= 2**1
			dataframeMeanders.loc[:, 'analyzed'] &= 1 # Truncated creation of `bitsZulu`.
			dataframeMeanders.loc[:, 'analyzed'] &= 1 # (bitsZulu & 1)
			dataframeMeanders.loc[:, 'analyzed'] = 1 - dataframeMeanders.loc[:, 'analyzed'] # (1 - (bitsZulu ...))

			bitsTarget: pandas.Series = dataframeMeanders['arcCode'].copy()
			bitsTarget &= state.bitsLocator # `bitsAlpha`

			bitsTarget *= 2**2 # (bitsAlpha << 2)
			dataframeMeanders.loc[:, 'analyzed'] |= bitsTarget # ... | (bitsAlpha ...)
			del bitsTarget

			# NOTE Same trick as in `analyzeBitsAlpha`.
			dataframeMeanders.loc[:, 'analyzed'] *= 2**1 # (... << 1)

			bitsTarget = dataframeMeanders['arcCode'].copy()
			bitsTarget //= 2**1
			bitsTarget &= state.bitsLocator # `bitsZulu`

			dataframeMeanders.loc[:, 'analyzed'] |= bitsTarget # ... | (bitsZulu)
			dataframeMeanders.loc[:, 'analyzed'] //= 2**1 # (... >> 1)

			dataframeMeanders.loc[bitsTarget <= 1, 'analyzed'] = 0 # if bitsZulu > 1
			del bitsTarget

			dataframeMeanders.loc[dataframeMeanders['analyzed'] >= state.MAXIMUMarcCode, 'analyzed'] = 0

			return dataframeMeanders

		def recordArcCodes(dataframeMeanders: pandas.DataFrame) -> pandas.DataFrame:
			"""This abstraction makes it easier to do things such as write to disk.""" # noqa: D401, D404
			nonlocal dataframeAnalyzed

			indexStopAnalyzed: int = state.indexTarget + int((dataframeMeanders['analyzed'] > 0).sum())

			if indexStopAnalyzed > state.indexTarget:
				if len(dataframeAnalyzed.index) < indexStopAnalyzed:
					warn(f"Lengthened `dataframeAnalyzed` from {len(dataframeAnalyzed.index)} to {indexStopAnalyzed=}; n={state.n}, {state.boundary=}.", stacklevel=2)
					dataframeAnalyzed = dataframeAnalyzed.reindex(index=pandas.RangeIndex(indexStopAnalyzed), fill_value=0)

				dataframeAnalyzed.loc[state.indexTarget:indexStopAnalyzed - 1, ['analyzed']] = (
					dataframeMeanders.loc[(dataframeMeanders['analyzed'] > 0), ['analyzed']
					].to_numpy(dtype=state.datatypeArcCode, copy=False)
				)

				dataframeAnalyzed.loc[state.indexTarget:indexStopAnalyzed - 1, ['crossings']] = (
					dataframeMeanders.loc[(dataframeMeanders['analyzed'] > 0), ['crossings']
					].to_numpy(dtype=state.datatypeCrossings, copy=False)
				)

				state.indexTarget = indexStopAnalyzed

			del indexStopAnalyzed

			return dataframeMeanders

		dataframeMeanders = pandas.DataFrame({
			'arcCode': pandas.Series(name='arcCode', data=dataframeAnalyzed['analyzed'], copy=False, dtype=state.datatypeArcCode)
			, 'analyzed': pandas.Series(name='analyzed', data=0, dtype=state.datatypeArcCode)
			, 'crossings': pandas.Series(name='crossings', data=dataframeAnalyzed['crossings'], copy=False, dtype=state.datatypeCrossings)
			}
		)

		del dataframeAnalyzed
		goByeBye()

		state.bitWidth = int(dataframeMeanders['arcCode'].max()).bit_length()
		state.setBitsLocator()
		length: int = getBucketsTotal(state)
		dataframeAnalyzed = pandas.DataFrame({
			'analyzed': pandas.Series(name='analyzed', data=0, index=pandas.RangeIndex(length), dtype=state.datatypeArcCode)
			, 'crossings': pandas.Series(name='crossings', data=0, index=pandas.RangeIndex(length), dtype=state.datatypeCrossings)
			}, index=pandas.RangeIndex(length)
		)

		state.boundary -= 1
		state.setMAXIMUMarcCode()

		state.indexTarget = 0

		dataframeMeanders: pandas.DataFrame = analyzeArcCodesSimple(dataframeMeanders)
		dataframeMeanders = recordArcCodes(dataframeMeanders)

		dataframeMeanders = analyzeBitsAlpha(dataframeMeanders)
		dataframeMeanders = recordArcCodes(dataframeMeanders)

		dataframeMeanders = analyzeBitsZulu(dataframeMeanders)
		dataframeMeanders = recordArcCodes(dataframeMeanders)

		dataframeMeanders = analyzeArcCodesAligned(dataframeMeanders)
		dataframeMeanders = recordArcCodes(dataframeMeanders)
		del dataframeMeanders
		goByeBye()

		aggregateArcCodes()

	state.dictionaryMeanders = dataframeAnalyzed.set_index('analyzed')['crossings'].to_dict()
	del dataframeAnalyzed
	return state

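`aggregateArcCodes` above performs the pandas counterpart of the NumPy aggregation: group the staged rows by 'analyzed' and sum 'crossings', then (after the loop) turn the result back into `dictionaryMeanders`. A small self-contained illustration on made-up values:

```python
import pandas

dataframeAnalyzed = pandas.DataFrame({
    'analyzed': [6, 3, 6, 9, 3],
    'crossings': [1, 2, 4, 8, 16],
})

aggregated = dataframeAnalyzed.groupby('analyzed', sort=False)['crossings'].aggregate('sum').reset_index()
print(aggregated.to_dict(orient='list'))  # {'analyzed': [6, 3, 9], 'crossings': [5, 18, 8]}

# The same conversion `countPandas` uses after its loop:
print(aggregated.set_index('analyzed')['crossings'].to_dict())  # {6: 5, 3: 18, 9: 8}
```
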
def doTheNeedful(state: MatrixMeandersNumPyState) -> int:
	"""Compute `crossings` with a transfer matrix algorithm implemented in NumPy.

	Parameters
	----------
	state : MatrixMeandersState
		The algorithm state.

	Returns
	-------
	crossings : int
		The computed value of `crossings`.
	"""
	while state.boundary > 0:
		if areIntegersWide(state):
			from mapFolding.syntheticModules.meanders.bigInt import countBigInt # noqa: PLC0415
			state = countBigInt(state)
		else:
			state.makeArray()
			state = countNumPy(state)
			state.makeDictionary()
	return sum(state.dictionaryMeanders.values())

def doTheNeedfulPandas(state: MatrixMeandersNumPyState) -> int:
	"""Compute `crossings` with a transfer matrix algorithm implemented in pandas.

	Parameters
	----------
	state : MatrixMeandersState
		The algorithm state.

	Returns
	-------
	crossings : int
		The computed value of `crossings`.
	"""
	while state.boundary > 0:
		if areIntegersWide(state):
			from mapFolding.syntheticModules.meanders.bigInt import countBigInt # noqa: PLC0415
			state = countBigInt(state)
		else:
			state = countPandas(state)
	return sum(state.dictionaryMeanders.values())
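
The Notes in `analyzeArcCodesSimple` above argue that `+= 3` can stand in for `|= 3` because the preceding shift left by two guarantees the two lowest bits are zero. A quick self-contained check of that equivalence on plain integers (illustrative only):

```python
# After `value << 2` the two lowest bits are 0b00, so OR-ing and adding 0b11 give the same result.
for bitsAlpha in range(16):
    for bitsZulu in range(16):
        shifted = (bitsAlpha | (bitsZulu << 1)) << 2
        assert shifted | 3 == shifted + 3
print("`| 3` and `+ 3` agree after `<< 2`")
```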