densitty 0.8.2-py3-none-any.whl → 1.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- densitty/__init__.py +9 -0
- densitty/ansi.py +0 -1
- densitty/axis.py +222 -59
- densitty/binning.py +200 -115
- densitty/colorbar.py +84 -0
- densitty/detect.py +142 -24
- densitty/{plot.py → plotting.py} +77 -19
- densitty/smoothing.py +315 -0
- densitty/truecolor.py +2 -1
- densitty/util.py +149 -124
- densitty/util.pyi +38 -0
- {densitty-0.8.2.dist-info → densitty-1.0.0.dist-info}/METADATA +11 -6
- densitty-1.0.0.dist-info/RECORD +18 -0
- {densitty-0.8.2.dist-info → densitty-1.0.0.dist-info}/WHEEL +1 -1
- densitty-0.8.2.dist-info/RECORD +0 -15
- {densitty-0.8.2.dist-info → densitty-1.0.0.dist-info}/licenses/LICENSE +0 -0
- {densitty-0.8.2.dist-info → densitty-1.0.0.dist-info}/top_level.txt +0 -0
densitty/smoothing.py
ADDED
@@ -0,0 +1,315 @@
+"""Creation of 2-D density maps for (x,y) data"""
+
+import dataclasses
+import math
+from typing import Callable, Optional, Sequence
+
+from .axis import Axis
+from .binning import (
+    FullBinsArg,
+    calc_value_range,
+    expand_bins_arg,
+    histogram2d,
+    segment_interval,
+)
+from .util import FloatLike, ValueRange, make_decimal, partial_first, partial_second
+
+BareSmoothingFunc = Callable[[FloatLike, FloatLike], FloatLike]
+
+
+@dataclasses.dataclass
+class SmoothingFuncWithWidth:
+    """Smoothing function plus precalculated widths"""
+
+    func: BareSmoothingFunc
+    # Precalculated widths at certain fractional height (0.5 and 0.001):
+    precalc_widths: dict[FloatLike, tuple[FloatLike, FloatLike]]
+
+    def __call__(self, delta_x: FloatLike, delta_y: FloatLike) -> FloatLike:
+        return self.func(delta_x, delta_y)
+
+
+SmoothingFunc = BareSmoothingFunc | SmoothingFuncWithWidth
+
+
+def gaussian(
+    delta: tuple[FloatLike, FloatLike],
+    inv_cov: tuple[tuple[FloatLike, FloatLike], tuple[FloatLike, FloatLike]],
+):
+    """Unnormalized Gaussian
+    delta: vector of ((x - x0), (y - y0))
+    inv_cov: inverse covariance matrix (aka precision)
+    """
+    exponent = (
+        (delta[0] * delta[0] * inv_cov[0][0])
+        + 2 * (delta[0] * delta[1] * inv_cov[0][1])
+        + (delta[1] * delta[1] * inv_cov[1][1])
+    )
+    return math.exp(-exponent / 2)
+
+
+def gaussian_with_inv_cov(inv_cov) -> SmoothingFunc:
+    """Produce a kernel function for a Gaussian with specified inverse covariance"""
+
+    def out(delta_x: FloatLike, delta_y: FloatLike) -> FloatLike:
+        return gaussian((delta_x, delta_y), inv_cov)
+
+    return out
+
+
+def gaussian_with_sigmas(sigma_x, sigma_y) -> SmoothingFunc:
+    """Produce a kernel function for a Gaussian with specified X & Y widths"""
+    inv_cov = ((sigma_x**-2, 0), (0, sigma_y**-2))
+    return gaussian_with_inv_cov(inv_cov)
+
+
+def triangle(width_x, width_y) -> SmoothingFunc:
+    """Produce a kernel function for a 2-D triangle with specified width/height
+    This is much cheaper computationally than the Gaussian, and gives decent results.
+    It has the nice property that if the widths are multiples of the output "bin" size,
+    the total output weight is independent of the exact alignment of the output bins.
+    """
+
+    def out(delta_x: FloatLike, delta_y: FloatLike) -> FloatLike:
+        x_factor = max(0.0, width_x / 2 - abs(delta_x))
+        y_factor = max(0.0, width_y / 2 - abs(delta_y))
+        return x_factor * y_factor
+
+    return SmoothingFuncWithWidth(
+        out,
+        {
+            0.5: (width_x / 4, width_y / 4),
+            0.001: (width_x / 2, width_y / 2),
+        },
+    )
+
+
+def pick_kernel_bandwidth(
+    points: Sequence[tuple[FloatLike, FloatLike]],
+    bins: tuple[int, int],
+    ranges: Optional[tuple[Optional[ValueRange], Optional[ValueRange]]] = None,
+    smoothness: FloatLike = 3,
+    smooth_fraction: FloatLike = 0.5,
+) -> tuple[float, float]:
+    """Determine an 'optimal' width for a kernel based on histogram binning
+
+    Parameters
+    ----------
+    points: Sequence of X,Y points each should be (float, float)
+    bins: tuple(int, int)
+        expected output number of columns/rows in plot
+        so kernel will at least be on the order of one bin
+    ranges: optional tuple of ValueRanges
+        expected output plot range. Determined from data if unset.
+    smoothness: float
+        Number of points in a histogram bin that is deemed "smooth"
+        1: minimum smoothing. 3 gives reasonable results.
+    smooth_fraction: float (fraction 0.0..1.0)
+        fraction of non-zero bins that must have the desired smoothness
+        0.5 => median non-zero bin
+    """
+    if bins[0] <= 0 or bins[1] <= 0:
+        raise ValueError("Number of bins must be nonzero")
+
+    # we'll reduce the number of bins gradually until we get the right smoothness
+    # track the number of bins in each direction as a float, so we can maintain the
+    # aspect ratio without roundoff error accumulating:
+    float_bins: tuple[float, float] = bins
+
+    # bin_step: how much we reduce the # of bins by each iteration.
+    # 1.0 in the larger direction, a fraction in the smaller direction:
+    if bins[0] > bins[1]:
+        bin_step = (1.0, (bins[1] / bins[0]))
+    else:
+        bin_step = ((bins[0] / bins[1]), 1.0)
+    while bins[0] > 0 and bins[1] > 0:
+        binned, x_axis, y_axis = histogram2d(points, bins, ranges, align=False)
+        nonzero_bins = [b for row in binned for b in row if b > 0]
+        test_pos = int(len(nonzero_bins) * (1.0 - smooth_fraction))
+        test_val = sorted(nonzero_bins)[test_pos]
+        if test_val >= smoothness:
+            break
+        float_bins = (float_bins[0] - bin_step[0], float_bins[1] - bin_step[1])
+        bins = (round(float_bins[0]), round(float_bins[1]))
+    else:
+        # We never managed to get 'smoothness' per bin, so just give up and smooth a lot
+        float_bins = (1, 1)
+
+    x_width = float(x_axis.value_range.max - x_axis.value_range.min) / float_bins[0] / 4
+    y_width = float(y_axis.value_range.max - y_axis.value_range.min) / float_bins[1] / 4
+
+    return (x_width, y_width)
+
+
+def func_span(f: Callable, fractional_height: FloatLike):
+    """Calculate the half-width of function at specified height"""
+    maximum = f(0)
+    target = maximum * fractional_height
+    # find 'lower' and 'upper' s.t. f(lower) >= target and f(upper) <= target
+    lower, upper = 0.0, 1.0
+    # Interval might not contain target, so double 'upper' until it does
+    for _ in range(100):
+        if f(upper) <= target:
+            break
+        lower = upper
+        upper *= 2
+    else:
+        raise ValueError("Unable to compute kernel function half-width")
+
+    # If our initial interval did contain target, the interval may be orders of magnitude too large
+    # We'll bisect until 'lower' moves, then bisect 10 times more
+    iter_count = 0
+    for _ in range(100):
+        test = (lower + upper) / 2
+        if f(test) < target:
+            upper = test
+        else:
+            lower = test
+        if lower > 0:
+            iter_count += 1
+            if iter_count >= 10:
+                break
+    else:
+        raise ValueError("Unable to compute kernel function half-width")
+
+    return (lower + upper) / 2
+
+
+def func_width_at_height(f: SmoothingFunc, height_fraction: float) -> tuple[FloatLike, FloatLike]:
+    """Helper to calculate function width at a given fractional height."""
+    if isinstance(f, SmoothingFuncWithWidth) and height_fraction in f.precalc_widths:
+        return f.precalc_widths[height_fraction]
+    x_width = func_span(partial_first(f), height_fraction)
+    y_width = func_span(partial_second(f), height_fraction)
+    if isinstance(f, SmoothingFuncWithWidth):
+        f.precalc_widths[height_fraction] = (x_width, y_width)
+    return x_width, y_width
+
+
+def func_width_half_height(f: SmoothingFunc) -> tuple[FloatLike, FloatLike]:
+    """Provide the (half) width of the function at half height (HWHM)"""
+    return func_width_at_height(f, 0.5)
+
+
+def func_width(f: SmoothingFunc) -> tuple[FloatLike, FloatLike]:
+    """Provide the (half) width of the function where it becomes negligible
+
+    Note: here we're just finding where the function gets down to 1/1000 of max,
+    which neglects that the area scales with the radius from the function center,
+    so for very slowly decaying functions (1/r, say) we may be excluding a lot of total weight.
+    """
+    return func_width_at_height(f, 0.001)
+
+
+def smooth_to_bins(
+    points: Sequence[tuple[FloatLike, FloatLike]],
+    kernel: SmoothingFunc,
+    x_centers: Sequence[FloatLike],
+    y_centers: Sequence[FloatLike],
+) -> Sequence[Sequence[float]]:
+    """Generate smoothed/density values over a grid, given data points and a kernel
+
+    Parameters
+    ----------
+    points: Sequence of (X,Y) tuples: the data points to smooth
+    kernel: Smoothing Function
+    x_centers: Sequence of values: Centers of output columns
+    y_centers: Sequence of values: Centers of output rows
+    """
+    # pylint: disable=too-many-locals
+    x_ctr_f = [float(x) for x in x_centers]
+    y_ctr_f = [float(y) for y in y_centers]
+
+    out = [[0.0] * len(x_centers) for _ in range(len(y_centers))]
+
+    # Make the assumption that the bin centers are evenly spaced, so we can
+    # calculate bin position from index and vice versa
+    x_delta = x_ctr_f[1] - x_ctr_f[0]
+    y_delta = y_ctr_f[1] - y_ctr_f[0]
+
+    kernel_width = func_width(kernel)
+    # Find width of the kernel in terms of X/Y indexes of the centers:
+    kernel_width_di = (
+        round(kernel_width[0] / x_delta) + 1,
+        round(kernel_width[1] / y_delta) + 1,
+    )
+    for point in points:
+        p = (float(point[0]), float(point[1]))
+        min_xi = max(round((p[0] - x_ctr_f[0]) / x_delta) - kernel_width_di[0], 0)
+        min_yi = max(round((p[1] - y_ctr_f[0]) / y_delta) - kernel_width_di[1], 0)
+
+        for x_i, bin_x in enumerate(x_ctr_f[min_xi : min_xi + 2 * kernel_width_di[0]], min_xi):
+            for y_i, bin_y in enumerate(y_ctr_f[min_yi : min_yi + 2 * kernel_width_di[1]], min_yi):
+                out[y_i][x_i] += float(kernel((p[0] - bin_x), (p[1] - bin_y)))
+    return out
+
+
+def pad_range(range_unpadded: ValueRange, padding: FloatLike):
+    """Add padding to both sides of a ValueRange"""
+    range_padding = make_decimal(padding)
+    return ValueRange(range_unpadded.min - range_padding, range_unpadded.max + range_padding)
+
+
+def smooth2d(
+    points: Sequence[tuple[FloatLike, FloatLike]],
+    kernel: SmoothingFunc,
+    bins: FullBinsArg = None,
+    ranges: Optional[tuple[Optional[ValueRange], Optional[ValueRange]]] = None,
+    align=True,
+    **axis_args,
+) -> tuple[Sequence[Sequence[float]], Axis, Axis]:
+    """Smooth (x,y) points out into a 2-D Density plot
+
+    Parameters
+    ----------
+    points: Sequence of (X,Y) tuples: the points to smooth into "bins"
+    kernel: SmoothingFunc
+        Smoothing function, takes (delta_x, delta_y) and outputs value
+    bins: int or (int, int) or [float,...] or ([float,...], [float,...])
+        int: number of output rows & columns (default: 10)
+        (int,int): number of columns (X), rows (Y)
+        list[float]: Column/Row centers
+        (list[float], list[float]): column centers for X, column centers for Y
+        Default: binning.DEFAULT_NUM_BINS
+    ranges: Optional (ValueRange, ValueRange)
+        ((x_min, x_max), (y_min, y_max)) for the row/column centers if 'bins' is int
+        Default: take from data min/max, with buffer based on kernel width
+    align: bool (default: True)
+        pick bin edges at 'round' values if # of bins is provided
+    axis_args: Extra arguments to pass through to Axis constructor
+
+    returns: Sequence[Sequence[int]], (x-)Axis, (y-)Axis
+    """
+
+    _, num_bins, bin_centers = expand_bins_arg(bins)
+
+    if bin_centers and ranges:
+        # First and last bin centers imply a range, which may be inconsistent
+        # with the passed-in ranges. Only supply one or the other.
+        raise ValueError("Both 'ranges' and bin centers provided")
+
+    if bin_centers:
+        x_centers, y_centers = bin_centers
+    else:
+        # No centers, just number of bins, and maybe user-specified ranges
+        if ranges:
+            x_range, y_range = ranges
+        else:
+            x_range, y_range = None, None
+        # if we use a range based on the min/max of the data, we also
+        # include some padding based on the half-width of the kernel
+        padding = func_width_half_height(kernel)
+        if not x_range:
+            x_range = calc_value_range(tuple(x for x, _ in points))
+            x_range = pad_range(x_range, padding[0])
+        if not y_range:
+            y_range = calc_value_range(tuple(y for _, y in points))
+            y_range = pad_range(y_range, padding[1])
+
+        x_centers = segment_interval(num_bins[0], x_range, align)
+        y_centers = segment_interval(num_bins[1], y_range, align)
+
+    x_axis = Axis((x_centers[0], x_centers[-1]), values_are_edges=False, **axis_args)
+    y_axis = Axis((y_centers[0], y_centers[-1]), values_are_edges=False, **axis_args)
+
+    return (smooth_to_bins(points, kernel, x_centers, y_centers), x_axis, y_axis)
densitty/truecolor.py
CHANGED
@@ -1,5 +1,6 @@
"""ANSI "True color" (24b, 16M colors) support."""

+import operator
import math

from typing import Optional, Sequence
@@ -37,7 +38,7 @@ def _linear_rgb_to_rgb(channel):

def _vector_transform(v, m):
    """Returns v * m, where v is a vector and m is a matrix (list of columns)."""
-    return [
+    return [sum(map(operator.mul, v, col)) for col in m]


def _rgb_to_lab(rgb: Vec) -> Vec:
densitty/util.py
CHANGED
@@ -1,26 +1,27 @@
"""Utility functions."""

-from
-from collections import namedtuple
-from decimal import Decimal
-import math
-from typing import Any, Protocol, Sequence, SupportsFloat
+from __future__ import annotations  # for pre-Python 3.12 compatibility

+import math
+import typing

-
-
+from bisect import bisect_left
+from decimal import BasicContext, Decimal, DecimalTuple
+from fractions import Fraction
+from typing import Any, Callable, NamedTuple, Sequence

-
-
-
-
-
-    def __abs__(self) -> T: ...
+# FloatLike and Vec are defined in the stubs file util.pyi for type checking
+# At runtime, define as Any so older Python versions don't choke:
+if not typing.TYPE_CHECKING:
+    FloatLike = Any
+    Vec = Any


-ValueRange
+class ValueRange(NamedTuple):
+    """Encapsulates a range from min..max"""

-
+    min: Decimal
+    max: Decimal


def clamp(x, min_x, max_x):
@@ -76,44 +77,97 @@ def nearest(stepwise: Sequence, x: float):
    return stepwise[clamped_idx]


-def
-    """
-
+def make_decimal(x: FloatLike) -> Decimal:
+    """Turn a float into a decimal with reasonable precision,
+    avoiding things like 1.0000000000000002220446049250313080847263336181640625"""
+    if isinstance(x, Decimal):
+        return x
+    return BasicContext.create_decimal_from_float(float(x))
+
+
+def sanitize_decimals(values: Sequence[Decimal]) -> Sequence[Decimal]:
+    """Strip trailing "0"s if all values in the list have the trailing "0"s
+    So [1.000, 2.000] becomes [1, 2]"""
+    if not values:
+        return []
+
+    as_tuples = [v.as_tuple() for v in values]
+    cur_exponent = as_tuples[0].exponent
+    if not all(t.exponent == cur_exponent for t in as_tuples):
+        # inconsistent exponent: just return them as is
+        return values
+    while cur_exponent < 0 and all(t.digits[-1] == 0 for t in as_tuples):
+        # all values have a trailing 0. Remove, and add a leading 0 to prevent (0,) from vanishing:
+        as_tuples = [
+            DecimalTuple(t.sign, (0,) + t.digits[:-1], cur_exponent + 1) for t in as_tuples
+        ]
+        cur_exponent += 1
+
+    as_decimals = (Decimal(t) for t in as_tuples)
+
+    # zero values may be something like "0E8" or "0E-2". Make them just be "0":
+    return [d if d != 0 else Decimal(0) for d in as_decimals]
+
+
+def make_value_range(v: ValueRange | Sequence[FloatLike]) -> ValueRange:
+    """Produce a ValueRange from something that may be a sequence of FloatLikes"""
+    return ValueRange(make_decimal(v[0]), make_decimal(v[1]))
+
+
+def partial_first(f: Callable[[FloatLike, FloatLike], FloatLike]) -> Callable:
+    """Equivalent to functools.partial, but works with Python 3.10"""
+
+    def out(x: FloatLike):
+        return f(x, 0)
+
+    return out
+
+
+def partial_second(f: Callable[[FloatLike, FloatLike], FloatLike]) -> Callable:
+    """Equivalent to functools.partial, but works with Python 3.10"""
+
+    def out(x: FloatLike):
+        return f(0, x)
+
+    return out


def sfrexp10(value):
    """Returns sign, base-10 fraction (mantissa), and exponent.
-    i.e. (s, f, e) such that value = s * f * 10 ** e
+    i.e. (s, f, e) such that value = s * f * 10 ** e.
+    if f == 0 => value == 0, else 0.1 < f <= 1.0
    """
    if value == 0:
-        return 1, 0, -100
-
-    sign = -1 if value < 0 else 1
+        return 1, Fraction(0), -100

-
-
-
+    if value < 0:
+        sign = -1
+        value = -value
+    else:
+        sign = 1

-
+    exp = math.ceil(math.log10(float(value)))
+    frac = (Fraction(value) / Fraction(10) ** exp).limit_denominator()
+    return sign, frac, exp


round_fractions = (
-
-
-
-
-
-
-
-
-
-
-
+    Fraction(1, 10),
+    Fraction(1, 8),
+    Fraction(1, 6),
+    Fraction(1, 5),
+    Fraction(1, 4),
+    Fraction(1, 3),
+    Fraction(2, 5),
+    Fraction(1, 2),
+    Fraction(2, 3),
+    Fraction(4, 5),
+    Fraction(1, 1),
)


def round_up_ish(value, round_fracs=round_fractions):
-    """'Round' the value up to the next highest value in '
+    """'Round' the value up to the next highest value in 'round_fracs' times a multiple of 10

    Parameters
    ----------
@@ -121,114 +175,85 @@ def round_up_ish(value, round_fracs=round_fractions):
    round_vals: the allowable values (mantissa in base 10)
    return: the closest round_vals[i] * 10**N equal to or larger than 'value'
    """
-    sign,
+    sign, frac_float, exp = sfrexp10(value)

    # if we're passed in a float that can't be represented in binary (say 0.1 or 0.2), it will be
-    # rounded up to the next representable float.
-
-    frac -= Decimal(math.ulp(frac))
+    # rounded up to the next representable float. Adjust to closest sensible fraction:
+    frac = Fraction(frac_float).limit_denominator()

    idx = bisect_left(round_fracs, frac)  # find index that this would be inserted before (>= frac)
    round_frac = round_fracs[idx]

-    return sign * round_frac
+    return sign * round_frac * 10**exp


def roundness(value):
    """Metric for how 'round' a value is. 10 is rounder than 1, is rounder than 1.1."""

-    # if value is a sequence, combine the roundness of all elements, prioritizing in order:
    if isinstance(value, Sequence):
-
-
-
-
-
-
+        # return the average roundness of all elements, with a bonus for the size of the range
+        num = len(value)
+
+        if num > 1:
+            roundnesses = (roundness(v) for v in value)
+            mean = sum(roundnesses) / num
+            # give a bonus to sets that cover a longer interval
+            log_value_range = math.log(value[-1] - value[0])
+        else:
+            mean = roundness(value[0])
+            log_value_range = -1000  # as if we're covering a range of 10^-1000
+
+        # we want a small bonus to "roundness" if the range is bigger.
+        # Keep it small enough that 1..2 won't get expanded to 0.5..2, say
+        # log_value_range increases by ~0.3 for every doubling of range
+        # which is ~ the penalty for using 1/2 vs 1
+        return mean + log_value_range
+
+    # Just a single value, not a sequence:
    if value == 0:
        # 0 is the roundest value
        return 1000  # equivalent to roundness of 1e1000
-    _, frac, exp = sfrexp10(value)

-
+    if value < 0:
+        value = -value
+
+    exp = math.ceil(math.log10(float(value)))
+    frac = (Fraction(value) / Fraction(10) ** exp).limit_denominator()
+    # so frac is 1 for a multiple of 10, and <1 for non-multiples of 10
+
+    # penalties based on the denominator of 'frac' when expressed as a ratio:
    penalties = {
-        1
-
-
-
-
-
-
-
-
-
+        1: 0,  # value is power of 10 (1eX)
+        2: 0.3,  # value is 5.0eX
+        5: 0.5,  # value is 2/4/6/8.0 eX
+        10: 1,  # x.1, x.3, x.7, x.9 eX
+        4: 1,  # x.25, x.75 eX
+        3: 1.2,  # x.333, x.666 TODO: figure out how these are being printed and limit digits
+        20: 1.2,  # x.05, x.15, x.35...
+        25: 1.8,  # x.04, x.08, ...
+        50: 1.8,  # x.02, x.06, ...
+        100: 2,  # x.01, ...
    }

-    if
-        return exp - penalties[
+    if frac.denominator in penalties:
+        return exp - penalties[frac.denominator]

-    #
-
+    # In case we have ticks like 1.001, 1.002, 1.003, try to account for number of digits required:
+    max_digits = 10
+    for digits in range(2, max_digits):
+        if 10**digits % frac.denominator == 0:
+            return exp - digits

-
-def most_round(values):
-    """Pick the most round of the input values. Ties go to the earliest."""
-    best_r = -1e100
-    best_v = 0
-    for v in values:
-        r = roundness(v)
-        if r > best_r:
-            best_r, best_v = r, v
-    return best_v
+    return exp - max_digits


-def
-    """
-
-
+def roundness_ordered(values):
+    """Returns values in order of decreasing roundness"""
+    d = {roundness(v): v for v in values}
+    for r in reversed(sorted(d)):
+        yield d[r]

-    Parameters
-    ----------
-    value_range: bounds of interval
-    num_steps_hint: approximate number of steps desired for the interval
-    min_steps_per_label: for use with axis/label generation, as labels take more space than ticks
-    return: step size, interval between labeled steps/ticks
-    """
-    num_steps_hint = max(1, num_steps_hint)
-    # if steps are 0,1,2,3,4,5,6... or 0,2,4,6,8,10,... steps_per_label of 5 is sensible,
-    # if steps are 0,5,10,15,20,... steps_per_label of 4 is sensible
-    nominal_step = (value_range.max - value_range.min) / num_steps_hint
-
-    # Figure out the order-of-magnitude (power of 10), aka "decade" of the steps:
-    log_nominal = math.log10(nominal_step)
-    log_decade = math.floor(log_nominal)  # i.e. # of digits
-    decade = Decimal(10) ** log_decade
-
-    # Now figure out where in that decade we are, so we can pick the closest 1/2/5 value
-    log_frac = log_nominal - log_decade  # remainder after decade taken out
-    frac = 10**log_frac  # i.e. fraction through the decade (shift decimal point to front)
-
-    # common-case: label every or every-other, or every 5th, or every 10th
-    if min_steps_per_label <= 2:
-        steps_per_label = min_steps_per_label
-    elif min_steps_per_label <= 5:
-        steps_per_label = 5
-    else:
-        steps_per_label = max(min_steps_per_label, 10)
-
-    if frac < 1.1:
-        step = decade
-    elif frac < 2.2:
-        step = 2 * decade
-        # Steps of .2, don't label every other one
-        if steps_per_label == 2:
-            steps_per_label = 5
-    elif frac < 5.5:
-        step = 5 * decade
-        # ticks every .5, don't label every 5th
-        if steps_per_label == 5:
-            steps_per_label = max(round(min_steps_per_label / 2) * 2, 6)
-    else:
-        step = 10 * decade

-
+def most_round(values):
+    """Pick the most round of the input values."""
+    return next(roundness_ordered(values))