mpmath 0.2__zip → 0.4__zip
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mpmath-0.4/CHANGES +52 -0
- {mpmath-0.2 → mpmath-0.4}/PKG-INFO +1 -1
- {mpmath-0.2 → mpmath-0.4}/README +58 -8
- {mpmath-0.2 → mpmath-0.4}/demo/pidigits.py +10 -16
- mpmath-0.4/mpmath/__init__.py +3 -0
- mpmath-0.4/mpmath/functions2.py +384 -0
- mpmath-0.4/mpmath/lib/__init__.py +7 -0
- mpmath-0.4/mpmath/lib/complexop.py +181 -0
- mpmath-0.4/mpmath/lib/constants.py +212 -0
- mpmath-0.4/mpmath/lib/convert.py +233 -0
- mpmath-0.4/mpmath/lib/floatop.py +252 -0
- mpmath-0.4/mpmath/lib/functions.py +350 -0
- mpmath-0.4/mpmath/lib/squareroot.py +199 -0
- mpmath-0.4/mpmath/lib/util.py +268 -0
- mpmath-0.4/mpmath/mpmath.py +739 -0
- {mpmath-0.2 → mpmath-0.4}/setup.py +2 -2
- mpmath-0.4/tests/benchmark.py +139 -0
- mpmath-0.4/tests/runtests.py +5 -0
- mpmath-0.4/tests/test_bitwise.py +110 -0
- mpmath-0.4/tests/test_compatibility.py +51 -0
- mpmath-0.4/tests/test_convert.py +62 -0
- mpmath-0.4/tests/test_division.py +126 -0
- mpmath-0.4/tests/test_functions2.py +68 -0
- mpmath-0.4/tests/test_hp.py +199 -0
- {mpmath-0.2 → mpmath-0.4}/tests/test_mpmath.py +137 -166
- mpmath-0.2/CHANGES +0 -23
- mpmath-0.2/mpmath/__init__.py +0 -2
- mpmath-0.2/mpmath/lib.py +0 -1122
- mpmath-0.2/mpmath/mpmath.py +0 -515
- {mpmath-0.2 → mpmath-0.4}/LICENSE +0 -0
mpmath-0.2/mpmath/lib.py
DELETED
|
@@ -1,1122 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
This module contains "low-level" functions for multiprecision floating-
|
|
3
|
-
point arithmetic implemented in pure Python. The code is written in a
|
|
4
|
-
functional style for simplicity and speed.
|
|
5
|
-
|
|
6
|
-
A floating-point number x = man * 2**exp is represented by the tuple
|
|
7
|
-
(man, exp, bc) where man is the mantissa, exp is the exponent, and bc
|
|
8
|
-
is the number of bits in the mantissa. To simplify equality testing,
|
|
9
|
-
the mantissa always gets normalized by removing trailing zero bits.
|
|
10
|
-
|
|
11
|
-
The bitcount is slightly redundant to store in the number, but may as
|
|
12
|
-
well be reused since it always gets computed during normalization,
|
|
13
|
-
and slightly speeds up subsequent operations on a number.
|
|
14
|
-
|
|
15
|
-
"""
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
#----------------------------------------------------------------------------#
|
|
19
|
-
# #
|
|
20
|
-
# General utilities #
|
|
21
|
-
# #
|
|
22
|
-
#----------------------------------------------------------------------------#
|
|
23
|
-
|
|
24
|
-
import math
|
|
25
|
-
import decimal
|
|
26
|
-
|
|
27
|
-
# Same as standard Python float
|
|
28
|
-
STANDARD_PREC = 53
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
# All supported rounding modes. We define them as integer constants for easy
|
|
32
|
-
# management, but change __repr__ to give more information on inspection
|
|
33
|
-
|
|
34
|
-
class RoundingMode(int):
|
|
35
|
-
def __new__(cls, level, name):
|
|
36
|
-
a = int.__new__(cls, level)
|
|
37
|
-
a.name = name
|
|
38
|
-
return a
|
|
39
|
-
def __repr__(self): return self.name
|
|
40
|
-
|
|
41
|
-
ROUND_DOWN = RoundingMode(1, 'ROUND_DOWN')
|
|
42
|
-
ROUND_UP = RoundingMode(2, 'ROUND_UP')
|
|
43
|
-
ROUND_FLOOR = RoundingMode(3, 'ROUND_FLOOR')
|
|
44
|
-
ROUND_CEILING = RoundingMode(4, 'ROUND_CEILING')
|
|
45
|
-
ROUND_HALF_UP = RoundingMode(5, 'ROUND_HALF_UP')
|
|
46
|
-
ROUND_HALF_DOWN = RoundingMode(6, 'ROUND_HALF_DOWN')
|
|
47
|
-
ROUND_HALF_EVEN = RoundingMode(7, 'ROUND_HALF_EVEN')
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
def giant_steps(start, target):
    """Return an increasing list of precisions from 'start' to 'target'
    that roughly doubles with each step.  Quadratically convergent
    iterations (i.e. Newton's method) use this to keep the working
    precision at the same level as the current accuracy, minimizing
    work.

    For example, to reach 1000 bits from a 53-bit estimate,
    giant_steps(53, 1000) gives

        [64, 126, 251, 501, 1000]

    so the first Newton step is carried out at 64-bit precision, the
    second at 126 bits, and so on.

    The halving is conservative (1000 -> 501, not 500); this is used to
    guard against unit errors in the last place."""
    steps = [target]
    while steps[-1] > 2*start:
        steps.append(steps[-1]//2 + 1)
    steps.reverse()
    return steps
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
#----------------------------------------------------------------------------#
|
|
75
|
-
# #
|
|
76
|
-
# Radix conversion #
|
|
77
|
-
# #
|
|
78
|
-
#----------------------------------------------------------------------------#
|
|
79
|
-
|
|
80
|
-
LOG2_10 = math.log(10, 2)   # 3.3219... bits per decimal digit

# TODO: only binary_to_decimal and decimal_to_binary are used currently.
# Things could be sped up by using the other radix-conversion helpers,
# currently used only by the pidigits.py demo.

getctx = decimal.getcontext
Dec = decimal.Decimal

def binary_to_decimal(s, n):
    """Represent the raw float s as a decimal string with at most n digits."""
    man, exp, bc = s
    saved_prec = getctx().prec
    # Evaluate man * 2**exp exactly enough (10 guard digits), then let
    # unary plus round the Decimal to n digits.
    getctx().prec = n + 10
    d = Dec(man) * Dec(2)**exp
    getctx().prec = n
    result = str(+d)
    getctx().prec = saved_prec
    return result
|
|
99
|
-
|
|
100
|
-
def decimal_to_binary(x, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Convert a decimal number (string or Decimal-compatible value) to
    a raw float with 'prec' bits of precision."""
    dps = int(prec*LOG2_10) + 5
    saved_prec = getctx().prec
    getctx().prec = dps
    d = Dec(x).normalize()
    sgn, digits, dexp = d.as_tuple()
    # Scale the decimal to an integer, then divide out the matching
    # power of ten in binary arithmetic.
    d = d * Dec(10)**(-dexp)
    power = fpow(ften, -dexp, prec+5)
    y = fdiv(float_from_int(int(d), prec+5), power, prec, rounding)
    getctx().prec = saved_prec
    return y
|
|
111
|
-
|
|
112
|
-
def bin_to_radix(x, xbits, base, bdigits):
    """Rescale the fixed-point integer x/2**xbits to base**bdigits."""
    return x * (base**bdigits) >> xbits

def small_numeral(n, base=10, digits='0123456789abcdefghijklmnopqrstuvwxyz'):
    """Numeral of the nonnegative integer n in the given base."""
    if base == 10:
        return str(n)
    out = []
    while n:
        n, digit = divmod(n, base)
        out.append(digits[digit])
    out.reverse()
    return "".join(out)

# TODO: speed up for bases 2, 4, 8, 16, ...
def fixed_to_str(x, base, digits, verbose=False):
    """Numeral of x in 'base', via divide-and-conquer above 789 digits."""
    if digits < 789:
        return small_numeral(x, base)
    half = (digits // 2) + (digits & 1)
    if verbose and half > 50000:
        print(" dividing...")
    A, B = divmod(x, base**half)
    ad = fixed_to_str(A, base, half)
    bd = fixed_to_str(B, base, half).rjust(half, "0")
    return ad + bd
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
#----------------------------------------------------------------------------#
|
|
138
|
-
# #
|
|
139
|
-
# Bit manipulation, etc #
|
|
140
|
-
# #
|
|
141
|
-
#----------------------------------------------------------------------------#
|
|
142
|
-
|
|
143
|
-
def make_fixed(s, prec):
    """Convert a raw float s to a fixed-point integer scaled by 2**prec
    (i.e. floor toward minus infinity of s * 2**prec)."""
    man, exp, bc = s
    offset = exp + prec
    if offset >= 0:
        return man << offset
    return man >> (-offset)
|
|
151
|
-
|
|
152
|
-
def bitcount(n, log=math.log, table=(0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4)):
    """Give the size of n in bits, i.e. the position of the highest set
    bit in n.  If n is negative, its absolute value is used.  The
    bitcount of zero is taken to be 0."""
    if not n:
        return 0
    if n < 0:
        n = -n
    # math.log gives a fast estimate that never overflows, but is not
    # always exact.  Subtract 2 to be sure of undershooting, then count
    # the remaining (at most 4) bits by table lookup.
    estimate = int(log(n, 2)) - 2
    if estimate < 0:
        estimate = 0
    return estimate + table[n >> estimate]
|
|
167
|
-
|
|
168
|
-
# from decimal.py -- faster for small precs
|
|
169
|
-
def bitcount2(n, correction = {
|
|
170
|
-
'0': 4, '1': 3, '2': 2, '3': 2,
|
|
171
|
-
'4': 1, '5': 1, '6': 1, '7': 1,
|
|
172
|
-
'8': 0, '9': 0, 'a': 0, 'b': 0,
|
|
173
|
-
'c': 0, 'd': 0, 'e': 0, 'f': 0}):
|
|
174
|
-
if n < 0:
|
|
175
|
-
n = -n
|
|
176
|
-
hex_n = "%x" % n
|
|
177
|
-
return 4*len(hex_n) - correction[hex_n[0]]
|
|
178
|
-
|
|
179
|
-
def trailing_zeros(n):
    """Count trailing zero bits in an integer.  If n is negative, it is
    replaced by its absolute value."""
    if n & 1:
        return 0
    if not n:
        return 0
    if n < 0:
        n = -n
    count = 0
    # Strip zeros in blocks of 64, then 8, then one bit at a time
    while not n & 0xffffffffffffffff:
        n >>= 64
        count += 64
    while not n & 0xff:
        n >>= 8
        count += 8
    while not n & 1:
        n >>= 1
        count += 1
    return count
|
|
190
|
-
|
|
191
|
-
def rshift_quick(x, n):
    """For an integer x, calculate x >> n with the fastest (floor)
    rounding.  Unlike the plain Python expression (x >> n), n is
    allowed to be negative, in which case a left shift is performed."""
    if n >= 0:
        return x >> n
    return x << (-n)

def lshift_quick(x, n):
    """For an integer x, calculate x << n.  Unlike the plain Python
    expression (x << n), n is allowed to be negative, in which case a
    right shift with default (floor) rounding is performed."""
    if n >= 0:
        return x << n
    return x >> (-n)
|
|
204
|
-
|
|
205
|
-
def rshift(x, n, rounding):
    """Shift x (a plain Python integer) n bits to the right (i.e.
    calculate x/(2**n)), and round to the nearest integer in accordance
    with the specified rounding mode.  The exponent n may be negative,
    in which case x is shifted to the left (and no rounding is
    necessary)."""
    if not n or not x:
        return x
    # Left shift: exact, no rounding needed
    if n < 0:
        return x << -n

    # Python's >> floors (rounds toward minus infinity) for both signs;
    # the directed modes can therefore be built from shifts alone, by
    # flipping the sign to get ceiling where needed.
    if rounding == ROUND_FLOOR:
        return x >> n
    elif rounding < ROUND_HALF_UP:
        if rounding == ROUND_DOWN:
            # Toward zero: floor the magnitude
            if x > 0:
                return x >> n
            return -((-x) >> n)
        if rounding == ROUND_UP:
            # Away from zero: ceiling of the magnitude
            if x > 0:
                return -((-x) >> n)
            return x >> n
        if rounding == ROUND_CEILING:
            return -((-x) >> n)

    # Round-to-nearest modes: inspect the bits around the cutoff point.
    # t holds the quotient with one extra (rounding) bit at the bottom.
    if x > 0:
        t = x >> (n-1)
    else:
        t = (-x) >> (n-1)
    if t & 1:
        # The rounding bit is set: round the magnitude up if the mode
        # (and, for the tie cases, the discarded low bits / evenness of
        # the result) demand it.
        if rounding == ROUND_HALF_UP or \
           (rounding == ROUND_HALF_DOWN and x & ((1<<(n-1))-1)) or \
           (rounding == ROUND_HALF_EVEN and (t&2 or x & ((1<<(n-1))-1))):
            if x > 0:
                return (t>>1) + 1
            return -((t>>1) + 1)
    if x > 0:
        return t >> 1
    return -(t >> 1)
|
|
245
|
-
|
|
246
|
-
def normalize(man, exp, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Normalize the binary floating-point number represented by
    man * 2**exp to the specified precision level, rounding if the
    number of bits in the mantissa exceeds prec.  The mantissa is also
    stripped of trailing zero bits, and its bits are counted.  The
    returned value is a tuple (man, exp, bc)."""
    if not man:
        return 0, 0, 0
    # bitcount2 is faster at low precision, bitcount at high precision
    if prec < 100:
        bc = bitcount2(man)
    else:
        bc = bitcount(man)
    if bc > prec:
        # Right shifting by bc-prec nearly always leaves at most prec
        # bits.  The one exception: if abs(man) is 1 less than a power
        # of two and rounding is done away from zero, it turns into the
        # next power of two, so the bitcount must be recomputed (a
        # stale bc of prec would be wrong).
        # TODO: by comparing sign and rounding mode, we could just
        # return (+/- 1, exp+bc, 1) right here.
        absman = man
        if absman < 0:
            absman = -absman
        if not ((absman+1) & absman):
            man = rshift(man, bc-prec, rounding)
            exp += (bc - prec)
            bc = bitcount(man)
        else:
            # Default case
            man = rshift(man, bc-prec, rounding)
            exp += (bc - prec)
            bc = prec
    # Strip trailing zeros
    if not man & 1:
        zeros = trailing_zeros(man)
        if zeros:
            man >>= zeros
            exp += zeros
            bc -= zeros
    #assert bitcount(man) <= prec
    if not man:
        return 0, 0, 0
    return man, exp, bc
|
|
292
|
-
|
|
293
|
-
#----------------------------------------------------------------------------#
|
|
294
|
-
# #
|
|
295
|
-
# Type conversion #
|
|
296
|
-
# #
|
|
297
|
-
#----------------------------------------------------------------------------#
|
|
298
|
-
|
|
299
|
-
def float_from_int(n, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Create a raw float from a Python integer."""
    return normalize(n, 0, prec, rounding)

def float_from_rational(p, q, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Create a raw float from the rational number p/q."""
    # Pad the numerator with enough bits for an accurate quotient
    pad = prec + bitcount(q) + 2
    return normalize((p<<pad)//q, -pad, prec, rounding)

def float_from_pyfloat(x, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Create a raw float from a Python float (assumed to have a
    53-bit mantissa)."""
    m, e = math.frexp(x)
    return normalize(int(m*(1<<53)), e-53, prec, rounding)

def float_to_int(s):
    """Convert a raw float to a Python integer, truncating toward zero."""
    man, exp, bc = s
    return rshift(man, -exp, ROUND_DOWN)

def float_to_pyfloat(s):
    """Convert to a Python float. May raise OverflowError."""
    man, exp, bc = s
    try:
        return math.ldexp(man, exp)
    except OverflowError:
        # The mantissa itself was too large for ldexp; resize it to 53
        # bits first.  Overflow may still happen here.
        n = bc - 53
        return math.ldexp(man >> n, exp + n)

def float_to_rational(s):
    """Return (p, q) such that s = p/q exactly. p and q are not reduced
    to lowest terms."""
    man, exp, bc = s
    if exp > 0:
        return man * 2**exp, 1
    return man, 2**-exp


fzero = float_from_int(0)
fone  = float_from_int(1)
ftwo  = float_from_int(2)
ften  = float_from_int(10)
fhalf = float_from_rational(1, 2)
assert fhalf == float_from_pyfloat(0.5)
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
#----------------------------------------------------------------------------#
|
|
346
|
-
# #
|
|
347
|
-
# Comparison #
|
|
348
|
-
# #
|
|
349
|
-
#----------------------------------------------------------------------------#
|
|
350
|
-
|
|
351
|
-
def feq(s, t):
    """Floating-point equality test.  The numbers are assumed to be
    normalized, which makes the representation canonical, so this is
    simply tuple comparison."""
    return s == t
|
|
355
|
-
|
|
356
|
-
def fcmp(s, t):
    """Floating-point comparison. Return -1 if s < t, 0 if s == t,
    and 1 if s > t."""

    # An inequality between two numbers s and t is determined by looking
    # at the value of s-t. A full floating-point subtraction is
    # relatively slow, so we first try to decide from the exponents and
    # signs of s and t alone.
    sman, sexp, sbc = s
    tman, texp, tbc = t

    # Very easy cases: check for 0's and opposite signs.
    # ((a > b) - (a < b)) is the sign of (a - b), i.e. Python 2's cmp().
    if not tman: return (sman > 0) - (sman < 0)
    if not sman: return (0 > tman) - (0 < tman)
    if sman > 0 and tman < 0: return 1
    if sman < 0 and tman > 0: return -1

    # Same exponent: the numbers likely have the same magnitude, so
    # compare mantissas directly
    if sexp == texp: return (sman > tman) - (sman < tman)

    # The numbers have the same sign but different exponents. Try to
    # determine whether they differ in magnitude by comparing the
    # positions of their highest set bits.
    a = sbc + sexp
    b = tbc + texp
    if sman > 0:
        if a < b: return -1
        if a > b: return 1
    else:
        if a < b: return 1
        # BUG FIX: the original code repeated "if a < b: return -1"
        # here, so two negative numbers of different magnitude fell
        # through to the subtraction path below instead of returning -1
        if a > b: return -1

    # The numbers have similar magnitude. Subtract and inspect the sign
    # of the resulting mantissa.
    diff = fsub(s, t, 5, ROUND_FLOOR)[0]
    return (diff > 0) - (diff < 0)
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
#----------------------------------------------------------------------------#
|
|
393
|
-
# #
|
|
394
|
-
# Basic arithmetic #
|
|
395
|
-
# #
|
|
396
|
-
#----------------------------------------------------------------------------#
|
|
397
|
-
|
|
398
|
-
def fadd(s, t, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Floating-point addition. Given two tuples s and t containing the
    components of floating-point numbers, return their sum rounded to
    'prec' bits with the 'rounding' mode, as a tuple of components.

    General algorithm: set min(s.exp, t.exp) = 0, perform exact integer
    addition, and round the result:

        exp = 0
              |
              v
          11111111100000   <-- s.man (padded with zeros from shifting)
        +        222222222 <-- t.man (no shifting necessary)
        ----------------
        = 11111333333333
    """
    # Make sure s is the operand with the higher exponent
    if t[1] > s[1]:
        s, t = t, s

    sman, sexp, sbc = s
    tman, texp, tbc = t

    # Zero operands get special treatment: Float(0) always has exp = 0,
    # so if the other operand has a large exponent, its mantissa would
    # pointlessly be shifted a huge number of bits below.
    if not tman:
        return normalize(sman, sexp, prec, rounding)
    if not sman:
        return normalize(tman, texp, prec, rounding)

    # More generally, if one number is huge and the other tiny -- in
    # particular if their mantissas don't overlap at all at the current
    # precision -- the sum rounds to the huge one:
    #
    #       precision
    #      |         |
    #       111111111
    #     +                    222222222
    #     ------------------------
    #     = 1111111110000...
    #
    # For round-to-nearest that is exact; for directed roundings the
    # result must still be nudged up or down, which is done by planting
    # a dummy bit just outside the precision range.
    if sexp - texp > 10:
        bitdelta = (sbc + sexp) - (tbc + texp)
        if bitdelta > prec + 5:
            if rounding > 4:  # one of the round-to-nearest modes
                return normalize(sman, sexp, prec, rounding)
            offset = min(bitdelta + 3, prec+3)
            sman <<= offset
            if tman > 0:
                sman += 1
            else:
                sman -= 1
            return normalize(sman, sexp-offset, prec, rounding)

    # General case: exact shifted addition, then one rounding
    return normalize(tman + (sman << (sexp-texp)), texp, prec, rounding)
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
def fsub(s, t, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Floating-point subtraction, implemented as s + (-t)."""
    tman, texp, tbc = t
    return fadd(s, (-tman, texp, tbc), prec, rounding)
|
|
465
|
-
|
|
466
|
-
def fneg(s, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Floating-point negation. In addition to changing the sign, rounds
    to the specified precision."""
    return normalize(-s[0], s[1], prec, rounding)

def fabs(s, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Floating-point absolute value, rounded to the given precision."""
    man, exp, bc = s
    if man < 0:
        man = -man
    return normalize(man, exp, prec, rounding)
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
def fmul(s, t, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Floating-point multiplication"""
    sman, sexp, sbc = s
    tman, texp, tbc = t
    # Exact integer product followed by a single rounding.  A possible
    # optimization would be to throw away low bits first when prec is
    # much smaller than sbc+tbc.
    return normalize(sman*tman, sexp+texp, prec, rounding)
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
def fdiv(s, t, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Floating-point division"""
    sman, sexp, sbc = s
    tman, texp, tbc = t

    # Integer division of the mantissas; the numerator must be padded
    # to preserve accuracy.
    #
    # Note: this algorithm can produce slightly wrong rounding in corner
    # cases.  The 12 guard bits make the chance very small; lowering
    # '12' reveals the error in the test_standard_float test case.
    extra = prec - sbc + tbc + 12
    if extra < 12:
        extra = 12

    return normalize((sman<<extra)//tman, sexp-texp-extra, prec, rounding)
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
def fpow(s, n, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Compute s**n, where n is an integer"""
    n = int(n)
    # Trivial exponents
    if n == 0: return fone
    if n == 1: return normalize(s[0], s[1], prec, rounding)
    if n == 2: return fmul(s, s, prec, rounding)
    if n == -1: return fdiv(fone, s, prec, rounding)
    if n < 0:
        return fdiv(fone, fpow(s, -n, prec+3, ROUND_FLOOR), prec, rounding)

    # Binary exponentiation: roughly log_2(n) multiplications are
    # performed, so pad the working precision to absorb the rounding
    # error of the temporaries.
    prec2 = prec + int(4*math.log(n, 2) + 4)
    man, exp, bc = normalize(s[0], s[1], prec2, ROUND_FLOOR)
    pm, pe, pbc = fone
    while n:
        if n & 1:
            pm, pe, pbc = normalize(pm*man, pe+exp, prec2, ROUND_FLOOR)
            n -= 1
        man, exp, bc = normalize(man*man, exp+exp, prec2, ROUND_FLOOR)
        n = n // 2
    return normalize(pm, pe, prec, rounding)
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
"""
|
|
533
|
-
Square roots are most efficiently computed with Newton's method.
|
|
534
|
-
Two functions are implemented: _sqrt_fixed and _sqrt_fixed2.
|
|
535
|
-
|
|
536
|
-
_sqrt_fixed uses the iteration r_{n+1} = (r_n + y/r_n)/2,
|
|
537
|
-
which is just Newton's method applied to the equation r**2 = y.
|
|
538
|
-
|
|
539
|
-
_sqrt_fixed2 uses the iteration r_{n+1} = r_n*(3 - y*r_n**2)
|
|
540
|
-
to calculate 1/sqrt(y), and then multiplies by y to obtain
|
|
541
|
-
sqrt(y).
|
|
542
|
-
|
|
543
|
-
The first iteration is slightly faster at low precision levels, since
|
|
544
|
-
it essentially just requires one division at each step, compared to
|
|
545
|
-
the three multiplications in the second formula. However, the second
|
|
546
|
-
iteration is much better at extremely high precision levels. This is
|
|
547
|
-
due to the fact that Python uses the Karatsuba algorithm for integer
|
|
548
|
-
multiplication, which is asymptotically faster than its division
|
|
549
|
-
algorithm.
|
|
550
|
-
|
|
551
|
-
Both functions use fixed-point arithmetic and assume that the input y
|
|
552
|
-
is a big integer, i.e. given the integer y and precision prec,
|
|
553
|
-
they return floor(sqrt(x) * 2**prec) where y = floor(x * 2**prec).
|
|
554
|
-
|
|
555
|
-
The functions currently assume that x ~= 1. (TODO: make the code
|
|
556
|
-
work for x of arbitrary magnitude.) The main fsqrt() function
|
|
557
|
-
fiddles with the exponent of the input to reduce it to unit
|
|
558
|
-
magnitude before passing it to _sqrt_fixed or _sqrt_fixed2.
|
|
559
|
-
|
|
560
|
-
"""
|
|
561
|
-
|
|
562
|
-
def _sqrt_fixed(y, prec):
|
|
563
|
-
# get 50-bit initial guess from regular float math
|
|
564
|
-
if prec < 200:
|
|
565
|
-
r = int(y**0.5 * 2.0**(50-prec*0.5))
|
|
566
|
-
else:
|
|
567
|
-
r = int((y >> (prec-100))**0.5)
|
|
568
|
-
prevp = 50
|
|
569
|
-
for p in giant_steps(50, prec+8):
|
|
570
|
-
# Newton iteration: r_{n+1} = (r_{n} + y/r_{n})/2
|
|
571
|
-
# print "sqrt", p
|
|
572
|
-
r = lshift_quick(r, p-prevp-1) + (rshift_quick(y, prec-p-prevp+1)//r)
|
|
573
|
-
prevp = p
|
|
574
|
-
return r >> 8
|
|
575
|
-
|
|
576
|
-
def _sqrt_fixed2(y, prec):
    # ~50-bit initial guess for 1/sqrt(y) from float math
    r = float_to_pyfloat(normalize(y, -prec, 64, ROUND_FLOOR)) ** -0.5
    r = int(r * 2**50)
    prevp = 50
    for p in giant_steps(50, prec+8):
        # One step of r <- r*(3 - y*r**2)/2, carried out at precision p
        r2 = rshift_quick(r*r, 2*prevp - p)
        A = lshift_quick(r, p-prevp)
        T = rshift_quick(y, prec-p)
        S = (T*r2) >> p
        B = (3 << p) - S
        r = (A*B) >> (p+1)
        prevp = p
    # r now approximates 1/sqrt(y); multiply back by y and drop the
    # 8 guard bits
    r = (r * y) >> prec
    return r >> 8
|
|
591
|
-
|
|
592
|
-
def fsqrt(s, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """
    If s is a positive raw float, return its square root rounded to
    'prec' bits.

    NOTE(review): the final rounding is always done half-even; the
    'rounding' argument is accepted but not used by this implementation.
    """
    man, exp, bc = s
    if not man:
        return fzero
    if (man, exp) == (1, 0):
        return fone

    prec2 = prec + 10

    # Convert to a fixed-point number with prec2 bits.  The working
    # precision and the exponent are both made even so that the
    # exponent can be divided exactly in half.
    if prec2 & 1:
        prec2 += 1
    if exp & 1:
        exp -= 1
        man <<= 1
    shift = bitcount(man) - prec2
    shift -= shift & 1
    man = rshift_quick(man, shift)

    # The division-based iteration is faster below ~65000 bits
    if prec < 65000:
        man = _sqrt_fixed(man, prec2)
    else:
        man = _sqrt_fixed2(man, prec2)

    return normalize(man, (exp+shift-prec2)//2, prec, ROUND_HALF_EVEN)
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
def fhypot(x, y, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Floating-point hypotenuse, sqrt(x*x + y*y)."""
    # With either operand zero this reduces to an absolute value
    if y == fzero: return fabs(x, prec, rounding)
    if x == fzero: return fabs(y, prec, rounding)
    RF = ROUND_FLOOR
    hypot2 = fadd(fmul(x,x,prec+4,RF), fmul(y,y,prec+4,RF), prec+4, RF)
    return fsqrt(hypot2, prec, rounding)
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
#----------------------------------------------------------------------------#
|
|
631
|
-
# #
|
|
632
|
-
# Mathematical constants #
|
|
633
|
-
# #
|
|
634
|
-
#----------------------------------------------------------------------------#
|
|
635
|
-
|
|
636
|
-
# Only re-compute a constant if the precision level is raised
|
|
637
|
-
def _constmemo(f):
|
|
638
|
-
f.memo_prec = -1
|
|
639
|
-
f.memo_val = None
|
|
640
|
-
def calc(prec):
|
|
641
|
-
if prec == f.memo_prec: return f.memo_val
|
|
642
|
-
if prec < f.memo_prec: return f.memo_val >> (f.memo_prec-prec)
|
|
643
|
-
f.memo_val = f(prec)
|
|
644
|
-
f.memo_prec = prec
|
|
645
|
-
return f.memo_val
|
|
646
|
-
return calc
|
|
647
|
-
|
|
648
|
-
# Evaluate a Machin-like formula, i.e., a rational combination of
|
|
649
|
-
# of acot(n) or acoth(n) for specific integer values of n
|
|
650
|
-
def _machin(coefs, prec, hyperbolic=False):
|
|
651
|
-
prec += 10
|
|
652
|
-
def acot(x):
|
|
653
|
-
# Series expansion for atan/acot, optimized for integer arguments
|
|
654
|
-
s = w = (1<<prec)//x; x2 = x*x; n = 3
|
|
655
|
-
while 1:
|
|
656
|
-
w //= x2
|
|
657
|
-
term = w // n
|
|
658
|
-
if not term: break
|
|
659
|
-
if hyperbolic or n & 2 == 0: s += term
|
|
660
|
-
else: s -= term
|
|
661
|
-
n += 2
|
|
662
|
-
return s
|
|
663
|
-
s = 0
|
|
664
|
-
for a, b in coefs:
|
|
665
|
-
s += a * acot(b)
|
|
666
|
-
return (s >> 10)
|
|
667
|
-
|
|
668
|
-
"""
|
|
669
|
-
At low precision, pi can be calculated easily using Machin's formula
|
|
670
|
-
pi = 16*acot(5)-4*acot(239). For high precision, we use the Brent-Salamin
|
|
671
|
-
algorithm based on the arithmetic-geometric mean. See for example Wikipedia
|
|
672
|
-
(http://en.wikipedia.org/wiki/Brent-Salamin_algorithm) or "Pi and the AGM" by
|
|
673
|
-
Jonathan and Peter Borwein (Wiley, 1987). The algorithm (as stated in the
|
|
674
|
-
Wikipedia article) consists of setting
|
|
675
|
-
|
|
676
|
-
a_0 = 1; b_0 = 1/sqrt(2); t_0 = 1/4; p_0 = 1
|
|
677
|
-
|
|
678
|
-
and computing
|
|
679
|
-
|
|
680
|
-
a_{n+1} = (a_n + b_n)/2
|
|
681
|
-
b_{n+1} = sqrt(a_n * b_n)
|
|
682
|
-
t_{n+1} = t_n - p_n*(a_n - a_{n+1})**2
|
|
683
|
-
p_{n+1} = 2*p_n
|
|
684
|
-
|
|
685
|
-
for n = 0, 1, 2, 3, ..., after which the approximation is given by
|
|
686
|
-
pi ~= (a_n + b_n)**2 / (4*t_n). Each step roughly doubles the number of
|
|
687
|
-
correct digits.
|
|
688
|
-
"""
|
|
689
|
-
|
|
690
|
-
def _pi_agm(prec, verbose=False, verbose_base=10):
|
|
691
|
-
prec += 50
|
|
692
|
-
a = 1 << prec
|
|
693
|
-
if verbose: print " computing initial square root..."
|
|
694
|
-
b = _sqrt_fixed2(a>>1, prec)
|
|
695
|
-
t = a >> 2
|
|
696
|
-
p = 1
|
|
697
|
-
step = 1
|
|
698
|
-
while 1:
|
|
699
|
-
an = (a+b)>>1
|
|
700
|
-
adiff = a - an
|
|
701
|
-
if verbose:
|
|
702
|
-
logdiff = math.log(max(1, adiff), verbose_base)
|
|
703
|
-
digits = int(prec/math.log(verbose_base,2) - logdiff)
|
|
704
|
-
print " iteration", step, ("(accuracy ~= %i base-%i digits)" % \
|
|
705
|
-
(digits, verbose_base))
|
|
706
|
-
if p > 16 and abs(adiff) < 1000:
|
|
707
|
-
break
|
|
708
|
-
prod = (a*b)>>prec
|
|
709
|
-
b = _sqrt_fixed2(prod, prec)
|
|
710
|
-
t = t - p*((adiff**2) >> prec)
|
|
711
|
-
p = 2*p
|
|
712
|
-
a = an
|
|
713
|
-
step += 1
|
|
714
|
-
if verbose: print " final division"
|
|
715
|
-
return ((((a+b)**2) >> 2) // t) >> 50
|
|
716
|
-
|
|
717
|
-
@_constmemo
def pi_fixed(prec):
    """Fixed-point value of pi, scaled by 2**prec."""
    # Machin's formula wins at low precision, the AGM at high precision
    if prec < 10000:
        return _machin([(16, 5), (-4, 239)], prec)
    return _pi_agm(prec)

def fpi(prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Compute a floating-point approximation of pi"""
    return normalize(pi_fixed(prec+5), -prec-5, prec, rounding)
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
# Logarithms of integers can be computed easily using
|
|
730
|
-
# Machin-like formulas
|
|
731
|
-
|
|
732
|
-
@_constmemo
def log2_fixed(prec):
    """log(2) as a fixed-point integer with prec fractional bits."""
    # Machin-like combination of hyperbolic arccotangent terms
    # (the True flag selects the hyperbolic variant of _machin)
    terms = [(18, 26), (-2, 4801), (8, 8749)]
    return _machin(terms, prec, True)
|
|
735
|
-
|
|
736
|
-
def flog2(prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Floating-point approximation of log(2)."""
    wp = prec + 5  # guard bits for the final rounding
    return normalize(log2_fixed(wp), -wp, prec, rounding)
|
|
738
|
-
|
|
739
|
-
@_constmemo
def log10_fixed(prec):
    """log(10) as a fixed-point integer with prec fractional bits."""
    # Machin-like combination of hyperbolic arccotangent terms
    terms = [(46, 31), (34, 49), (20, 161)]
    return _machin(terms, prec, True)
|
|
742
|
-
|
|
743
|
-
def flog10(prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Floating-point approximation of log(10)."""
    wp = prec + 5  # guard bits for the final rounding
    return normalize(log10_fixed(wp), -wp, prec, rounding)
|
|
745
|
-
|
|
746
|
-
"""
|
|
747
|
-
Euler's constant (gamma) is computed using the Brent-McMillan formula,
|
|
748
|
-
gamma ~= A(n)/B(n) - log(n), where
|
|
749
|
-
|
|
750
|
-
A(n) = sum_{k=0,1,2,...} (n**k / k!)**2 * H(k)
|
|
751
|
-
B(n) = sum_{k=0,1,2,...} (n**k / k!)**2
|
|
752
|
-
H(k) = 1 + 1/2 + 1/3 + ... + 1/k
|
|
753
|
-
|
|
754
|
-
The error is bounded by O(exp(-4n)). Choosing n to be a power
|
|
755
|
-
of two, 2**p, the logarithm becomes particularly easy to calculate.
|
|
756
|
-
|
|
757
|
-
Reference:
|
|
758
|
-
Xavier Gourdon & Pascal Sebah, The Euler constant: gamma
|
|
759
|
-
http://numbers.computation.free.fr/Constants/Gamma/gamma.pdf
|
|
760
|
-
"""
|
|
761
|
-
|
|
762
|
-
@_constmemo
def gamma_fixed(prec):
    """Euler's constant gamma as a fixed-point integer with prec bits.

    Uses the Brent-McMillan formula gamma ~= A(n)/B(n) - log(n)
    (see the comment above) with n = 2**p, so log(n) reduces to
    p*log(2).  The error is O(exp(-4n)).
    """
    prec += 30  # guard bits, stripped again before returning
    # choose p such that exp(-4*(2**p)) < 2**-prec
    p = int(math.log((prec/4) * math.log(2), 2)) + 1
    n = 1<<p; r=one=1<<prec
    # A accumulates sum (n**k/k!)**2 * H(k); B accumulates sum (n**k/k!)**2;
    # H is the harmonic number H(k); r holds the current term (n**k/k!)**2
    # and eventually underflows to 0, terminating the loop.
    # (The original also initialized unused locals npow and d; removed.)
    H, A, B, k = 0, 0, 0, 1
    while r:
        A += (r * H) >> prec
        B += r
        r = r * (n*n) // (k*k)
        H += one // k
        k += 1
    S = ((A<<prec) // B) - p*log2_fixed(prec)
    return S >> 30
|
|
777
|
-
|
|
778
|
-
def fgamma(prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Floating-point approximation of Euler's constant gamma."""
    wp = prec + 5  # guard bits for the final rounding
    return normalize(gamma_fixed(wp), -wp, prec, rounding)
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
#----------------------------------------------------------------------------#
|
|
783
|
-
# #
|
|
784
|
-
# Transcendental functions #
|
|
785
|
-
# #
|
|
786
|
-
#----------------------------------------------------------------------------#
|
|
787
|
-
|
|
788
|
-
"""
|
|
789
|
-
The exponential function has a rapidly convergent Maclaurin series:
|
|
790
|
-
|
|
791
|
-
exp(x) = 1 + x + x**2/2! + x**3/3! + x**4/4! + ...
|
|
792
|
-
|
|
793
|
-
The series can be summed very easily using fixed-point arithmetic.
|
|
794
|
-
The convergence can be improved further, using a trick due to
|
|
795
|
-
Richard P. Brent: instead of computing exp(x) directly, we choose a
|
|
796
|
-
small integer r (say, r=10) and compute exp(x/2**r)**(2**r).
|
|
797
|
-
|
|
798
|
-
The optimal value for r depends on the Python platform, the magnitude
|
|
799
|
-
of x and the target precision, and has to be estimated from
|
|
800
|
-
experimental timings. One test with x ~= 0.3 showed that
|
|
801
|
-
r = 2.2*prec**0.42 gave a good fit to the optimal values for r for
|
|
802
|
-
prec between 1 and 10000 bits, on one particular machine.
|
|
803
|
-
|
|
804
|
-
This optimization makes the summation about twice as fast at
|
|
805
|
-
low precision levels and much faster at high precision
|
|
806
|
-
(roughly five times faster at 1000 decimal digits).
|
|
807
|
-
|
|
808
|
-
If |x| is very large, we first rewrite it as t + n*log(2) with the
|
|
809
|
-
integer n chosen such that |t| <= log(2), and then calculate
|
|
810
|
-
exp(x) as exp(t)*(2**n), using the Maclaurin series for exp(t)
|
|
811
|
-
(the multiplication by 2**n just amounts to shifting the exponent).
|
|
812
|
-
"""
|
|
813
|
-
|
|
814
|
-
def exp_series(x, prec):
    """exp of a fixed-point x with prec bits, returned in fixed point.

    Uses Brent's trick (see the comment above): sum the Maclaurin
    series for exp(x/2**r), then square the result r times, with
    r ~= 2.2*prec**0.42 chosen from empirical timings.
    """
    r = int(2.2 * prec ** 0.42)
    # XXX: more careful calculation of guard bits
    guards = r + 3
    if prec > 60:
        guards += int(math.log(prec))
    prec2 = prec + guards
    # divide the argument by 2**r while widening to the guarded precision
    x = rshift_quick(x, r - guards)
    s = (1 << prec2) + x  # partial sum, seeded with the terms 1 + x
    a = x                 # current term x**k / k!
    k = 2
    # Sum exp(x/2**r)
    while 1:
        a = ((a*x) >> prec2) // k
        if not a: break  # term underflowed to zero; series is done
        s += a
        k += 1
    # Calculate s**(2**r) by repeated squaring
    for j in range(r):
        s = (s*s) >> prec2
    return s >> guards
|
|
835
|
-
|
|
836
|
-
def fexp(x, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Exponential of a raw float x = (man, exp, bc).

    Large arguments are reduced via x = t + n*log(2) with |t| <= log(2),
    so exp(x) = exp(t) * 2**n and the 2**n factor is just an exponent
    shift in the final normalize call.
    """
    man, exp, bc = x
    # extra precision needs to be similar in magnitude to log_2(|x|)
    prec2 = prec + 4 + max(0, bc+exp)
    t = make_fixed(x, prec2)
    # abs(x) > 1?
    if exp+bc > 1: #fcmp(fabs(x), fone) > 0:
        lg2 = log2_fixed(prec2)
        # t := t mod log(2); the quotient n becomes the exponent offset
        n, t = divmod(t, lg2)
    else:
        n = 0
    return normalize(exp_series(t, prec2), -prec2+n, prec, rounding)
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
"""
|
|
851
|
-
The basic strategy for computing log(x) is to set r = log(x) and use
|
|
852
|
-
Newton's method to solve the equation exp(r) = x. We set the initial
|
|
853
|
-
value r_0 to math.log(x) and then iterate r_{n+1} = r_n + exp(-r_n) - 1
|
|
854
|
-
until convergence. As with square roots, we increase the working
|
|
855
|
-
precision dynamically during the process so that only one full-precision
|
|
856
|
-
evaluation of exp is required.
|
|
857
|
-
|
|
858
|
-
log(x) is small for most inputs, so the r values can safely be
|
|
859
|
-
computed using fixed-point arithmetic. However, when x has a very
|
|
860
|
-
large or small exponent, we can improve performance through the
|
|
861
|
-
normalization log(t * 2**n) = log(t) + n*log(2), choosing n such
|
|
862
|
-
that 0.5 <= t <= 1 (for example).
|
|
863
|
-
|
|
864
|
-
There are some caveats: if x is extremely close to 1, the working
|
|
865
|
-
precision must be increased to maintain high relative precision in the
|
|
866
|
-
output (alternatively, the series approximation for log(1+x) could
|
|
867
|
-
be used in that case).
|
|
868
|
-
"""
|
|
869
|
-
|
|
870
|
-
# This function performs the Newton iteration using fixed-point
|
|
871
|
-
# arithmetic. x is assumed to have magnitude ~= 1
|
|
872
|
-
def _log_newton(x, prec):
    """Newton iteration for log of a fixed-point x with magnitude ~= 1.

    Solves exp(r) = x for r, starting from a 50-bit machine-float
    approximation and doubling the working precision via giant_steps so
    that only the final exp_series evaluation runs at full precision.
    """
    # 50-bit approximation
    #r = int(_clog(Float((x, -prec), 64)) * 2.0**50)
    fx = math.log(float_to_pyfloat((x, -prec, 1)))
    r = int(fx * 2.0**50)
    prevp = 50
    for p in giant_steps(50, prec+8):
        # bring the previous estimate up to the new working precision p
        rb = lshift_quick(r, p-prevp)
        e = exp_series(-rb, p)
        # r_{n+1} = r_n + x*exp(-r_n) - 1, evaluated with p bits
        r = rb + ((rshift_quick(x, prec-p)*e)>>p) - (1 << p)
        prevp = p
    return r >> 8  # drop the 8 guard bits added above
|
|
884
|
-
|
|
885
|
-
def flog(x, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Natural logarithm of a positive raw float x.

    Splits x = t * 2**n with t of magnitude ~1 and computes
    log(x) = log(t) + n*log(2).  Extra working precision is added when
    x is very close to 1, where cancellation would otherwise destroy
    relative accuracy.  Raises ValueError for zero or negative input.
    """
    if x == fzero: raise ValueError, "logarithm of 0"
    if x == fone: return fzero
    man, exp, bc = x
    if man < 0: raise ValueError, "logarithm of a negative number"
    # Estimated precision needed for log(t) + n*log(2)
    prec2 = prec + int(math.log(1+abs(bc+exp), 2)) + 10
    # Watch out for the case when x is very close to 1
    if -1 < bc + exp < 2:
        near_one = fabs(fsub(x, fone))
        if near_one == 0:
            return fzero
        # estimate how close: add about -log2(|x-1|) extra bits
        prec2 += -(near_one[1]) - bitcount(near_one[0])
    # Separate mantissa and exponent, calculate, join parts
    t = rshift_quick(man, bc-prec2)
    l = _log_newton(t, prec2)
    a = (exp + bc) * log2_fixed(prec2)
    return normalize(l+a, -prec2, prec, rounding)
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
## XXX: need to increase working precision here
|
|
907
|
-
#def fpow(x, y):
|
|
908
|
-
# return exp(log(x) * y)
|
|
909
|
-
|
|
910
|
-
|
|
911
|
-
"""
|
|
912
|
-
We compute sin(x) around 0 from its Taylor series, and cos(x) around 0
|
|
913
|
-
from sqrt(1-sin(x)**2). This way we can simultaneously compute sin and
|
|
914
|
-
cos, which are often needed together (e.g. for the tangent function or
|
|
915
|
-
the complex exponential), with little extra cost compared to computing
|
|
916
|
-
just one of them. The main reason for computing sin first (and not cos
|
|
917
|
-
from sin) is to obtain high relative accuracy for x extremely close to
|
|
918
|
-
0, where the operation sqrt(1-cos(x)**2) can cause huge cancellations.
|
|
919
|
-
|
|
920
|
-
For any value of x, we can reduce it to the interval A = [-pi/4, pi/4]
|
|
921
|
-
(where the Taylor series converges quickly) by translations, changing
|
|
922
|
-
signs, and switching the roles of cos and sin:
|
|
923
|
-
|
|
924
|
-
A : sin(x) = sin(x) cos(x) = cos(x)
|
|
925
|
-
B : sin(x) = cos(x-pi/2) cos(x) = -sin(x-pi/2)
|
|
926
|
-
C : sin(x) = -sin(x-pi) cos(x) = -cos(x-pi)
|
|
927
|
-
D : sin(x) = -cos(x-3*pi/2) cos(x) = sin(x-3*pi/2)
|
|
928
|
-
|
|
929
|
-
| A | B | C | D |
|
|
930
|
-
v v v v v
|
|
931
|
-
|
|
932
|
-
1 | ____ .......... ____
|
|
933
|
-
| _.. .. __
|
|
934
|
-
| . __ . __
|
|
935
|
-
| .. _ .. _
|
|
936
|
-
| . __ . __
|
|
937
|
-
-----| -.----------_-----------.-------------_-----------
|
|
938
|
-
| . _ .. _ .
|
|
939
|
-
| __ . __ .
|
|
940
|
-
| _ .. _ ..
|
|
941
|
-
| __ . __ .
|
|
942
|
-
| __ _.. ..
|
|
943
|
-
-1 | _________ ..........
|
|
944
|
-
0 pi 2*pi
|
|
945
|
-
|
|
946
|
-
"""
|
|
947
|
-
|
|
948
|
-
def _sin_series(x, prec):
    """Maclaurin series for sin of a fixed-point x (prec fractional bits).

    Sums x - x**3/3! + x**5/5! - ... until the term underflows to zero;
    intended for |x| <= pi/4 where convergence is fast.
    """
    xx = (x * x) >> prec
    term = x
    total = x
    n = 3
    while term:
        # next term: multiply by x**2 and divide by -(n)(n-1),
        # which also alternates the sign
        term = ((term * xx) >> prec) // (-n * (n - 1))
        total += term
        n += 2
    return total
|
|
957
|
-
|
|
958
|
-
def _trig_reduce(x, prec):
    """Reduce a fixed-point angle x modulo pi/2.

    Returns (n, rem) where rem lies in [-pi/4, pi/4) and n counts how
    many half-pi intervals were removed (used to pick the quadrant).
    """
    whole_pi = pi_fixed(prec)
    half_pi = whole_pi >> 1
    quarter_pi = whole_pi >> 2
    n, rem = divmod(x + quarter_pi, half_pi)
    return n, rem - quarter_pi
|
|
965
|
-
|
|
966
|
-
def cos_sin(x, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """
    cos_sin(x) calculates both the cosine and the sine of x rounded
    to the nearest Float value, and returns the tuple (cos(x), sin(x)).
    """
    man, exp, bc = x
    # the argument reduction loses about log2(|x|) bits, so widen the
    # working precision accordingly
    bits_from_unit = abs(bc + exp)
    prec2 = prec + bits_from_unit + 15
    xf = make_fixed(x, prec2)
    n, rx = _trig_reduce(xf, prec2)
    # quadrant decides which of sin/cos the series computes directly and
    # the signs (cases A/B/C/D in the table in the comment above)
    case = n % 4
    one = 1 << prec2
    if case == 0:
        # sin from the series; cos = sqrt(1 - sin**2)
        s = _sin_series(rx, prec2)
        c = _sqrt_fixed(one - ((s*s)>>prec2), prec2)
    elif case == 1:
        # cos(x) = -sin(x - pi/2)
        c = -_sin_series(rx, prec2)
        s = _sqrt_fixed(one - ((c*c)>>prec2), prec2)
    elif case == 2:
        # sin(x) = -sin(x - pi); cos flips sign too
        s = -_sin_series(rx, prec2)
        c = -_sqrt_fixed(one - ((s*s)>>prec2), prec2)
    elif case == 3:
        # cos(x) = sin(x - 3*pi/2)
        c = _sin_series(rx, prec2)
        s = -_sqrt_fixed(one - ((c*c)>>prec2), prec2)
    c = normalize(c, -prec2, prec, rounding)
    s = normalize(s, -prec2, prec, rounding)
    return c, s
|
|
993
|
-
|
|
994
|
-
def fcos(x, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Cosine of a raw float x."""
    c, _unused_sin = cos_sin(x, prec, rounding)
    return c
|
|
996
|
-
|
|
997
|
-
def fsin(x, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Sine of a raw float x."""
    _unused_cos, s = cos_sin(x, prec, rounding)
    return s
|
|
999
|
-
|
|
1000
|
-
def ftan(x, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Tangent of a raw float x, computed as sin(x)/cos(x)."""
    cos_x, sin_x = cos_sin(x, prec+2, rounding)
    return fdiv(sin_x, cos_x, prec, rounding)
|
|
1003
|
-
|
|
1004
|
-
#----------------------------------------------------------------------
|
|
1005
|
-
# Inverse tangent
|
|
1006
|
-
#
|
|
1007
|
-
|
|
1008
|
-
"""
|
|
1009
|
-
Near x = 0, use atan(x) = x - x**3/3 + x**5/5 - ...
|
|
1010
|
-
Near x = 1, use atan(x) = y/x * (1 + 2/3*y + 2*4/3/5*y**2 + ...)
|
|
1011
|
-
where y = x**2/(1+x**2).
|
|
1012
|
-
|
|
1013
|
-
TODO: these series are not impressively fast. It is probably better
|
|
1014
|
-
to calculate atan from tan, using Newton's method or even the
|
|
1015
|
-
secant method.
|
|
1016
|
-
"""
|
|
1017
|
-
|
|
1018
|
-
def _atan_series_1(x, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """atan of a small raw float x via x - x**3/3 + x**5/5 - ..."""
    man, exp, bc = x
    # Increase absolute precision when extremely close to 0
    bc = bitcount(man)
    diff = -(bc + exp)   # roughly -log2(|x|)
    prec2 = prec
    if diff > 10:
        if 3*diff - 4 > prec: # x**3 term vanishes; atan(x) ~x
            return normalize(man, exp, prec, rounding)
        prec2 = prec + diff
    prec2 += 15 # XXX: better estimate for number of guard bits
    x = make_fixed(x, prec2)
    x2 = (x*x)>>prec2; one = 1<<prec2; s=a=x
    # a tracks x**(2n+1); the (-1)**n factor alternates the term sign
    for n in xrange(1, 1000000):
        a = (a*x2) >> prec2
        s += a // ((-1)**n * (n+n+1))
        if -100 < a < 100:
            break
    return normalize(s, -prec2, prec, rounding)
|
|
1037
|
-
|
|
1038
|
-
def _atan_series_2(x, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """atan of a raw float x near 1, via the series in the comment above:
    atan(x) = y/x * (1 + 2/3*y + 2*4/(3*5)*y**2 + ...) with
    y = x**2/(1+x**2)."""
    prec2 = prec + 15  # XXX: guard-bit count not carefully derived
    x = make_fixed(x, prec2)
    one = 1<<prec2; x2 = (x*x)>>prec2; y=(x2<<prec2)//(one+x2)
    s = a = one
    for n in xrange(1, 1000000):
        # each term multiplies the previous one by y * 2n/(2n+1)
        a = ((a*y)>>prec2) * (2*n) // (2*n+1)
        if a < 100:
            break
        s += a
    # multiply the sum by y/x and convert back to a raw float
    return normalize(y*s//x, -prec2, prec, rounding)
|
|
1049
|
-
|
|
1050
|
-
# Raw floats (man, exp, bc) marking where fatan switches strategies:
_cutoff_1 = (5, -3, 3) # 5 * 2**-3 = 0.625 (~0.6): series 1 -> series 2
_cutoff_2 = (3, -1, 2) # 3 * 2**-1 = 1.5: series 2 -> pi/2 - atan(1/x)
|
|
1052
|
-
|
|
1053
|
-
def fatan(x, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Inverse tangent of a raw float x.

    Negative x uses atan(-x) = -atan(x); small x the Maclaurin series
    (_atan_series_1); medium x the transformed series (_atan_series_2);
    large x the identity atan(x) = pi/2 - atan(1/x).
    """
    if x[0] < 0:
        # reduce to the positive case and negate the mantissa
        t = fatan(fneg(x), prec+4, ROUND_FLOOR)
        return normalize(-t[0], t[1], prec, rounding)
    if fcmp(x, _cutoff_1) < 0:
        return _atan_series_1(x)
    if fcmp(x, _cutoff_2) < 0:
        return _atan_series_2(x)
    # For large x, use atan(x) = pi/2 - atan(1/x)
    if x[1] > 10*prec:
        # NOTE(review): for such huge x the atan(1/x) correction looks
        # negligible and this branch appears meant to return pihalf
        # directly; as written it still falls through to the subtraction
        # below — confirm against later mpmath versions.
        pi = fpi(prec, rounding)
        pihalf = pi[0], pi[1]-1, pi[2]
    else:
        pi = fpi(prec+4, ROUND_FLOOR)
        pihalf = pi[0], pi[1]-1, pi[2]  # halve pi by decrementing the exponent
    t = fatan(fdiv(fone, x, prec+4, ROUND_FLOOR), prec+4, ROUND_FLOOR)
    return fsub(pihalf, t, prec, rounding)
|
|
1070
|
-
|
|
1071
|
-
|
|
1072
|
-
#----------------------------------------------------------------------------#
|
|
1073
|
-
# #
|
|
1074
|
-
# Complex functions #
|
|
1075
|
-
# #
|
|
1076
|
-
#----------------------------------------------------------------------------#
|
|
1077
|
-
|
|
1078
|
-
def fcabs(re, im, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    # |re + im*i| = sqrt(re**2 + im**2), delegated to the real hypot
    return fhypot(re, im, prec, rounding)
|
|
1080
|
-
|
|
1081
|
-
|
|
1082
|
-
# For complex square roots, we have sqrt(a+b*I) = sqrt((r+a)/2) +
|
|
1083
|
-
# I*b/sqrt(2*(r+a)) where r = abs(a+b*I), when a+b*I is not a negative
|
|
1084
|
-
# real number (http://en.wikipedia.org/wiki/Square_root)
|
|
1085
|
-
# For complex square roots, we have sqrt(a+b*I) = sqrt((r+a)/2) +
# I*b/sqrt(2*(r+a)) where r = abs(a+b*I), when a+b*I is not a negative
# real number (http://en.wikipedia.org/wiki/Square_root)
def fcsqrt(re, im, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Principal square root of the complex number re + im*i.

    Returns the result as a (real, imaginary) pair of raw floats.
    """
    if re == im == fzero:
        return (re, im)
    # negative real axis: sqrt(-a) = sqrt(a)*i exactly
    if re[0] < 0 and im[0] == 0:
        return (fzero, fsqrt(fneg(re, prec, rounding), prec, rounding))
    RF, prec2 = ROUND_FLOOR, prec+4
    # rpx = |z| + re, shared by both components of the formula above
    rpx = fadd(fcabs(re, im, prec2, RF), re, prec2, RF)
    are = fsqrt(fdiv(rpx, ftwo, prec2, RF), prec, rounding)
    # BUG FIX: the inner fsqrt previously used its default precision
    # instead of the working precision prec2, losing accuracy whenever
    # prec exceeds the default
    aim = fdiv(im, fsqrt(fmul(rpx, ftwo, prec2, RF), prec2, RF), prec, rounding)
    return are, aim
|
|
1095
|
-
|
|
1096
|
-
def fcexp(re, im, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Complex exponential: exp(re + im*i) = exp(re)*(cos(im) + sin(im)*i)."""
    wp = prec + 4
    RF = ROUND_FLOOR
    radius = fexp(re, wp, RF)
    cos_im, sin_im = cos_sin(im, wp, RF)
    return fmul(radius, cos_im, prec, rounding), fmul(radius, sin_im, prec, rounding)
|
|
1100
|
-
|
|
1101
|
-
def fcsin(re, im, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Complex sine: sin(x+y*i) = sin(x)*cosh(y) + cos(x)*sinh(y)*i."""
    wp = prec + 4
    RF = ROUND_FLOOR
    cos_re, sin_re = cos_sin(re, wp, RF)
    # cosh/sinh of the imaginary part from exp(im) and its reciprocal
    ep = fexp(im, wp, RF)
    em = fdiv(fone, ep, wp, RF)
    cosh_im = fmul(fadd(ep, em, wp, RF), fhalf, wp, RF)
    sinh_im = fmul(fsub(ep, em, wp, RF), fhalf, wp, RF)
    return fmul(sin_re, cosh_im, prec, rounding), fmul(cos_re, sinh_im, prec, rounding)
|
|
1111
|
-
|
|
1112
|
-
def fccos(re, im, prec=STANDARD_PREC, rounding=ROUND_HALF_EVEN):
    """Complex cosine: cos(x+y*i) = cos(x)*cosh(y) - sin(x)*sinh(y)*i."""
    wp = prec + 4
    RF = ROUND_FLOOR
    cos_re, sin_re = cos_sin(re, wp, RF)
    # cosh/sinh of the imaginary part from exp(im) and its reciprocal
    ep = fexp(im, wp, RF)
    em = fdiv(fone, ep, wp, RF)
    cosh_im = fmul(fadd(ep, em, wp, RF), fhalf, wp, RF)
    sinh_im = fmul(fsub(ep, em, wp, RF), fhalf, wp, RF)
    real_part = fmul(cos_re, cosh_im, prec, rounding)
    imag_part = fneg(fmul(sin_re, sinh_im, prec, rounding), prec, rounding)
    return real_part, imag_part
|