mpmath 0.2__zip → 0.4__zip

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,252 @@
1
+ """
2
+ Functions for basic operations on raw mpfs: normalization, comparison,
3
+ addition, subtraction, multiplication, division, integer powers.
4
+ """
5
+
6
+ from util import *
7
+ import random as _random
8
+
9
+
10
# Some commonly needed raw mpfs.  A raw mpf is a tuple
# (man, exp, bc) with value man * 2**exp, where bc is the number of
# bits in the mantissa.
fzero = (0, 0, 0)    # 0 (canonical zero: man = exp = bc = 0)
fone = (1, 0, 1)     # 1
ftwo = (1, 1, 1)     # 2 = 1 * 2**1
ften = (5, 1, 3)     # 10 = 5 * 2**1
fhalf = (1, -1, 1)   # 0.5 = 1 * 2**-1
16
+
17
+
18
# Pre-computing and avoiding calls to trailing_zeros() in
# normalize improves performance at 15-digit precision by ~15%.
# A list comprehension (rather than map()) guarantees an indexable
# list on every Python version.
shift_table = [trailing_zeros(n) for n in range(256)]
21
+
22
+
23
def normalize(man, exp, prec, rounding):
    """Create a raw mpf with value (man * 2**exp), rounded in the given
    direction whenever the mantissa holds more than prec bits.  Trailing
    zero bits are also stripped from the mantissa, so that every value
    has exactly one canonical representation."""

    # Canonical zero; the bit-level tricks below need a nonzero mantissa
    if not man:
        return fzero

    # Measure the mantissa.  bitcount2 is slightly quicker when the
    # mantissa is expected to be small.
    if prec < 100:
        bc = bitcount2(man)
    else:
        bc = bitcount(man)

    # Round away any excess bits
    if bc > prec:
        shift = bc - prec
        man = rshift(man, shift, rounding)
        exp += shift
        bc = prec

    # Strip trailing zero bits: whole bytes first, then the remainder
    # via the precomputed shift table
    if not man & 1:
        while not man & 0xff:
            man >>= 8
            exp += 8
            bc -= 8
        zeros = shift_table[man & 0xff]
        man >>= zeros
        exp += zeros
        bc -= zeros

    # Rounding up in rshift() may have turned the mantissa into a
    # power of two, in which case the bit count above is stale
    if man == 1 or man == -1:
        bc = 1

    return (man, exp, bc)
61
+
62
+
63
def feq(s, t):
    """Test two raw mpfs for equality.  Raw mpfs are plain tuples, so
    this is ordinary tuple comparison; the function exists only for the
    sake of a complete API."""
    if s is t:
        return True
    return s == t
67
+
68
+
69
def fcmp(s, t):
    """Compare the raw mpfs s and t.  Return -1 if s < t, 0 if s == t,
    and 1 if s > t.  (Same convention as Python's cmp() function.)"""

    # A comparison amounts to determining the sign of s-t, but a full
    # subtraction is relatively slow, so the components are inspected
    # first.
    sman, sexp, sbc = s
    tman, texp, tbc = t

    # Trivial cases: a zero operand, or operands of opposite sign
    if not tman:
        return cmp(sman, 0)
    if not sman:
        return cmp(0, tman)
    if sman > 0 and tman < 0:
        return 1
    if sman < 0 and tman > 0:
        return -1

    # Equal exponents: plain integer comparison of the mantissas
    if sexp == texp:
        return cmp(sman, tman)

    # Compare positions of the highest set bits.  If they differ, the
    # magnitudes differ, and the (shared) sign decides the order.
    shi = sbc + sexp
    thi = tbc + texp
    if shi != thi:
        if sman > 0:
            return cmp(shi, thi)
        return cmp(thi, shi)

    # Same leading-bit position: subtract at a few bits of precision
    # and look at the sign of the difference.
    return cmp(fsub(s, t, 5, ROUND_FLOOR)[0], 0)
102
+
103
+
104
def fpos(s, prec, rounding):
    """Calculate 0+s for a raw mpf, i.e. round s to the specified
    precision (s comes back unchanged when its mantissa already fits)."""
    man, exp, _ = s
    return normalize(man, exp, prec, rounding)
109
+
110
+
111
def fadd(s, t, prec, rounding):
    """Add two raw mpfs, rounding the result to prec bits in the
    specified direction."""

    # Order the operands so that s carries the larger exponent
    if s[1] < t[1]:
        s, t = t, s
    sman, sexp, sbc = s
    tman, texp, tbc = t

    # Adding zero reduces to rounding the other operand.  Zero always
    # has exp = 0; without this check its partner's mantissa could get
    # shifted an enormous number of bits below.
    if not tman:
        return normalize(sman, sexp, prec, rounding)
    if not sman:
        return normalize(tman, texp, prec, rounding)

    #------------------------------------------------------------------------
    # When one number is huge and the other tiny -- specifically, when
    # their mantissas do not overlap at all at the current precision --
    # most of the work can be skipped:
    #     precision
    #    | |
    #     111111111
    #  +                  222222222
    #     ------------------------
    #  #  1111111110000...
    #
    delta = (sbc + sexp) - (tbc + texp)
    if delta > prec + 5:  # an arbitrary margin ~> 3
        # The result may still need to round up or down, so shift s and
        # plant a dummy bit outside the precision range to force the
        # rounding in the right direction.
        offset = min(delta + 3, prec + 3)
        sman <<= offset
        if tman > 0:
            sman += 1
        else:
            sman -= 1
        return normalize(sman, sexp - offset, prec, rounding)

    #------------------------------------------------------------------------
    # General case: rebase both numbers to min(s.exp, t.exp), add the
    # mantissas exactly as integers, and round once at the end.
    #         exp = 0
    #             |
    #             v
    #          11111111100000   <-- s.man (padded with zeros by shifting)
    #     +        222222222    <-- t.man (no shifting necessary)
    #     --------------
    #     =    11111333333333
    #
    return normalize((sman << (sexp - texp)) + tman, texp, prec, rounding)
163
+
164
+
165
def fsub(s, t, prec, rounding):
    """Return the difference s-t of two raw mpfs.  Implemented by
    negating t (exact) and delegating to fadd."""
    tman, texp, tbc = t
    return fadd(s, (-tman, texp, tbc), prec, rounding)
169
+
170
+
171
def fneg(s, prec, rounding):
    """Negate a raw mpf (return -s), rounding the result to the
    specified precision."""
    man, exp, _ = s
    return normalize(-man, exp, prec, rounding)
175
+
176
+
177
def fneg_exact(s):
    """Negate a raw mpf (return -s) without any rounding; only the sign
    of the mantissa flips, so the result is exact."""
    man, exp, bc = s
    return (-man, exp, bc)
180
+
181
+
182
def fabs(s, prec, rounding):
    """Return abs(s) of the raw mpf s, rounded to the specified
    precision."""
    man, exp, _ = s
    # abs() on the mantissa covers both sign branches
    return normalize(abs(man), exp, prec, rounding)
189
+
190
+
191
def fmul(s, t, prec, rounding):
    """Return the product of two raw mpfs, s*t, rounded to the
    specified precision."""
    # Multiply the mantissas exactly and add the exponents; normalize()
    # does all the rounding.  A possible optimization would be to throw
    # away some bits first when prec is much smaller than sbc+tbc.
    return normalize(s[0]*t[0], s[1]+t[1], prec, rounding)
199
+
200
+
201
def fdiv(s, t, prec, rounding):
    """Floating-point division s/t of two raw mpfs."""
    sman, sexp, sbc = s
    tman, texp, tbc = t

    # Same strategy as for addition: when the division is inexact,
    # perturb the quotient a few bits outside the precision range so
    # that directed rounding comes out right.
    shift = max(prec - sbc + tbc + 5, 5)
    q, r = divmod(sman << shift, tman)
    if r:
        q = (q << 5) + 1
        shift += 5
    return normalize(q, sexp - texp - shift, prec, rounding)
214
+
215
+
216
def fshift_exact(s, n):
    """Quickly multiply the raw mpf s by 2**n without rounding; only
    the exponent changes, so the operation is exact.  Zero is returned
    unchanged to keep its canonical form."""
    man, exp, bc = s
    if man:
        return man, exp + n, bc
    return s
222
+
223
+
224
# TODO: use directed rounding all the way through (and, account for signs?)
def fpow(s, n, prec, rounding):
    """Compute s**n for a raw mpf s and an integer n, rounded to the
    specified precision."""
    n = int(n)
    if n == 0: return fone
    if n == 1: return normalize(s[0], s[1], prec, rounding)
    if n == 2: return fmul(s, s, prec, rounding)
    if n == -1: return fdiv(fone, s, prec, rounding)
    if n < 0:
        # Compute s**|n| at a few extra bits, then invert
        return fdiv(fone, fpow(s, -n, prec+3, ROUND_FLOOR), prec, rounding)

    # Binary exponentiation.  Roughly log_2(n) inexact multiplications
    # are performed, so the working precision is raised accordingly to
    # absorb the accumulated rounding error.
    prec2 = prec + int(4*math.log(n, 2) + 4)
    man, exp, bc = normalize(s[0], s[1], prec2, ROUND_FLOOR)
    pm, pe, pbc = fone
    while n:
        if n & 1:
            pm, pe, pbc = normalize(pm*man, pe+exp, prec2, ROUND_FLOOR)
        n >>= 1
        # Only square when another bit remains; the original version
        # performed one final full-precision squaring whose result was
        # never used.
        if n:
            man, exp, bc = normalize(man*man, exp+exp, prec2, ROUND_FLOOR)
    return normalize(pm, pe, prec, rounding)
247
+
248
+
249
def frand(prec):
    """Return a raw mpf drawn uniformly from [0, 1), with prec bits in
    the mantissa."""
    man = _random.randrange(0, 1 << prec)
    return normalize(man, -prec, prec, ROUND_FLOOR)
@@ -0,0 +1,350 @@
1
+ """
2
+ Transcendental functions for real numbers:
3
+ * exp
4
+ * log
5
+ * sin/cos/tan
6
+ * sinh/cosh/tanh
7
+
8
+ """
9
+
10
+ from util import *
11
+ from floatop import *
12
+ from squareroot import *
13
+ from constants import *
14
+ from convert import *
15
+
16
+
17
+ """
18
+ The exponential function has a rapidly convergent Maclaurin series:
19
+
20
+ exp(x) = 1 + x + x**2/2! + x**3/3! + x**4/4! + ...
21
+
22
+ The series can be summed very easily using fixed-point arithmetic.
23
+ The convergence can be improved further, using a trick due to
24
+ Richard P. Brent: instead of computing exp(x) directly, we choose a
25
+ small integer r (say, r=10) and compute exp(x/2**r)**(2**r).
26
+
27
+ The optimal value for r depends on the Python platform, the magnitude
28
+ of x and the target precision, and has to be estimated from
29
+ experimental timings. One test with x ~= 0.3 showed that
30
+ r = 2.2*prec**0.42 gave a good fit to the optimal values for r for
31
+ prec between 1 and 10000 bits, on one particular machine.
32
+
33
+ This optimization makes the summation about twice as fast at
34
+ low precision levels and much faster at high precision
35
+ (roughly five times faster at 1000 decimal digits).
36
+
37
+ If |x| is very large, we first rewrite it as t + n*log(2) with the
38
+ integer n chosen such that |t| <= log(2), and then calculate
39
+ exp(x) as exp(t)*(2**n), using the Maclaurin series for exp(t)
40
+ (the multiplication by 2**n just amounts to shifting the exponent).
41
+ """
42
+
43
def exp_series(x, prec):
    """Sum exp(x) in fixed-point arithmetic using Brent's argument
    reduction: exp(x) = exp(x/2**r)**(2**r)."""
    # Experimentally fitted formula for the reduction parameter r
    r = int(2.2 * prec ** 0.42)
    # XXX: more careful calculation of guard bits
    guards = r + 3
    if prec > 60:
        guards += int(math.log(prec))
    prec2 = prec + guards
    x = rshift_quick(x, r - guards)
    # Maclaurin series for exp(x/2**r): 1 + x + x**2/2! + ...
    s = (1 << prec2) + x
    term = x
    k = 2
    while 1:
        term = ((term*x) >> prec2) // k
        if not term:
            break
        s += term
        k += 1
    # Undo the reduction: square the sum r times
    for _ in range(r):
        s = (s*s) >> prec2
    return s >> guards
64
+
65
def fexp(x, prec, rounding):
    """Compute exp(x) for a raw mpf x, rounded to the specified
    precision."""
    man, exp, bc = x
    # Extra working precision needs to be similar in magnitude to
    # log_2(|x|)
    prec2 = prec + 6 + max(0, bc + exp)
    t = make_fixed(x, prec2)
    if exp + bc > 1:
        # |x| is (roughly) greater than 1: write x = t + n*log(2) with
        # |t| <= log(2), so exp(x) = exp(t) * 2**n and the 2**n factor
        # is just an exponent shift
        n, t = divmod(t, log2_fixed(prec2))
    else:
        n = 0
    return normalize(exp_series(t, prec2), -prec2 + n, prec, rounding)
77
+
78
+
79
+ """
80
+ The basic strategy for computing log(x) is to set r = log(x) and use
81
+ Newton's method to solve the equation exp(r) = x. We set the initial
82
+ value r_0 to math.log(x) and then iterate r_{n+1} = r_n + exp(-r_n) - 1
83
+ until convergence. As with square roots, we increase the working
84
+ precision dynamically during the process so that only one full-precision
85
+ evaluation of exp is required.
86
+
87
+ log(x) is small for most inputs, so the r values can safely be
88
+ computed using fixed-point arithmetic. However, when x has a very
89
+ large or small exponent, we can improve performance through the
90
+ normalization log(t * 2**n) = log(t) + n*log(2), choosing n such
91
+ that 0.5 <= t <= 1 (for example).
92
+
93
+ There are some caveats: if x is extremely close to 1, the working
94
+ precision must be increased to maintain high relative precision in the
95
+ output (alternatively, the series approximation for log(1+x) could
96
+ be used in that case).
97
+ """
98
+
99
# Newton iteration for log in fixed-point arithmetic: solve exp(r) = x
# via r <- r + x*exp(-r) - 1.  x is assumed to have magnitude ~= 1.
def _log_newton(x, prec):
    extra = 8
    # Seed with a ~50-bit estimate from the hardware log
    seed = math.log(to_float((x, -prec, bitcount(x))))
    r = int(seed * 2.0**50)
    prevp = 50
    # Grow the working precision step by step so that only the last
    # iteration runs a full-precision exp
    for p in giant_steps(50, prec + extra):
        rb = lshift_quick(r, p - prevp)
        e = exp_series(-rb, p)
        r = rb + ((rshift_quick(x, prec - p) * e) >> p) - (1 << p)
        prevp = p
    return r >> extra
114
+
115
def flog(x, prec, rounding):
    """Compute the natural logarithm of a raw mpf x.

    Raises ValueError if x is zero or negative."""
    # raise-with-parentheses is valid on both Python 2 and 3 (the old
    # comma form is a Python-2-only syntax)
    if x == fzero:
        raise ValueError("logarithm of 0")
    if x == fone:
        return fzero
    man, exp, bc = x
    if man < 0:
        raise ValueError("logarithm of a negative number")
    # Estimated precision needed for log(t) + n*log(2)
    prec2 = prec + int(math.log(1 + abs(bc + exp), 2)) + 10
    # Watch out for the case when x is very close to 1: log(x) is tiny
    # and extra working precision is needed for good relative accuracy
    if -1 < bc + exp < 2:
        near_one = fabs(fsub(x, fone, STANDARD_PREC, ROUND_FLOOR), STANDARD_PREC, ROUND_FLOOR)
        # BUGFIX: the original compared the tuple near_one to the int 0,
        # which is always False; test the mantissa instead so the
        # x-rounds-to-1 case is actually caught
        if not near_one[0]:
            return fzero
        # estimate how close
        prec2 += -(near_one[1]) - bitcount(near_one[0])
    # Separate mantissa and exponent, calculate, join parts
    t = rshift_quick(man, bc - prec2)
    l = _log_newton(t, prec2)
    a = (exp + bc) * log2_fixed(prec2)
    return normalize(l + a, -prec2, prec, rounding)
134
+
135
+
136
+
137
+ """
138
+ We compute sin(x) around 0 from its Taylor series, and cos(x) around 0
139
+ from sqrt(1-sin(x)**2). This way we can simultaneously compute sin and
140
+ cos, which are often needed together (e.g. for the tangent function or
141
+ the complex exponential), with little extra cost compared to computing
142
+ just one of them. The main reason for computing sin first (and not sin
143
+ from cos) is to obtain high relative accuracy for x extremely close to
144
+ 0, where the operation sqrt(1-cos(x)**2) can cause huge cancellations.
145
+
146
+ For any value of x, we can reduce it to the interval A = [-pi/4, pi/4]
147
+ (where the Taylor series converges quickly) by translations, changing
148
+ signs, and switching the roles of cos and sin:
149
+
150
+ A : sin(x) = sin(x) cos(x) = cos(x)
151
+ B : sin(x) = cos(x-pi/2) cos(x) = -sin(x-pi/2)
152
+ C : sin(x) = -sin(x-pi) cos(x) = -cos(x-pi)
153
+ D : sin(x) = -cos(x-3*pi/2) cos(x) = sin(x-3*pi/2)
154
+
155
+ | A | B | C | D |
156
+ v v v v v
157
+
158
+ 1 | ____ .......... ____
159
+ | _.. .. __
160
+ | . __ . __
161
+ | .. _ .. _
162
+ | . __ . __
163
+ -----| -.----------_-----------.-------------_-----------
164
+ | . _ .. _ .
165
+ | __ . __ .
166
+ | _ .. _ ..
167
+ | __ . __ .
168
+ | __ _.. ..
169
+ -1 | _________ ..........
170
+ 0 pi 2*pi
171
+
172
+
173
+ TODO: could use cos series too when extremely close to 0
174
+ """
175
+
176
+ def _sin_series(x, prec):
177
+ x2 = (x*x) >> prec
178
+ s = a = x
179
+ k = 3
180
+ while a:
181
+ a = ((a * x2) >> prec) // (-k*(k-1))
182
+ s += a
183
+ k += 2
184
+ return s
185
+
186
def _trig_reduce(x, prec):
    # Reduce the fixed-point angle x modulo pi/2, centering the
    # remainder in [-pi/4, pi/4): returns (n, rem) with
    # x = n*(pi/2) + rem
    pi_ = pi_fixed(prec)
    quarter = pi_ >> 2
    half = pi_ >> 1
    n, rem = divmod(x + quarter, half)
    return n, rem - quarter
193
+
194
def cos_sin(x, prec, rounding):
    """Simultaneously compute (cos(x), sin(x)) for real x."""
    man, exp, bc = x
    # Reducing mod pi/2 loses about |bc+exp| bits, so add guard bits
    prec2 = prec + abs(bc + exp) + 15
    n, rx = _trig_reduce(make_fixed(x, prec2), prec2)
    one = 1 << prec2
    # The quadrant picks which of sin/cos comes from the series and
    # which from sqrt(1 - y**2), along with the signs of both
    quadrant = n % 4
    if quadrant == 0:
        s = _sin_series(rx, prec2)
        c = sqrt_fixed(one - ((s*s) >> prec2), prec2)
    elif quadrant == 1:
        c = -_sin_series(rx, prec2)
        s = sqrt_fixed(one - ((c*c) >> prec2), prec2)
    elif quadrant == 2:
        s = -_sin_series(rx, prec2)
        c = -sqrt_fixed(one - ((s*s) >> prec2), prec2)
    else:
        c = _sin_series(rx, prec2)
        s = -sqrt_fixed(one - ((c*c) >> prec2), prec2)
    return (normalize(c, -prec2, prec, rounding),
            normalize(s, -prec2, prec, rounding))
218
+
219
def fcos(x, prec, rounding):
    """Compute cos(x) for a real raw mpf x."""
    c, _ = cos_sin(x, prec, rounding)
    return c
221
+
222
def fsin(x, prec, rounding):
    """Compute sin(x) for a real raw mpf x."""
    _, s = cos_sin(x, prec, rounding)
    return s
224
+
225
def ftan(x, prec, rounding):
    """Compute tan(x) = sin(x)/cos(x) for a real raw mpf x."""
    cos_x, sin_x = cos_sin(x, prec+6, ROUND_FLOOR)
    return fdiv(sin_x, cos_x, prec, rounding)
228
+
229
+
230
+ #----------------------------------------------------------------------
231
+ # Hyperbolic functions
232
+ #
233
+
234
+ def _sinh_series(x, prec):
235
+ x2 = (x*x) >> prec
236
+ s = a = x
237
+ k = 3
238
+ while a:
239
+ a = ((a * x2) >> prec) // (k*(k-1))
240
+ s += a
241
+ k += 2
242
+ return s
243
+
244
def cosh_sinh(x, prec, rounding):
    """Simultaneously compute (cosh(x), sinh(x)) for real x"""

    man, exp, bc = x
    high_bit = exp + bc
    prec2 = prec + 6

    if high_bit < -3:
        # Extremely close to 0: sinh(x) ~= x and cosh(x) ~= 1
        # TODO: support directed rounding
        if high_bit < -prec - 2:
            return (fone, fpos(x, prec, rounding))

        # sinh suffers cancellation in (exp(x)-exp(-x))/2 for small x;
        # add guard bits to compensate
        # TODO: might be faster to use the sinh series directly
        prec2 += 4 - high_bit

    # General case:
    #     cosh(x) = (exp(x) + exp(-x))/2
    #     sinh(x) = (exp(x) - exp(-x))/2
    # The exponential is computed once and inverted.
    ep = fexp(x, prec2, ROUND_FLOOR)
    em = fdiv(fone, ep, prec2, ROUND_FLOOR)
    return (fshift_exact(fadd(ep, em, prec, rounding), -1),
            fshift_exact(fsub(ep, em, prec, rounding), -1))
270
+
271
def fcosh(x, prec, rounding):
    """Compute cosh(x) for a real argument x"""
    ch, _ = cosh_sinh(x, prec, rounding)
    return ch
274
+
275
def fsinh(x, prec, rounding):
    """Compute sinh(x) for a real argument x"""
    _, sh = cosh_sinh(x, prec, rounding)
    return sh
278
+
279
def ftanh(x, prec, rounding):
    """Compute tanh(x) = sinh(x)/cosh(x) for a real argument x"""
    c, s = cosh_sinh(x, prec+6, ROUND_FLOOR)
    return fdiv(s, c, prec, rounding)
283
+
284
+
285
+ #----------------------------------------------------------------------
286
+ # Inverse tangent
287
+ #
288
+
289
+ """
290
+ Near x = 0, use atan(x) = x - x**3/3 + x**5/5 - ...
291
+ Near x = 1, use atan(x) = y/x * (1 + 2/3*y + 2*4/3/5*y**2 + ...)
292
+ where y = x**2/(1+x**2).
293
+
294
+ TODO: these series are not impressively fast. It is probably better
295
+ to calculate atan from tan, using Newton's method or even the
296
+ secant method.
297
+ """
298
+
299
def _atan_series_1(x, prec, rounding):
    """Compute atan(x) for small |x| from the Maclaurin series
    x - x**3/3 + x**5/5 - ..."""
    man, exp, bc = x
    # Increase absolute precision when extremely close to 0
    bc = bitcount(man)
    diff = -(bc + exp)
    prec2 = prec
    if diff > 10:
        if 3*diff - 4 > prec:  # x**3 term vanishes; atan(x) ~ x
            return normalize(man, exp, prec, rounding)
        prec2 = prec + diff
    prec2 += 15  # XXX: better estimate for number of guard bits
    x = make_fixed(x, prec2)
    x2 = (x*x) >> prec2
    s = a = x
    # Flip a sign variable instead of recomputing (-1)**n every
    # iteration; the divisor (and hence the floor division) is
    # identical.  Also dropped an unused local ('one').
    sign = 1
    for n in xrange(1, 1000000):
        a = (a*x2) >> prec2
        sign = -sign
        s += a // (sign * (n+n+1))
        if -100 < a < 100:
            break
    return normalize(s, -prec2, prec, rounding)
318
+
319
def _atan_series_2(x, prec, rounding):
    """Compute atan(x) for x near 1 from the series
    y/x * (1 + 2/3*y + 2*4/(3*5)*y**2 + ...), where
    y = x**2/(1+x**2)."""
    prec2 = prec + 15
    x = make_fixed(x, prec2)
    one = 1 << prec2
    x2 = (x*x) >> prec2
    y = (x2 << prec2) // (one + x2)
    s = term = one
    for n in xrange(1, 1000000):
        term = ((term*y) >> prec2) * (2*n) // (2*n + 1)
        if term < 100:
            break
        s += term
    return normalize(y*s//x, -prec2, prec, rounding)
330
+
331
+ _cutoff_1 = (5, -3, 3) # ~0.6
332
+ _cutoff_2 = (3, -1, 2) # 1.5
333
+
334
def fatan(x, prec, rounding):
    """Compute atan(x) for a real raw mpf x."""
    if x[0] < 0:
        # atan is odd: atan(x) = -atan(-x)
        t = fatan(fneg_exact(x), prec+4, ROUND_FLOOR)
        return normalize(-t[0], t[1], prec, rounding)
    if fcmp(x, _cutoff_1) < 0:
        return _atan_series_1(x, prec, rounding)
    if fcmp(x, _cutoff_2) < 0:
        return _atan_series_2(x, prec, rounding)
    # For large x, use atan(x) = pi/2 - atan(1/x)
    if x[1] > 10*prec:
        # atan(1/x) ~ 2**(-10*prec) is far below the target precision,
        # so pi/2 alone is the answer.
        # NOTE(review): the diff rendering lost this function's
        # indentation; reconstructed so that this fast path returns
        # immediately -- confirm against upstream mpmath.
        pi = fpi(prec, rounding)
        pihalf = pi[0], pi[1]-1, pi[2]
        return pihalf
    pi = fpi(prec+4, ROUND_FLOOR)
    pihalf = pi[0], pi[1]-1, pi[2]
    t = fatan(fdiv(fone, x, prec+4, ROUND_FLOOR), prec+4, ROUND_FLOOR)
    return fsub(pihalf, t, prec, rounding)