2 Copyright (C) 2007, 2009, 2011 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
17 /* Written by Bruno Haible <bruno@clisp.org>, 2011. */
19 #if ! defined USE_LONG_DOUBLE
34 #include "integer_length.h"
37 #ifdef USE_LONG_DOUBLE
39 # define DOUBLE long double
42 # define MIN_EXP LDBL_MIN_EXP
43 # define MANT_BIT LDBL_MANT_BIT
44 # define L_(literal) literal##L
45 #elif ! defined USE_FLOAT
47 # define DOUBLE double
50 # define MIN_EXP DBL_MIN_EXP
51 # define MANT_BIT DBL_MANT_BIT
52 # define L_(literal) literal
53 #else /* defined USE_FLOAT */
58 # define MIN_EXP FLT_MIN_EXP
59 # define MANT_BIT FLT_MANT_BIT
60 # define L_(literal) literal##f
64 #define MAX(a,b) ((a) > (b) ? (a) : (b))
67 #define MIN(a,b) ((a) < (b) ? (a) : (b))
69 /* It is possible to write an implementation of fused multiply-add with
70 floating-point operations alone. See
71 Sylvie Boldo, Guillaume Melquiond:
72 Emulation of FMA and correctly-rounded sums: proved algorithms using
74 <http://www.lri.fr/~melquion/doc/08-tc.pdf>
75 But it is complicated.
76 Here we take the simpler (and probably slower) approach of doing
77 multi-precision arithmetic. */
/* NOTE(review): This chunk is a damaged extraction — every surviving line
   carries its original file line number as a prefix, and many intermediate
   lines are missing (visible in the jumps of that numbering).  The code is
   kept byte-identical below; only comments are added.  */
/* Mini-bignum representation: a nonnegative integer is stored as an array of
   32-bit limbs, least significant limb first (the same convention as GNU gmp,
   but with a much simpler implementation).  */
79 /* We use the naming conventions of GNU gmp, but vastly simpler (and slower)
82 typedef unsigned int mp_limb_t;
83 #define GMP_LIMB_BITS 32
84 verify (sizeof (mp_limb_t) * CHAR_BIT == GMP_LIMB_BITS);
/* A double-width limb, used to hold the full product of two limbs plus
   carries without overflow.  */
86 typedef unsigned long long mp_twolimb_t;
87 #define GMP_TWOLIMB_BITS 64
88 verify (sizeof (mp_twolimb_t) * CHAR_BIT == GMP_TWOLIMB_BITS);
90 /* Number of limbs needed for a single DOUBLE. */
91 #define NLIMBS1 ((MANT_BIT + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS)
/* The accumulator must hold a product of two mantissas (2*NLIMBS1 limbs)
   shifted relative to a third mantissa; 3*NLIMBS1 + 1 limbs suffice per the
   size estimations further below.  */
93 /* Number of limbs needed for the accumulator. */
94 #define NLIMBS3 (3 * NLIMBS1 + 1)
96 /* Assuming 0.5 <= x < 1.0:
97 Convert the mantissa (x * 2^DBL_MANT_BIT) to a sequence of limbs. */
/* decode: convert the mantissa of X (assumed 0.5 <= X < 1.0, per the comment
   at original lines 96-97) into the limb array LIMBS, filling from the most
   significant limb (limbs[NLIMBS1-1]) downward.
   Bits are extracted in chunks of at most 31 bits, because only values in
   [0, 2^31) are safely castable to int (see the comment at original lines
   101-103).  The first four chunk rounds are manually unrolled so that
   chunk_bits is a compile-time enum constant in each; remaining chunks (only
   reached for long double) run in the loop at original line 272.
   NOTE(review): many lines are missing from this extraction — the function's
   braces, the declarations of the accumulator 'accu' and chunk value 'd',
   the abort()/assertion paths after the range checks, and the stores through
   'p'.  Code kept byte-identical; comments only.  */
99 decode (DOUBLE x, mp_limb_t limbs[NLIMBS1])
101 /* I'm not sure whether it's safe to cast a 'double' value between
102 2^31 and 2^32 to 'unsigned int', therefore play safe and cast only
103 'double' values between 0 and 2^31 (to 'unsigned int' or 'int',
105 So, we split the MANT_BIT bits of x into a number of chunks of
107 enum { chunk_count = (MANT_BIT - 1) / 31 + 1 };
108 /* Variables used for storing the bits limb after limb. */
109 mp_limb_t *p = limbs + NLIMBS1 - 1;
/* The most significant limb is only partially filled when MANT_BIT is not a
   multiple of GMP_LIMB_BITS.  */
111 unsigned int bits_needed = MANT_BIT - (NLIMBS1 - 1) * GMP_LIMB_BITS;
112 /* The bits bits_needed-1...0 need to be ORed into the accu.
113 1 <= bits_needed <= GMP_LIMB_BITS. */
114 /* Unroll the first 4 loop rounds. */
/* ---- Unrolled round 0 ---- */
117 /* Here we still have MANT_BIT-0*31 bits to extract from x. */
118 enum { chunk_bits = MIN (31, MANT_BIT - 0 * 31) }; /* > 0, <= 31 */
/* Shift the next chunk_bits bits of the mantissa above the binary point,
   then peel them off with an int cast.  */
120 x *= (mp_limb_t) 1 << chunk_bits;
121 d = (int) x; /* 0 <= d < 2^chunk_bits. */
/* Sanity check: after subtracting d, x must again lie in [0, 1).
   (The failure branch — presumably abort() — is missing from this copy.)  */
123 if (!(x >= L_(0.0) && x < L_(1.0)))
125 if (bits_needed < chunk_bits)
127 /* store bits_needed bits */
128 accu |= d >> (chunk_bits - bits_needed);
/* The chunk straddles a limb boundary: the low bits of d start the next
   (less significant) limb.  */
133 /* hold (chunk_bits - bits_needed) bits */
134 accu = d << (GMP_LIMB_BITS - (chunk_bits - bits_needed));
135 bits_needed = GMP_LIMB_BITS - (chunk_bits - bits_needed);
139 /* store chunk_bits bits */
140 accu |= d << (bits_needed - chunk_bits);
141 bits_needed -= chunk_bits;
142 if (bits_needed == 0)
149 bits_needed = GMP_LIMB_BITS;
/* ---- Unrolled round 1 (same logic; MAX guards the enum against a negative
   value when MANT_BIT <= 31, in which case this round is dead code) ---- */
155 /* Here we still have MANT_BIT-1*31 bits to extract from x. */
156 enum { chunk_bits = MIN (31, MAX (MANT_BIT - 1 * 31, 0)) }; /* > 0, <= 31 */
158 x *= (mp_limb_t) 1 << chunk_bits;
159 d = (int) x; /* 0 <= d < 2^chunk_bits. */
161 if (!(x >= L_(0.0) && x < L_(1.0)))
163 if (bits_needed < chunk_bits)
165 /* store bits_needed bits */
166 accu |= d >> (chunk_bits - bits_needed);
171 /* hold (chunk_bits - bits_needed) bits */
172 accu = d << (GMP_LIMB_BITS - (chunk_bits - bits_needed));
173 bits_needed = GMP_LIMB_BITS - (chunk_bits - bits_needed);
177 /* store chunk_bits bits */
178 accu |= d << (bits_needed - chunk_bits);
179 bits_needed -= chunk_bits;
180 if (bits_needed == 0)
187 bits_needed = GMP_LIMB_BITS;
/* ---- Unrolled round 2 ---- */
193 /* Here we still have MANT_BIT-2*31 bits to extract from x. */
194 enum { chunk_bits = MIN (31, MAX (MANT_BIT - 2 * 31, 0)) }; /* > 0, <= 31 */
196 x *= (mp_limb_t) 1 << chunk_bits;
197 d = (int) x; /* 0 <= d < 2^chunk_bits. */
199 if (!(x >= L_(0.0) && x < L_(1.0)))
201 if (bits_needed < chunk_bits)
203 /* store bits_needed bits */
204 accu |= d >> (chunk_bits - bits_needed);
209 /* hold (chunk_bits - bits_needed) bits */
210 accu = d << (GMP_LIMB_BITS - (chunk_bits - bits_needed));
211 bits_needed = GMP_LIMB_BITS - (chunk_bits - bits_needed);
215 /* store chunk_bits bits */
216 accu |= d << (bits_needed - chunk_bits);
217 bits_needed -= chunk_bits;
218 if (bits_needed == 0)
225 bits_needed = GMP_LIMB_BITS;
/* ---- Unrolled round 3 ---- */
231 /* Here we still have MANT_BIT-3*31 bits to extract from x. */
232 enum { chunk_bits = MIN (31, MAX (MANT_BIT - 3 * 31, 0)) }; /* > 0, <= 31 */
234 x *= (mp_limb_t) 1 << chunk_bits;
235 d = (int) x; /* 0 <= d < 2^chunk_bits. */
237 if (!(x >= L_(0.0) && x < L_(1.0)))
239 if (bits_needed < chunk_bits)
241 /* store bits_needed bits */
242 accu |= d >> (chunk_bits - bits_needed);
247 /* hold (chunk_bits - bits_needed) bits */
248 accu = d << (GMP_LIMB_BITS - (chunk_bits - bits_needed));
249 bits_needed = GMP_LIMB_BITS - (chunk_bits - bits_needed);
253 /* store chunk_bits bits */
254 accu |= d << (bits_needed - chunk_bits);
255 bits_needed -= chunk_bits;
256 if (bits_needed == 0)
263 bits_needed = GMP_LIMB_BITS;
/* ---- Remaining rounds, for mantissas wider than 4*31 bits ---- */
269 /* Here we still have MANT_BIT-4*31 bits to extract from x. */
272 for (k = 4; k < chunk_count; k++)
274 size_t chunk_bits = MIN (31, MANT_BIT - k * 31); /* > 0, <= 31 */
276 x *= (mp_limb_t) 1 << chunk_bits;
277 d = (int) x; /* 0 <= d < 2^chunk_bits. */
279 if (!(x >= L_(0.0) && x < L_(1.0)))
281 if (bits_needed < chunk_bits)
283 /* store bits_needed bits */
284 accu |= d >> (chunk_bits - bits_needed);
289 /* hold (chunk_bits - bits_needed) bits */
290 accu = d << (GMP_LIMB_BITS - (chunk_bits - bits_needed));
291 bits_needed = GMP_LIMB_BITS - (chunk_bits - bits_needed);
295 /* store chunk_bits bits */
296 accu |= d << (bits_needed - chunk_bits);
297 bits_needed -= chunk_bits;
298 if (bits_needed == 0)
305 bits_needed = GMP_LIMB_BITS;
310 /* We shouldn't get here. */
/* The #ifndef below guards a workaround (body missing from this copy) that
   presumably truncates excess precision before decoding — TODO confirm
   against the full file.  */
314 #ifndef USE_LONG_DOUBLE /* On FreeBSD 6.1/x86, 'long double' numbers sometimes
315 have excess precision. */
321 /* Multiply two sequences of limbs. */
/* multiply: schoolbook multiplication of the two NLIMBS1-limb numbers XLIMBS
   and YLIMBS, writing the 2*NLIMBS1-limb product into PROD_LIMBS.
   NOTE(review): lines are missing from this extraction — the braces, the loop
   variable declarations, and (presumably, judging by the dangling
   'for (k = len2; ...)' at original line 330) a loop that zero-initializes
   prod_limbs before accumulation — TODO confirm against the full file.
   Code kept byte-identical; comments only.  */
323 multiply (mp_limb_t xlimbs[NLIMBS1], mp_limb_t ylimbs[NLIMBS1],
324 mp_limb_t prod_limbs[2 * NLIMBS1])
327 enum { len1 = NLIMBS1 };
328 enum { len2 = NLIMBS1 };
330 for (k = len2; k > 0; )
/* Outer loop: one row of the schoolbook product per limb of xlimbs.  */
332 for (i = 0; i < len1; i++)
334 mp_limb_t digit1 = xlimbs[i];
335 mp_twolimb_t carry = 0;
336 for (j = 0; j < len2; j++)
338 mp_limb_t digit2 = ylimbs[j];
/* carry accumulates limb product + existing partial sum + incoming carry;
   a two-limb type holds this without overflow (max value fits in 64 bits). */
339 carry += (mp_twolimb_t) digit1 * (mp_twolimb_t) digit2;
340 carry += prod_limbs[i + j];
341 prod_limbs[i + j] = (mp_limb_t) carry;
342 carry = carry >> GMP_LIMB_BITS;
/* After the inner loop the remaining carry becomes the next higher limb.  */
344 prod_limbs[i + len2] = (mp_limb_t) carry;
/* FUNC (fma/fmaf/fmal, selected by the USE_FLOAT / USE_LONG_DOUBLE macros):
   compute x * y + z with a single rounding step, using exact multi-precision
   limb arithmetic internally.
   Overall plan (reconstructed from the surviving comments):
     1. Handle non-finite and zero operands (those branches are missing from
        this copy — only the 'isfinite' and '== 0.0' tests survive).
     2. Split x, y, z into sign, exponent and normalized mantissa via FREXP.
     3. decode() the mantissas into limb arrays, multiply() x's and y's.
     4. Align the product and z on a common exponent e, add or subtract
        depending on the signs.
     5. Round the exact sum to MANT_BIT bits honoring the current rounding
        mode, then reassemble a DOUBLE with LDEXP.
   NOTE(review): this extraction is missing many lines — braces, the special
   cases for NaN/Inf/zero, the declarations of xys/zs/sign/carry/round_up/
   fsum/k, several 'return' statements, and assorted else-branches.  Code is
   kept byte-identical; comments only.  */
349 FUNC (DOUBLE x, DOUBLE y, DOUBLE z)
/* Non-finite x or y (and, per the gaps, presumably non-finite z) fall through
   to missing special-case code — TODO confirm against the full file.  */
351 if (isfinite (x) && isfinite (y))
355 /* x, y, z are all finite. */
356 if (x == L_(0.0) || y == L_(0.0))
360 /* x, y, z are all non-zero.
361 The result is x * y + z. */
363 int e; /* exponent of x * y + z */
365 mp_limb_t sum[NLIMBS3];
369 int xys; /* sign of x * y */
370 int zs; /* sign of z */
371 int xye; /* sum of exponents of x and y */
372 int ze; /* exponent of z */
373 mp_limb_t summand1[NLIMBS3];
375 mp_limb_t summand2[NLIMBS3];
379 mp_limb_t zlimbs[NLIMBS1];
380 mp_limb_t xylimbs[2 * NLIMBS1];
383 DOUBLE xn; /* normalized part of x */
384 DOUBLE yn; /* normalized part of y */
385 DOUBLE zn; /* normalized part of z */
386 int xe; /* exponent of x */
387 int ye; /* exponent of y */
388 mp_limb_t xlimbs[NLIMBS1];
389 mp_limb_t ylimbs[NLIMBS1];
413 /* xn, yn, zn are all positive.
414 The result is (-1)^xys * xn * yn + (-1)^zs * zn. */
415 xn = FREXP (xn, &xe);
416 yn = FREXP (yn, &ye);
417 zn = FREXP (zn, &ze);
419 /* xn, yn, zn are all < 1.0 and >= 0.5.
421 (-1)^xys * 2^xye * xn * yn + (-1)^zs * 2^ze * zn. */
/* Shortcut: if the product is far smaller than z (more than MANT_BIT binary
   orders below), the rounded result is determined by z alone (the return for
   that branch is missing from this copy).  */
422 if (xye < ze - MANT_BIT)
424 /* 2^xye * xn * yn < 2^xye <= 2^(ze-MANT_BIT-1) */
/* Conversely, if z is far smaller than the product, clamp z's exponent so it
   acts only as a sticky bit for rounding, avoiding huge shift distances.  */
427 if (xye - 2 * MANT_BIT > ze)
429 /* 2^ze * zn < 2^ze <= 2^(xye-2*MANT_BIT-1).
432 because it would round differently: A round-to-even
433 in the multiplication can be a round-up or round-down
434 here, due to z. So replace z with a value that doesn't
435 require the use of long bignums but that rounds the
438 ze = xye - 2 * MANT_BIT - 1;
440 /* Convert mantissas of xn, yn, zn to limb sequences:
441 xlimbs = 2^MANT_BIT * xn
442 ylimbs = 2^MANT_BIT * yn
443 zlimbs = 2^MANT_BIT * zn */
447 /* Multiply the mantissas of xn and yn:
448 xylimbs = xlimbs * ylimbs */
449 multiply (xlimbs, ylimbs, xylimbs);
/* Align both quantities on the common (smaller) exponent e, shifting the one
   with the larger exponent left.  */
452 (-1)^xys * 2^(xye-2*MANT_BIT) * xylimbs
453 + (-1)^zs * 2^(ze-MANT_BIT) * zlimbs.
455 e = min (xye-2*MANT_BIT, ze-MANT_BIT)
457 summand1 = 2^(xye-2*MANT_BIT-e) * xylimbs
458 summand2 = 2^(ze-MANT_BIT-e) * zlimbs */
459 e = MIN (xye - 2 * MANT_BIT, ze - MANT_BIT);
460 if (e == xye - 2 * MANT_BIT)
462 /* Simply copy the limbs of xylimbs. */
464 for (i = 0; i < 2 * NLIMBS1; i++)
465 summand1[i] = xylimbs[i];
466 summand1_len = 2 * NLIMBS1;
470 size_t ediff = xye - 2 * MANT_BIT - e;
471 /* Left shift the limbs of xylimbs by ediff bits. */
472 size_t ldiff = ediff / GMP_LIMB_BITS;
473 size_t shift = ediff % GMP_LIMB_BITS;
475 for (i = 0; i < ldiff; i++)
/* shift > 0 here (the shift == 0 case is the copy loop further below);
   a (GMP_LIMB_BITS - shift) right shift would otherwise be UB.  */
480 for (i = 0; i < 2 * NLIMBS1; i++)
482 summand1[ldiff + i] = (xylimbs[i] << shift) | carry;
483 carry = xylimbs[i] >> (GMP_LIMB_BITS - shift);
485 summand1[ldiff + 2 * NLIMBS1] = carry;
486 summand1_len = ldiff + 2 * NLIMBS1 + 1;
490 for (i = 0; i < 2 * NLIMBS1; i++)
491 summand1[ldiff + i] = xylimbs[i];
492 summand1_len = ldiff + 2 * NLIMBS1;
494 /* Estimation of needed array size:
495 ediff = (xye - 2 * MANT_BIT) - (ze - MANT_BIT) <= MANT_BIT + 1
498 = (ediff + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS + 2 * NLIMBS1
499 <= (MANT_BIT + GMP_LIMB_BITS) / GMP_LIMB_BITS + 2 * NLIMBS1
502 if (!(summand1_len <= NLIMBS3))
/* Same alignment logic for z's limbs.  */
505 if (e == ze - MANT_BIT)
507 /* Simply copy the limbs of zlimbs. */
509 for (i = 0; i < NLIMBS1; i++)
510 summand2[i] = zlimbs[i];
511 summand2_len = NLIMBS1;
515 size_t ediff = ze - MANT_BIT - e;
516 /* Left shift the limbs of zlimbs by ediff bits. */
517 size_t ldiff = ediff / GMP_LIMB_BITS;
518 size_t shift = ediff % GMP_LIMB_BITS;
520 for (i = 0; i < ldiff; i++)
525 for (i = 0; i < NLIMBS1; i++)
527 summand2[ldiff + i] = (zlimbs[i] << shift) | carry;
528 carry = zlimbs[i] >> (GMP_LIMB_BITS - shift);
530 summand2[ldiff + NLIMBS1] = carry;
531 summand2_len = ldiff + NLIMBS1 + 1;
535 for (i = 0; i < NLIMBS1; i++)
536 summand2[ldiff + i] = zlimbs[i];
537 summand2_len = ldiff + NLIMBS1;
539 /* Estimation of needed array size:
540 ediff = (ze - MANT_BIT) - (xye - 2 * MANT_BIT) <= 2 * MANT_BIT
543 = (ediff + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS + NLIMBS1
544 <= (2 * MANT_BIT + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS + NLIMBS1
547 if (!(summand2_len <= NLIMBS3))
552 (-1)^xys * 2^e * summand1 + (-1)^zs * 2^e * summand2. */
/* Same signs: magnitudes add.  Classic multi-limb addition with a carry
   flag; the overflow test compares against (mp_limb_t)-1 = 2^32 - 1.  */
555 /* Perform an addition. */
561 for (i = 0; i < MIN (summand1_len, summand2_len); i++)
563 mp_limb_t digit1 = summand1[i];
564 mp_limb_t digit2 = summand2[i];
565 sum[i] = digit1 + digit2 + carry;
567 ? digit1 >= (mp_limb_t)-1 - digit2
568 : digit1 > (mp_limb_t)-1 - digit2);
570 if (summand1_len > summand2_len)
571 for (; i < summand1_len; i++)
573 mp_limb_t digit1 = summand1[i];
574 sum[i] = carry + digit1;
575 carry = carry && digit1 == (mp_limb_t)-1;
578 for (; i < summand2_len; i++)
580 mp_limb_t digit2 = summand2[i];
581 sum[i] = carry + digit2;
582 carry = carry && digit2 == (mp_limb_t)-1;
/* Opposite signs: subtract the smaller magnitude from the larger; first
   normalize lengths (strip leading zero limbs) and compare.  */
590 /* Perform a subtraction. */
591 /* Compare summand1 and summand2 by magnitude. */
592 while (summand1[summand1_len - 1] == 0)
594 while (summand2[summand2_len - 1] == 0)
596 if (summand1_len > summand2_len)
598 else if (summand1_len < summand2_len)
602 size_t i = summand1_len;
606 if (summand1[i] > summand2[i])
611 if (summand1[i] < summand2[i])
617 /* summand1 and summand2 are equal. */
/* |summand1| > |summand2|: standard borrow-propagating subtraction.  */
623 /* Compute summand1 - summand2. */
628 for (i = 0; i < summand2_len; i++)
630 mp_limb_t digit1 = summand1[i];
631 mp_limb_t digit2 = summand2[i];
632 sum[i] = digit1 - digit2 - carry;
633 carry = (carry ? digit1 <= digit2 : digit1 < digit2);
635 for (; i < summand1_len; i++)
637 mp_limb_t digit1 = summand1[i];
638 sum[i] = digit1 - carry;
639 carry = carry && digit1 == 0;
643 sum_len = summand1_len;
647 /* Compute summand2 - summand1. */
652 for (i = 0; i < summand1_len; i++)
654 mp_limb_t digit1 = summand1[i];
655 mp_limb_t digit2 = summand2[i];
656 sum[i] = digit2 - digit1 - carry;
657 carry = (carry ? digit2 <= digit1 : digit2 < digit1);
659 for (; i < summand2_len; i++)
661 mp_limb_t digit2 = summand2[i];
662 sum[i] = digit2 - carry;
663 carry = carry && digit2 == 0;
667 sum_len = summand2_len;
/* ---- Rounding phase: the exact value is (-1)^sign * 2^e * sum. ---- */
672 (-1)^sign * 2^e * sum. */
673 /* Now perform the rounding to MANT_BIT mantissa bits. */
674 while (sum[sum_len - 1] == 0)
676 /* Here we know that the most significant limb, sum[sum_len - 1], is
679 /* How many bits the sum has. */
680 unsigned int sum_bits =
681 integer_length (sum[sum_len - 1]) + (sum_len - 1) * GMP_LIMB_BITS;
682 /* How many bits to keep when rounding. */
683 unsigned int keep_bits;
684 /* How many bits to round off. */
685 unsigned int roundoff_bits;
/* keep_bits shrinks below MANT_BIT when the result lands in the subnormal
   range (exponent below MIN_EXP).  */
686 if (e + (int) sum_bits >= MIN_EXP)
687 /* 2^e * sum >= 2^(MIN_EXP-1).
688 result will be a normalized number. */
689 keep_bits = MANT_BIT;
690 else if (e + (int) sum_bits >= MIN_EXP - MANT_BIT)
691 /* 2^e * sum >= 2^(MIN_EXP-MANT_BIT-1).
692 result will be a denormalized number or rounded to zero. */
/* NOTE(review): '(MIN_EXP + MANT_BIT)' here disagrees with the surrounding
   comments, which suggest subtracting (MIN_EXP - MANT_BIT); with lines
   missing from this copy it cannot be judged in isolation — verify against
   the complete upstream file.  */
693 keep_bits = e + (int) sum_bits - (MIN_EXP + MANT_BIT);
695 /* 2^e * sum < 2^(MIN_EXP-MANT_BIT-1). Round to zero. */
697 /* Note: 0 <= keep_bits <= MANT_BIT. */
698 if (sum_bits <= keep_bits)
702 keep_bits = sum_bits;
707 roundoff_bits = sum_bits - keep_bits; /* > 0, <= sum_bits */
/* Determine the rounding direction from the runtime rounding mode, via
   fegetround() when available, otherwise via FLT_ROUNDS.  The branch bodies
   (presumably setting a round_up flag) are missing from this copy.  */
709 #if HAVE_FEGETROUND && defined FE_TOWARDZERO
710 /* Cf. <http://pubs.opengroup.org/onlinepubs/9699919799/functions/fegetround.html> */
711 int rounding_mode = fegetround ();
712 if (rounding_mode == FE_TOWARDZERO)
714 else if (rounding_mode == FE_DOWNWARD)
716 else if (rounding_mode == FE_UPWARD)
719 /* Cf. <http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/float.h.html> */
720 int rounding_mode = FLT_ROUNDS;
721 if (rounding_mode == 0) /* toward zero */
723 else if (rounding_mode == 3) /* toward negative infinity */
725 else if (rounding_mode == 2) /* toward positive infinity */
/* Round-to-nearest: inspect the first discarded bit, then the remaining
   discarded bits (sticky), then apply ties-to-even on the kept LSB.  */
730 /* Round to nearest. */
732 /* Test bit (roundoff_bits-1). */
733 if ((sum[(roundoff_bits - 1) / GMP_LIMB_BITS]
734 >> ((roundoff_bits - 1) % GMP_LIMB_BITS)) & 1)
736 /* Test bits roundoff_bits-1 .. 0. */
738 ((sum[(roundoff_bits - 1) / GMP_LIMB_BITS]
739 & (((mp_limb_t) 1 << ((roundoff_bits - 1) % GMP_LIMB_BITS)) - 1))
744 for (i = (roundoff_bits - 1) / GMP_LIMB_BITS - 1; i >= 0; i--)
752 /* Round to even. Test bit roundoff_bits. */
753 round_up = ((sum[roundoff_bits / GMP_LIMB_BITS]
754 >> (roundoff_bits % GMP_LIMB_BITS)) & 1);
757 /* Perform the rounding. */
763 size_t i = roundoff_bits / GMP_LIMB_BITS;
774 | (((mp_limb_t) 1 << (roundoff_bits % GMP_LIMB_BITS)) - 1))
778 /* Propagate carry. */
779 while (i < sum_len - 1)
787 /* sum[i] is the most significant limb that was
/* (sum[i] & (sum[i] - 1)) == 0 tests "power of two or zero", i.e. the carry
   rippled all the way up and produced the 1000...000 pattern.  */
789 if (i == sum_len - 1 && (sum[i] & (sum[i] - 1)) == 0)
791 /* Through the carry, one more bit is needed. */
796 /* Instead of requiring one more limb of memory,
797 perform a shift by one bit, and adjust the
799 sum[i] = (mp_limb_t) 1 << (GMP_LIMB_BITS - 1);
802 /* The bit sequence has the form 1000...000. */
/* Rounding toward zero/down: just clear the discarded low bits.  */
809 sum[i] &= ((mp_limb_t) -1 << (roundoff_bits % GMP_LIMB_BITS));
810 if (i == sum_len - 1 && sum[i] == 0)
811 /* The entire sum has become zero. */
/* ---- Reassembly: convert the rounded limb sum back to a DOUBLE. ---- */
817 (-1)^sign * 2^e * sum
818 and here we know that
819 2^(sum_bits-1) <= sum < 2^sum_bits,
820 and sum is a multiple of 2^(sum_bits-keep_bits), where
821 0 < keep_bits <= MANT_BIT and keep_bits <= sum_bits.
822 (If keep_bits was initially 0, the rounding either returned zero
823 or produced a bit sequence of the form 1000...000, setting
826 /* Split the keep_bits bits into chunks of at most 32 bits. */
827 unsigned int chunk_count = (keep_bits - 1) / GMP_LIMB_BITS + 1;
828 /* 1 <= chunk_count <= ceil (sum_bits / GMP_LIMB_BITS) = sum_len. */
/* 2^-GMP_LIMB_BITS is built as a product of two smaller powers of two so
   that each 'int' shift stays below the bit width (avoiding shift UB).  */
829 static const DOUBLE chunk_multiplier = /* 2^-GMP_LIMB_BITS */
830 L_(1.0) / ((DOUBLE) (1 << (GMP_LIMB_BITS / 2))
831 * (DOUBLE) (1 << ((GMP_LIMB_BITS + 1) / 2)));
832 unsigned int shift = sum_bits % GMP_LIMB_BITS;
834 if (MANT_BIT <= GMP_LIMB_BITS)
836 /* Since keep_bits <= MANT_BIT <= GMP_LIMB_BITS,
837 chunk_count is 1. No need for a loop. */
839 fsum = (DOUBLE) sum[sum_len - 1];
842 ((sum[sum_len - 1] << (GMP_LIMB_BITS - shift))
843 | (sum_len >= 2 ? sum[sum_len - 2] >> shift : 0));
/* Multi-chunk case: accumulate limbs most-significant-first, scaling by
   2^-GMP_LIMB_BITS between rounds (Horner-style).  */
851 /* First loop round. */
852 fsum = (DOUBLE) sum[sum_len - k - 1];
856 fsum *= chunk_multiplier;
857 fsum += (DOUBLE) sum[sum_len - k - 1];
862 /* First loop round. */
864 ((sum[sum_len - k - 1] << (GMP_LIMB_BITS - shift))
865 | (sum_len >= k + 2 ? sum[sum_len - k - 2] >> shift : 0));
869 fsum *= chunk_multiplier;
871 ((sum[sum_len - k - 1] << (GMP_LIMB_BITS - shift))
872 | (sum[sum_len - k - 2] >> shift));
/* Scale back by the exponent and apply the overall sign.  */
876 fsum = LDEXP (fsum, e + (int) sum_bits - GMP_LIMB_BITS);
877 return (sign ? - fsum : fsum);