2012-10-31 Tobias Burnus <burnus@net-b.de>
Joseph Myers <joseph@codesourcery.com>
David S. Miller <davem@davemloft.net>
Ulrich Drepper <drepper@redhat.com>
Marek Polacek <polacek@redhat.com>
Petr Baudis <pasky@suse.cz>
* math/expm1q.c (expm1q): Changes from GLIBC. Use expq for
large parameters. Fix errno for boundary conditions.
* math/finiteq.c (finiteq): Add comment.
* math/fmaq.c (fmaq): Changes from GLIBC. Fix missing underflows
and bad results for some subnormal results. Fix sign of inexact
zero return. Fix sign of exact zero return.
Ensure additions are not scheduled after fetestexcept.
* math/jnq.c (jnq): Changes from GLIBC. Fix jnq precision near
the zeros of j0q. (ynq): Set errno properly on overflow.
* math/nearbyintq.c (nearbyintq): Changes from GLIBC. Do not
manipulate bits before adding and subtracting TWO112[sx].
* math/rintq.c (rintq): Ditto.
* math/scalblnq.c (scalblnq): Changes from GLIBC. Fix integer
overflow.
* math/scalbnq.c (scalbnq): Ditto.
@@ -53,6 +53,7 @@
+#include <errno.h>
#include "quadmath-imp.h"
/* exp(x) - 1 = x + 0.5 x^2 + x^3 P(x)/Q(x)
@@ -100,6 +101,11 @@ expm1q (__float128 x)
ix = u.words32.w0;
sign = ix & 0x80000000;
ix &= 0x7fffffff;
+ if (!sign && ix >= 0x40060000)
+ {
+ /* For positive x with x >= 128 (exponent >= 7), just use plain expq. */
+ return expq (x);
+ }
if (ix >= 0x7fff0000)
{
/* Infinity. */
@@ -120,7 +126,10 @@ expm1q (__float128 x)
/* Overflow. */
if (x > maxlog)
- return (HUGE_VALQ * HUGE_VALQ);
+ {
+ __set_errno (ERANGE);
+ return (HUGE_VALQ * HUGE_VALQ);
+ }
/* Minimum value. */
if (x < minarg)
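As a side note on the expq shortcut added above: once x reaches 2^7, exp(x) is so large that subtracting 1 cannot affect the rounded 113-bit result, so delegating to expq is safe. A minimal usage sketch (hypothetical test program, not part of the patch; assumes <quadmath.h> and linking with -lquadmath):

#include <quadmath.h>
#include <stdio.h>

int
main (void)
{
  __float128 x = 200.0Q;   /* well above the 2^7 threshold */
  char buf[64];
  /* With the shortcut, expm1q delegates to expq here; mathematically
     exp(200) - 1 and exp(200) round to the same quad value anyway.  */
  quadmath_snprintf (buf, sizeof buf, "%.30Qe", expm1q (x) - expq (x));
  printf ("expm1q(x) - expq(x) = %s\n", buf);   /* expect 0 */
  return 0;
}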
@@ -15,6 +15,11 @@
#include "quadmath-imp.h"
+/*
+ * finiteq(x) returns 1 if x is finite, else 0;
+ * no branching!
+ */
+
int
finiteq (const __float128 x)
{
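The branchless test the new comment advertises can be sketched in double precision like this (an illustration of the technique, not the body of finiteq; the masks are the double-precision analogues of the quad ones):

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int
finite_double (double x)
{
  uint64_t hx;
  memcpy (&hx, &x, sizeof hx);               /* raw bit pattern */
  hx &= UINT64_C (0x7fffffffffffffff);       /* drop the sign bit */
  /* Finite values sit strictly below the Inf/NaN exponent pattern, so the
     unsigned subtraction wraps and sets the top bit exactly for them.  */
  return (int) ((hx - UINT64_C (0x7ff0000000000000)) >> 63);
}

int
main (void)
{
  printf ("%d %d %d\n", finite_double (1.5),
          finite_double (HUGE_VAL), finite_double (NAN));  /* 1 0 0 */
  return 0;
}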
@@ -1,5 +1,5 @@
/* Compute x * y + z as ternary operation.
- Copyright (C) 2010 Free Software Foundation, Inc.
+ Copyright (C) 2010-2012 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2010.
@@ -57,6 +57,11 @@ fmaq (__float128 x, __float128 y, __float128 z)
&& u.ieee.exponent != 0x7fff
&& v.ieee.exponent != 0x7fff)
return (z + x) + y;
+ /* If z is zero and x and y are nonzero, compute the result
+ as x * y to avoid the wrong sign of a zero result if x * y
+ underflows to 0. */
+ if (z == 0 && x != 0 && y != 0)
+ return x * y;
/* If x or y or z is Inf/NaN, or if fma will certainly overflow,
or if x * y is less than half of FLT128_DENORM_MIN,
compute as x * y + z. */
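A hedged demonstration of the signed-zero case handled by the new early return (hypothetical test program; the magnitudes are illustrative, chosen so x * y underflows below half of FLT128_DENORM_MIN; assumes <quadmath.h> and -lquadmath):

#include <quadmath.h>
#include <stdio.h>

int
main (void)
{
  __float128 x = 0x1p-8200Q;
  __float128 y = -0x1p-8300Q;        /* x * y underflows to -0 */
  __float128 z = 0.0Q;
  __float128 naive = x * y + z;      /* -0 + +0 gives +0: wrong sign */
  __float128 fused = fmaq (x, y, z); /* exact product is negative: -0 */
  printf ("naive signbit=%d  fmaq signbit=%d\n",
          signbitq (naive), signbitq (fused));   /* expect 0 and 1 */
  return 0;
}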
@@ -136,6 +141,11 @@ fmaq (__float128 x, __float128 y, __float128 z)
y = v.value;
z = w.value;
}
+
+ /* Ensure correct sign of exact 0 + 0. */
+ if (__builtin_expect ((x == 0 || y == 0) && z == 0, 0))
+ return x * y + z;
+
/* Multiplication m1 + m2 = x * y using Dekker's algorithm. */
#define C ((1LL << (FLT128_MANT_DIG + 1) / 2) + 1)
__float128 x1 = x * C;
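For reference, the split/product this hunk leads into can be sketched in double precision, where 2^27 + 1 plays the role C plays for __float128 (an illustration of Dekker's algorithm, not the file's code):

#include <stdio.h>

/* Split a * b into an exact sum m1 + m2, double-precision analogue of the
   quad-precision code above.  */
static void
dekker_mul (double a, double b, double *m1, double *m2)
{
  const double C = 0x1p27 + 1;                 /* 2^ceil(53/2) + 1 */
  double a1 = a * C, b1 = b * C;
  double ahi = (a - a1) + a1, alo = a - ahi;   /* a == ahi + alo exactly */
  double bhi = (b - b1) + b1, blo = b - bhi;
  *m1 = a * b;                                 /* rounded product */
  *m2 = (((ahi * bhi - *m1) + ahi * blo) + alo * bhi) + alo * blo;
}

int
main (void)
{
  double m1, m2;
  dekker_mul (1.0 / 3.0, 3.0, &m1, &m2);
  printf ("m1 = %.17g, m2 = %.17g\n", m1, m2); /* m2 is the rounding error */
  return 0;
}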
@@ -191,7 +201,7 @@ fmaq (__float128 x, __float128 y, __float128 z)
#endif
v.value = a1 + u.value;
/* Ensure the addition is not scheduled after fetestexcept call. */
- asm volatile ("" : : "m" (v));
+ asm volatile ("" : : "m" (v.value));
#ifdef USE_FENV_H
int j = fetestexcept (FE_INEXACT) != 0;
feupdateenv (&env);
@@ -220,20 +230,14 @@ fmaq (__float128 x, __float128 y, __float128 z)
{
/* v.ieee.mant_low & 2 is LSB bit of the result before rounding,
v.ieee.mant_low & 1 is the round bit and j is our sticky
- bit. In round-to-nearest 001 rounds down like 00,
- 011 rounds up, even though 01 rounds down (thus we need
- to adjust), 101 rounds down like 10 and 111 rounds up
- like 11. */
- if ((v.ieee.mant_low & 3) == 1)
- {
- v.value *= 0x1p-226Q;
- if (v.ieee.negative)
- return v.value - 0x1p-16494Q /* __FLT128_DENORM_MIN__ */;
- else
- return v.value + 0x1p-16494Q /* __FLT128_DENORM_MIN__ */;
- }
- else
- return v.value * 0x1p-226Q;
+ bit. */
+ w.value = 0.0Q;
+ w.ieee.mant_low = ((v.ieee.mant_low & 3) << 1) | j;
+ w.ieee.negative = v.ieee.negative;
+ v.ieee.mant_low &= ~3U;
+ v.value *= 0x1p-226Q;
+ w.value *= 0x1p-2Q;
+ return v.value + w.value;
}
v.ieee.mant_low |= j;
return v.value * 0x1p-226Q;
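The rewritten tail keeps the LSB, round and sticky bits in a tiny auxiliary value and lets one final scaled addition do the rounding; the apparent point is that the current rounding mode and the inexact/underflow flags then come from that real floating-point addition rather than from a hand-coded round-to-nearest case. A small double-precision illustration of why the final step must be an actual addition (hypothetical; assumes <fenv.h>, link with -lm if required):

#include <fenv.h>
#include <stdio.h>

int
main (void)
{
  volatile double big = 1.0, tiny = 0x1p-60;  /* volatile: keep the adds live */

  fesetround (FE_TONEAREST);
  printf ("to-nearest: %.17g\n", big + tiny); /* 1 */
  fesetround (FE_UPWARD);
  printf ("upward:     %.17g\n", big + tiny); /* 1.0000000000000002 */
  fesetround (FE_TONEAREST);
  return 0;
}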
@@ -11,9 +11,9 @@
/* Modifications for 128-bit long double are
Copyright (C) 2001 Stephen L. Moshier <moshier@na-net.ornl.gov>
- and are incorporated herein by permission of the author. The author
+ and are incorporated herein by permission of the author. The author
reserves the right to distribute this material elsewhere under different
- copying permissions. These modifications are distributed here under
+ copying permissions. These modifications are distributed here under
the following terms:
This library is free software; you can redistribute it and/or
@@ -56,6 +56,7 @@
*
*/
+#include <errno.h>
#include "quadmath-imp.h"
static const __float128
@@ -273,7 +274,16 @@ jnq (int n, __float128 x)
}
}
}
- b = (t * j0q (x) / b);
+ /* j0() and j1() suffer enormous loss of precision at and
+ * near zero; however, we know that their zero points never
+ * coincide, so just choose the one further away from zero.
+ */
+ z = j0q (x);
+ w = j1q (x);
+ if (fabsq (z) >= fabsq (w))
+ b = (t * z / b);
+ else
+ b = (t * w / a);
}
}
if (sgn == 1)
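A hedged usage sketch of the situation the comment above describes, with x near the first zero of j0 (about 2.4048255577), where the old normalisation against j0q alone lost precision (hypothetical program; the printed value is for illustration only, not a reference result):

#include <quadmath.h>
#include <stdio.h>

int
main (void)
{
  __float128 x = 2.404825557695773Q;  /* close to the first zero of j0 */
  char buf[64];
  /* n > x, so this takes the branch containing the fix above.  */
  quadmath_snprintf (buf, sizeof buf, "%.33Qe", jnq (10, x));
  printf ("jnq(10, %.15f) = %s\n", (double) x, buf);
  return 0;
}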
@@ -374,6 +384,9 @@ ynq (int n, __float128 x)
a = temp;
}
}
+ /* If B is +-Inf, set up errno accordingly. */
+ if (! finiteq (b))
+ __set_errno (ERANGE);
if (sign > 0)
return b;
else
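A hedged usage sketch of the new errno behaviour (hypothetical program; the arguments are merely values expected to overflow ynq; assumes <quadmath.h> and -lquadmath):

#include <errno.h>
#include <quadmath.h>
#include <stdio.h>

int
main (void)
{
  errno = 0;
  __float128 b = ynq (300, 0x1p-100Q);  /* hugely negative, not finite */
  printf ("finiteq(b)=%d  errno==ERANGE: %d\n",
          finiteq (b), errno == ERANGE); /* expect 0 and 1 */
  return 0;
}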
@@ -44,18 +44,13 @@ nearbyintq(__float128 x)
fenv_t env;
#endif
int64_t i0,j0,sx;
- uint64_t i,i1;
+ uint64_t i1;
__float128 w,t;
GET_FLT128_WORDS64(i0,i1,x);
sx = (((uint64_t)i0)>>63);
j0 = ((i0>>48)&0x7fff)-0x3fff;
- if(j0<48) {
+ if(j0<112) {
if(j0<0) {
- if(((i0&0x7fffffffffffffffLL)|i1)==0) return x;
- i1 |= (i0&0x0000ffffffffffffLL);
- i0 &= 0xffffe00000000000ULL;
- i0 |= ((i1|-i1)>>16)&0x0000800000000000LL;
- SET_FLT128_MSW64(x,i0);
#ifdef USE_FENV_H
feholdexcept (&env);
#endif
@@ -67,25 +62,11 @@ nearbyintq(__float128 x)
GET_FLT128_MSW64(i0,t);
SET_FLT128_MSW64(t,(i0&0x7fffffffffffffffLL)|(sx<<63));
return t;
- } else {
- i = (0x0000ffffffffffffLL)>>j0;
- if(((i0&i)|i1)==0) return x; /* x is integral */
- i>>=1;
- if(((i0&i)|i1)!=0) {
- if(j0==47) i1 = 0x4000000000000000ULL; else
- i0 = (i0&(~i))|((0x0000200000000000LL)>>j0);
- }
}
- } else if (j0>111) {
+ } else {
if(j0==0x4000) return x+x; /* inf or NaN */
else return x; /* x is integral */
- } else {
- i = -1ULL>>(j0-48);
- if((i1&i)==0) return x; /* x is integral */
- i>>=1;
- if((i1&i)!=0) i1 = (i1&(~i))|((0x4000000000000000LL)>>(j0-48));
}
- SET_FLT128_WORDS64(x,i0,i1);
#ifdef USE_FENV_H
feholdexcept (&env);
#endif
@@ -13,6 +13,16 @@
* ====================================================
*/
+/*
+ * rintq(x)
+ * Return x rounded to integral value according to the prevailing
+ * rounding mode.
+ * Method:
+ * Using floating addition.
+ * Exception:
+ * Inexact flag raised if x not equal to rintq(x).
+ */
+
#include "quadmath-imp.h"
static const __float128
@@ -32,35 +42,16 @@ rintq (__float128 x)
j0 = ((i0>>48)&0x7fff)-0x3fff;
if(j0<48) {
if(j0<0) {
- if(((i0&0x7fffffffffffffffLL)|i1)==0) return x;
- i1 |= (i0&0x0000ffffffffffffLL);
- i0 &= 0xffffe00000000000ULL;
- i0 |= ((i1|-i1)>>16)&0x0000800000000000LL;
- SET_FLT128_MSW64(x,i0);
w = TWO112[sx]+x;
t = w-TWO112[sx];
GET_FLT128_MSW64(i0,t);
SET_FLT128_MSW64(t,(i0&0x7fffffffffffffffLL)|(sx<<63));
return t;
- } else {
- i = (0x0000ffffffffffffLL)>>j0;
- if(((i0&i)|i1)==0) return x; /* x is integral */
- i>>=1;
- if(((i0&i)|i1)!=0) {
- if(j0==47) i1 = 0x4000000000000000ULL; else
- i0 = (i0&(~i))|((0x0000200000000000LL)>>j0);
- }
}
- } else if (j0>111) {
+ } else {
if(j0==0x4000) return x+x; /* inf or NaN */
else return x; /* x is integral */
- } else {
- i = -1ULL>>(j0-48);
- if((i1&i)==0) return x; /* x is integral */
- i>>=1;
- if((i1&i)!=0) i1 = (i1&(~i))|((0x4000000000000000LL)>>(j0-48));
}
- SET_FLT128_WORDS64(x,i0,i1);
w = TWO112[sx]+x;
return w-TWO112[sx];
}
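Both nearbyintq and rintq now rely purely on the add-and-subtract trick: adding 2^112 pushes the fraction bits out of a __float128 significand, so that single addition performs the rounding in the current mode, with no need to pre-mask bits by hand. A double-precision sketch of the same idea using 2^52 (an illustration, not the library code):

#include <stdio.h>

static double
rint_by_addition (double x)         /* assumes 0 <= x < 2^52 */
{
  const double TWO52 = 0x1p52;
  volatile double w = TWO52 + x;    /* fraction bits are rounded away */
  return w - TWO52;
}

int
main (void)
{
  /* Prints 2 4 under the default round-to-nearest (ties to even).  */
  printf ("%g %g\n", rint_by_addition (2.5), rint_by_addition (3.7));
  return 0;
}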
@@ -13,6 +13,13 @@
* ====================================================
*/
+/*
+ * scalblnq (__float128 x, long int n)
+ * scalblnq(x,n) returns x* 2**n computed by exponent
+ * manipulation rather than by actually performing an
+ * exponentiation or a multiplication.
+ */
+
#include "quadmath-imp.h"
static const __float128
@@ -34,10 +41,12 @@ scalblnq (__float128 x, long int n)
k = ((hx>>48)&0x7fff) - 114;
}
if (k==0x7fff) return x+x; /* NaN or Inf */
- k = k+n;
- if (n> 50000 || k > 0x7ffe)
- return huge*copysignq(huge,x); /* overflow */
if (n< -50000) return tiny*copysignq(tiny,x); /*underflow*/
+ if (n> 50000 || k+n > 0x7ffe)
+ return huge*copysignq(huge,x); /* overflow */
+ /* Now that k and n are bounded, we know that k = k+n does not
+ overflow. */
+ k = k+n;
if (k > 0) /* normal result */
{SET_FLT128_MSW64(x,(hx&0x8000ffffffffffffULL)|(k<<48)); return x;}
if (k <= -114)
@@ -13,6 +13,14 @@
* ====================================================
*/
+
+/*
+ * scalbnq (__float128 x, int n)
+ * scalbnq(x,n) returns x* 2**n computed by exponent
+ * manipulation rather than by actually performing an
+ * exponentiation or a multiplication.
+ */
+
#include "quadmath-imp.h"
static const __float128
@@ -34,10 +42,12 @@ scalbnq (__float128 x, int n)
k = ((hx>>48)&0x7fff) - 114;
}
if (k==0x7fff) return x+x; /* NaN or Inf */
- k = k+n;
- if (n> 50000 || k > 0x7ffe)
- return huge*copysignq(huge,x); /* overflow */
if (n< -50000) return tiny*copysignq(tiny,x); /*underflow*/
+ if (n> 50000 || k+n > 0x7ffe)
+ return huge*copysignq(huge,x); /* overflow */
+ /* Now that k and n are bounded, we know that k = k+n does not
+ overflow. */
+ k = k+n;
if (k > 0) /* normal result */
{SET_FLT128_MSW64(x,(hx&0x8000ffffffffffffULL)|(k<<48)); return x;}
if (k <= -114)
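The reordering in both scalblnq and scalbnq above exists so that k + n is only ever formed once n is known to lie within +-50000 (note the short-circuit ||); with scalblnq's long n the old order could be a genuine signed overflow, and the same change in scalbnq presumably just keeps the two files in sync. A hedged sketch of the guard with illustrative types and values:

#include <limits.h>
#include <stdio.h>

int
main (void)
{
  long long k = 0x4005;   /* some biased binary128 exponent (illustrative) */
  long n = LONG_MAX;      /* e.g. scalblnq (x, LONG_MAX) */

  if (n < -50000)
    puts ("underflow path");
  else if (n > 50000 || k + n > 0x7ffe)
    /* n > 50000 short-circuits, so k + n is never evaluated here.  */
    puts ("overflow path taken before k + n could be formed");
  else
    printf ("k + n = %lld\n", k + n);   /* safe: |n| <= 50000 by now */
  return 0;
}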