@@ -1,5 +1,6 @@
libmvec-supported-funcs = cos \
exp \
+ exp10 \
exp2 \
log \
log10 \
@@ -18,6 +18,10 @@ libmvec {
_ZGVsMxv_sinf;
}
GLIBC_2.39 {
+ _ZGVnN4v_exp10f;
+ _ZGVnN2v_exp10;
+ _ZGVsMxv_exp10f;
+ _ZGVsMxv_exp10;
_ZGVnN4v_exp2f;
_ZGVnN2v_exp2;
_ZGVsMxv_exp2f;
@@ -51,6 +51,7 @@ typedef __SVBool_t __sv_bool_t;
__vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_expf (__f32x4_t);
+__vpcs __f32x4_t _ZGVnN4v_exp10f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_exp2f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_logf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_log10f (__f32x4_t);
@@ -60,6 +61,7 @@ __vpcs __f32x4_t _ZGVnN4v_tanf (__f32x4_t);
__vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_exp (__f64x2_t);
+__vpcs __f64x2_t _ZGVnN2v_exp10 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_exp2 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_log (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_log10 (__f64x2_t);
@@ -74,6 +76,7 @@ __vpcs __f64x2_t _ZGVnN2v_tan (__f64x2_t);
__sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_expf (__sv_f32_t, __sv_bool_t);
+__sv_f32_t _ZGVsMxv_exp10f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_exp2f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_logf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_log10f (__sv_f32_t, __sv_bool_t);
@@ -83,6 +86,7 @@ __sv_f32_t _ZGVsMxv_tanf (__sv_f32_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_exp (__sv_f64_t, __sv_bool_t);
+__sv_f64_t _ZGVsMxv_exp10 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_exp2 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_log (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_log10 (__sv_f64_t, __sv_bool_t);
new file mode 100644
@@ -0,0 +1,145 @@
+/* Double-precision vector (AdvSIMD) exp10 function.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "v_math.h"
+
+/* Value of |x| above which scale overflows without special treatment. */
+#define SpecialBound 306.0 /* floor (log10 (2^1023)) - 1. */
+/* Value of n above which scale overflows even with special treatment. */
+#define ScaleBound 163840.0 /* 1280.0 * N. */
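+
+/* Rationale (worked out from the definitions above): exp10 (x)
+   = 2^(x * log2 (10)) and the largest finite double has unbiased exponent
+   1023, so the plain reconstruction stays finite only while
+   |x| <= log10 (2^1023) ~= 307.95; SpecialBound keeps a margin of one below
+   that. ScaleBound is the same kind of limit expressed on n = x * N /
+   log10 (2), past which even the split scale used in special_case cannot
+   represent the result and it saturates to 0 or Inf. */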
+
+static const struct data
+{
+ float64x2_t poly[4];
+ float64x2_t log10_2, log2_10_hi, log2_10_lo, shift;
+#if !WANT_SIMD_EXCEPT
+ float64x2_t special_bound, scale_thresh;
+#endif
+} data = {
+ /* Coefficients generated using Remez algorithm.
+ rel error: 0x1.5ddf8f28p-54
+ abs error: 0x1.5ed266c8p-54 in [ -log10(2)/256, log10(2)/256 ]
+ maxerr: 1.14432 +0.5 ulp. */
+ .poly = { V2 (0x1.26bb1bbb5524p1), V2 (0x1.53524c73cecdap1),
+ V2 (0x1.047060efb781cp1), V2 (0x1.2bd76040f0d16p0) },
+  .log10_2 = V2 (0x1.a934f0979a371p8), /* N/log10(2). */
+  .log2_10_hi = V2 (0x1.34413509f79ffp-9), /* log10(2)/N. */
+ .log2_10_lo = V2 (-0x1.9dc1da994fd21p-66),
+ .shift = V2 (0x1.8p+52),
+#if !WANT_SIMD_EXCEPT
+ .scale_thresh = V2 (ScaleBound),
+ .special_bound = V2 (SpecialBound),
+#endif
+};
+
+#define N (1 << V_EXP_TABLE_BITS)
+#define IndexMask v_u64 (N - 1)
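+
+/* The reduction below writes x = n * (log10 (2) / N) + r so that
+   10^x = 2^(n/N) * 10^r, with |r| <= log10 (2) / (2 * N). The constants
+   above follow from this: log10_2 holds N / log10 (2) ~= 425.24 (the stored
+   value implies N = 128 here), and log2_10_hi + log2_10_lo is
+   log10 (2) / N split into a head and a correction term, so that
+   r = x - n * log10 (2) / N is computed in extended precision. */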
+
+#if WANT_SIMD_EXCEPT
+
+# define TinyBound v_u64 (0x2000000000000000) /* asuint64 (0x1p-511). */
+# define BigBound v_u64 (0x4070000000000000) /* asuint64 (0x1p8). */
+# define Thres v_u64 (0x2070000000000000) /* BigBound - TinyBound. */
+
+static inline float64x2_t VPCS_ATTR
+special_case (float64x2_t x, float64x2_t y, uint64x2_t cmp)
+{
+ /* If fenv exceptions are to be triggered correctly, fall back to the scalar
+ routine for special lanes. */
+ return v_call_f64 (exp10, x, y, cmp);
+}
+
+#else
+
+# define SpecialOffset v_u64 (0x6000000000000000) /* 0x1p513. */
+/* SpecialBias1 - SpecialBias2 = asuint(1.0). */
+# define SpecialBias1 v_u64 (0x7000000000000000) /* 0x1p769. */
+# define SpecialBias2 v_u64 (0x3010000000000000) /* 0x1p-254. */
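+
+/* SpecialBias1 - SpecialBias2 = asuint(1.0), which is what makes the
+   splitting in special_case below work: depending on the sign of n, b makes
+   s1 either a huge (0x1p769) or a tiny (0x1p-767) power of two, and s2 is
+   rescaled by the inverse factor so that s1 * s2 recovers s while the
+   intermediate s2 + y * s2 stays in range. Once |n| exceeds scale_thresh,
+   returning s1 * s1 forces the result to Inf (n > 0) or 0 (n < 0). */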
+
+static float64x2_t VPCS_ATTR NOINLINE
+special_case (float64x2_t s, float64x2_t y, float64x2_t n,
+ const struct data *d)
+{
+ /* 2^(n/N) may overflow, break it up into s1*s2. */
+ uint64x2_t b = vandq_u64 (vcltzq_f64 (n), SpecialOffset);
+ float64x2_t s1 = vreinterpretq_f64_u64 (vsubq_u64 (SpecialBias1, b));
+ float64x2_t s2 = vreinterpretq_f64_u64 (
+ vaddq_u64 (vsubq_u64 (vreinterpretq_u64_f64 (s), SpecialBias2), b));
+ uint64x2_t cmp = vcagtq_f64 (n, d->scale_thresh);
+ float64x2_t r1 = vmulq_f64 (s1, s1);
+ float64x2_t r0 = vmulq_f64 (vfmaq_f64 (s2, y, s2), s1);
+ return vbslq_f64 (cmp, r1, r0);
+}
+
+#endif
+
+/* Fast vector implementation of exp10.
+ Maximum measured error is 1.64 ulp.
+ _ZGVnN2v_exp10(0x1.ccd1c9d82cc8cp+0) got 0x1.f8dab6d7fed0cp+5
+ want 0x1.f8dab6d7fed0ap+5. */
+float64x2_t VPCS_ATTR V_NAME_D1 (exp10) (float64x2_t x)
+{
+ const struct data *d = ptr_barrier (&data);
+ uint64x2_t cmp;
+#if WANT_SIMD_EXCEPT
+ /* If any lanes are special, mask them with 1 and retain a copy of x to allow
+ special_case to fix special lanes later. This is only necessary if fenv
+ exceptions are to be triggered correctly. */
+ float64x2_t xm = x;
+ uint64x2_t iax = vreinterpretq_u64_f64 (vabsq_f64 (x));
+ cmp = vcgeq_u64 (vsubq_u64 (iax, TinyBound), Thres);
+ if (__glibc_unlikely (v_any_u64 (cmp)))
+ x = vbslq_f64 (cmp, v_f64 (1), x);
+#else
+ cmp = vcageq_f64 (x, d->special_bound);
+#endif
+
+ /* n = round(x/(log10(2)/N)). */
+ float64x2_t z = vfmaq_f64 (d->shift, x, d->log10_2);
+ uint64x2_t u = vreinterpretq_u64_f64 (z);
+ float64x2_t n = vsubq_f64 (z, d->shift);
+
+ /* r = x - n*log10(2)/N. */
+ float64x2_t r = x;
+ r = vfmsq_f64 (r, d->log2_10_hi, n);
+ r = vfmsq_f64 (r, d->log2_10_lo, n);
+
+ uint64x2_t e = vshlq_n_u64 (u, 52 - V_EXP_TABLE_BITS);
+ uint64x2_t i = vandq_u64 (u, IndexMask);
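+  /* The shift constant 0x1.8p52 has unit-in-the-last-place 1, so the fma
+     above rounds x * N / log10 (2) to the nearest integer, left in the low
+     bits of z (and of u). i selects the table entry for the fractional part
+     of n / N, and the shift left by 52 - V_EXP_TABLE_BITS lines the integer
+     part of n / N up with the exponent field, so that adding e to the table
+     entry below yields s = 2^(n/N). */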
+
+ /* y = exp10(r) - 1 ~= C0 r + C1 r^2 + C2 r^3 + C3 r^4. */
+ float64x2_t r2 = vmulq_f64 (r, r);
+ float64x2_t p = vfmaq_f64 (d->poly[0], r, d->poly[1]);
+ float64x2_t y = vfmaq_f64 (d->poly[2], r, d->poly[3]);
+ p = vfmaq_f64 (p, y, r2);
+ y = vmulq_f64 (r, p);
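+  /* Expanded, this evaluates
+       y = (C0 + C1 r) r + (C2 + C3 r) r^3
+         = C0 r + C1 r^2 + C2 r^3 + C3 r^4,
+     pairing the terms so that the two leading fmas are independent. */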
+
+ /* s = 2^(n/N). */
+ u = v_lookup_u64 (__v_exp_data, i);
+ float64x2_t s = vreinterpretq_f64_u64 (vaddq_u64 (u, e));
+
+ if (__glibc_unlikely (v_any_u64 (cmp)))
+#if WANT_SIMD_EXCEPT
+ return special_case (xm, vfmaq_f64 (s, y, s), cmp);
+#else
+ return special_case (s, y, n, d);
+#endif
+
+ return vfmaq_f64 (s, y, s);
+}
new file mode 100644
@@ -0,0 +1,127 @@
+/* Double-precision vector (SVE) exp10 function.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "sv_math.h"
+#include "poly_sve_f64.h"
+
+#define SpecialBound 307.0 /* floor (log10 (2^1023)). */
+
+static const struct data
+{
+ double poly[5];
+ double shift, log10_2, log2_10_hi, log2_10_lo, scale_thres, special_bound;
+} data = {
+ /* Coefficients generated using Remez algorithm.
+ rel error: 0x1.9fcb9b3p-60
+ abs error: 0x1.a20d9598p-60 in [ -log10(2)/128, log10(2)/128 ]
+ max ulp err 0.52 +0.5. */
+ .poly = { 0x1.26bb1bbb55516p1, 0x1.53524c73cd32ap1, 0x1.0470591daeafbp1,
+ 0x1.2bd77b1361ef6p0, 0x1.142b5d54e9621p-1 },
+ /* 1.5*2^46+1023. This value is further explained below. */
+ .shift = 0x1.800000000ffc0p+46,
+  .log10_2 = 0x1.a934f0979a371p1, /* 1/log10(2). */
+  .log2_10_hi = 0x1.34413509f79ffp-2, /* log10(2). */
+ .log2_10_lo = -0x1.9dc1da994fd21p-59,
+ .scale_thres = 1280.0,
+ .special_bound = SpecialBound,
+};
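+
+/* The scale is produced with FEXPA rather than a table load. With the shift
+   above, z = shift + x / log10 (2) carries round (64 * (x / log10 (2))) plus
+   64 * 1023 in its low mantissa bits: the 1.5 * 2^46 term fixes the unit in
+   the last place at 1/64, and the 1023 term pre-biases the exponent. FEXPA
+   then reads a 6-bit index into its 2^(i/64) coefficient table and the
+   biased result exponent directly from those low bits. */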
+
+#define SpecialOffset 0x6000000000000000 /* 0x1p513. */
+/* SpecialBias1 - SpecialBias2 = asuint(1.0). */
+#define SpecialBias1 0x7000000000000000 /* 0x1p769. */
+#define SpecialBias2 0x3010000000000000 /* 0x1p-254. */
+
+/* Updates both special and non-special lanes; called only when at least one
+   special case has been detected. */
+static inline svfloat64_t
+special_case (svbool_t pg, svfloat64_t s, svfloat64_t y, svfloat64_t n,
+ const struct data *d)
+{
+ /* s=2^n may overflow, break it up into s=s1*s2,
+ such that exp = s + s*y can be computed as s1*(s2+s2*y)
+ and s1*s1 overflows only if n>0. */
+
+  /* If n <= 0 then set b to SpecialOffset, otherwise to 0. */
+ svbool_t p_sign = svcmple (pg, n, 0.0); /* n <= 0. */
+ svuint64_t b = svdup_u64_z (p_sign, SpecialOffset);
+
+ /* Set s1 to generate overflow depending on sign of exponent n. */
+ svfloat64_t s1 = svreinterpret_f64 (svsubr_x (pg, b, SpecialBias1));
+ /* Offset s to avoid overflow in final result if n is below threshold. */
+ svfloat64_t s2 = svreinterpret_f64 (
+ svadd_x (pg, svsub_x (pg, svreinterpret_u64 (s), SpecialBias2), b));
+
+ /* |n| > 1280 => 2^(n) overflows. */
+ svbool_t p_cmp = svacgt (pg, n, d->scale_thres);
+
+ svfloat64_t r1 = svmul_x (pg, s1, s1);
+ svfloat64_t r2 = svmla_x (pg, s2, s2, y);
+ svfloat64_t r0 = svmul_x (pg, r2, s1);
+
+ return svsel (p_cmp, r1, r0);
+}
+
+/* Fast vector implementation of exp10 using FEXPA instruction.
+ Maximum measured error is 1.02 ulp.
+ SV_NAME_D1 (exp10)(-0x1.2862fec805e58p+2) got 0x1.885a89551d782p-16
+ want 0x1.885a89551d781p-16. */
+svfloat64_t SV_NAME_D1 (exp10) (svfloat64_t x, svbool_t pg)
+{
+ const struct data *d = ptr_barrier (&data);
+ svbool_t no_big_scale = svacle (pg, x, d->special_bound);
+ svbool_t special = svnot_z (pg, no_big_scale);
+
+ /* n = round(x/(log10(2)/N)). */
+ svfloat64_t shift = sv_f64 (d->shift);
+ svfloat64_t z = svmla_x (pg, shift, x, d->log10_2);
+ svfloat64_t n = svsub_x (pg, z, shift);
+
+ /* r = x - n*log10(2)/N. */
+ svfloat64_t log2_10 = svld1rq (svptrue_b64 (), &d->log2_10_hi);
+ svfloat64_t r = x;
+ r = svmls_lane (r, n, log2_10, 0);
+ r = svmls_lane (r, n, log2_10, 1);
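+  /* svld1rq broadcasts the {hi, lo} split of log10 (2) into each 128-bit
+     chunk so that both steps can use the lane-indexed multiply-subtract;
+     the split keeps r accurate although n * log10 (2) nearly cancels x. */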
+
+ /* scale = 2^(n/N), computed using FEXPA. FEXPA does not propagate NaNs, so
+ for consistent NaN handling we have to manually propagate them. This
+ comes at significant performance cost. */
+ svuint64_t u = svreinterpret_u64 (z);
+ svfloat64_t scale = svexpa (u);
+
+  /* Approximate exp10(r) - 1 using a polynomial. */
+ svfloat64_t r2 = svmul_x (pg, r, r);
+ svfloat64_t y = svmla_x (pg, svmul_x (pg, r, d->poly[0]), r2,
+ sv_pairwise_poly_3_f64_x (pg, r, r2, d->poly + 1));
+
+ /* Assemble result as exp10(x) = 2^n * exp10(r). If |x| > SpecialBound
+ multiplication may overflow, so use special case routine. */
+ if (__glibc_unlikely (svptest_any (pg, special)))
+ {
+      /* FEXPA zeroes the sign bit; however, the sign is meaningful to the
+	 special-case function, so it needs to be copied.
+ e = sign bit of u << 46. */
+ svuint64_t e = svand_x (pg, svlsl_x (pg, u, 46), 0x8000000000000000);
+ /* Copy sign to scale. */
+ scale = svreinterpret_f64 (svadd_x (pg, e, svreinterpret_u64 (scale)));
+ return special_case (pg, scale, y, n, d);
+ }
+
+ /* No special case. */
+ return svmla_x (pg, scale, scale, y);
+}
new file mode 100644
@@ -0,0 +1,140 @@
+/* Single-precision vector (AdvSIMD) exp10 function.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "v_math.h"
+#include "poly_advsimd_f32.h"
+
+#define ScaleBound 192.0f
+
+static const struct data
+{
+ float32x4_t poly[5];
+ float32x4_t shift, log10_2, log2_10_hi, log2_10_lo;
+#if !WANT_SIMD_EXCEPT
+ float32x4_t scale_thresh;
+#endif
+} data = {
+ /* Coefficients generated using Remez algorithm with minimisation of relative
+ error.
+ rel error: 0x1.89dafa3p-24
+ abs error: 0x1.167d55p-23 in [-log10(2)/2, log10(2)/2]
+ maxerr: 1.85943 +0.5 ulp. */
+ .poly = { V4 (0x1.26bb16p+1f), V4 (0x1.5350d2p+1f), V4 (0x1.04744ap+1f),
+ V4 (0x1.2d8176p+0f), V4 (0x1.12b41ap-1f) },
+ .shift = V4 (0x1.8p23f),
+ .log10_2 = V4 (0x1.a934fp+1),
+ .log2_10_hi = V4 (0x1.344136p-2),
+ .log2_10_lo = V4 (-0x1.ec10cp-27),
+#if !WANT_SIMD_EXCEPT
+ .scale_thresh = V4 (ScaleBound)
+#endif
+};
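+
+/* The reduction below uses whole powers of two: n = round (x * log2 (10)) is
+   an integer because the shift constant 0x1.8p23 has unit-in-the-last-place
+   1, log10_2 holds log2 (10) ~= 3.32, and log2_10_hi + log2_10_lo is
+   log10 (2) split so that r = x - n * log10 (2) is computed accurately. */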
+
+#define ExponentBias v_u32 (0x3f800000)
+
+#if WANT_SIMD_EXCEPT
+
+# define SpecialBound 38.0f /* rint(log10(2^127)). */
+# define TinyBound v_u32 (0x20000000) /* asuint (0x1p-63). */
+# define BigBound v_u32 (0x42180000) /* asuint (SpecialBound). */
+# define Thres v_u32 (0x22180000) /* BigBound - TinyBound. */
+
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t x, float32x4_t y, uint32x4_t cmp)
+{
+ /* If fenv exceptions are to be triggered correctly, fall back to the scalar
+     routine for special lanes. */
+ return v_call_f32 (exp10f, x, y, cmp);
+}
+
+#else
+
+# define SpecialBound 126.0f /* rint (log2 (2^127 / (1 + sqrt (2)))). */
+# define SpecialOffset v_u32 (0x82000000)
+# define SpecialBias v_u32 (0x7f000000)
+
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t poly, float32x4_t n, uint32x4_t e, uint32x4_t cmp1,
+ float32x4_t scale, const struct data *d)
+{
+ /* 2^n may overflow, break it up into s1*s2. */
+ uint32x4_t b = vandq_u32 (vclezq_f32 (n), SpecialOffset);
+ float32x4_t s1 = vreinterpretq_f32_u32 (vaddq_u32 (b, SpecialBias));
+ float32x4_t s2 = vreinterpretq_f32_u32 (vsubq_u32 (e, b));
+ uint32x4_t cmp2 = vcagtq_f32 (n, d->scale_thresh);
+ float32x4_t r2 = vmulq_f32 (s1, s1);
+ float32x4_t r1 = vmulq_f32 (vfmaq_f32 (s2, poly, s2), s1);
+ /* Similar to r1 but avoids double rounding in the subnormal range. */
+ float32x4_t r0 = vfmaq_f32 (scale, poly, scale);
+ float32x4_t r = vbslq_f32 (cmp1, r1, r0);
+ return vbslq_f32 (cmp2, r2, r);
+}
+
+#endif
+
+/* Fast vector implementation of single-precision exp10.
+ Algorithm is accurate to 2.36 ULP.
+ _ZGVnN4v_exp10f(0x1.be2b36p+1) got 0x1.7e79c4p+11
+ want 0x1.7e79cp+11. */
+float32x4_t VPCS_ATTR V_NAME_F1 (exp10) (float32x4_t x)
+{
+ const struct data *d = ptr_barrier (&data);
+#if WANT_SIMD_EXCEPT
+ /* asuint(x) - TinyBound >= BigBound - TinyBound. */
+ uint32x4_t cmp = vcgeq_u32 (
+ vsubq_u32 (vandq_u32 (vreinterpretq_u32_f32 (x), v_u32 (0x7fffffff)),
+ TinyBound),
+ Thres);
+ float32x4_t xm = x;
+  /* If any lanes are special, mask them with 1 and retain a copy of x to
+     allow the special-case handler to fix special lanes later. This is only
+     necessary if fenv exceptions are to be triggered correctly. */
+ if (__glibc_unlikely (v_any_u32 (cmp)))
+ x = vbslq_f32 (cmp, v_f32 (1), x);
+#endif
+
+ /* exp10(x) = 2^n * 10^r = 2^n * (1 + poly (r)),
+     with 10^r in [1/sqrt(2), sqrt(2)] and
+ x = r + n * log10 (2), with r in [-log10(2)/2, log10(2)/2]. */
+ float32x4_t z = vfmaq_f32 (d->shift, x, d->log10_2);
+ float32x4_t n = vsubq_f32 (z, d->shift);
+ float32x4_t r = vfmsq_f32 (x, n, d->log2_10_hi);
+ r = vfmsq_f32 (r, n, d->log2_10_lo);
+ uint32x4_t e = vshlq_n_u32 (vreinterpretq_u32_f32 (z), 23);
+
+ float32x4_t scale = vreinterpretq_f32_u32 (vaddq_u32 (e, ExponentBias));
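+  /* Shifting the bits of z left by 23 moves n = round (x * log2 (10)) into
+     the exponent field, and adding ExponentBias (asuint (1.0f)) gives the
+     bit pattern of 2^n for n in the normal range; the special-case paths
+     cover the remaining values. */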
+
+#if !WANT_SIMD_EXCEPT
+ uint32x4_t cmp = vcagtq_f32 (n, v_f32 (SpecialBound));
+#endif
+
+ float32x4_t r2 = vmulq_f32 (r, r);
+ float32x4_t poly
+ = vfmaq_f32 (vmulq_f32 (r, d->poly[0]),
+ v_pairwise_poly_3_f32 (r, r2, d->poly + 1), r2);
+
+ if (__glibc_unlikely (v_any_u32 (cmp)))
+#if WANT_SIMD_EXCEPT
+ return special_case (xm, vfmaq_f32 (scale, poly, scale), cmp);
+#else
+ return special_case (poly, n, e, cmp, scale, d);
+#endif
+
+ return vfmaq_f32 (scale, poly, scale);
+}
new file mode 100644
@@ -0,0 +1,91 @@
+/* Single-precision vector (SVE) exp10 function.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "sv_math.h"
+#include "poly_sve_f32.h"
+
+/* For x < -SpecialBound, the result is subnormal and not handled correctly by
+ FEXPA. */
+#define SpecialBound 37.9
+
+static const struct data
+{
+ float poly[5];
+ float shift, log10_2, log2_10_hi, log2_10_lo, special_bound;
+} data = {
+ /* Coefficients generated using Remez algorithm with minimisation of relative
+ error.
+ rel error: 0x1.89dafa3p-24
+ abs error: 0x1.167d55p-23 in [-log10(2)/2, log10(2)/2]
+ maxerr: 0.52 +0.5 ulp. */
+ .poly = { 0x1.26bb16p+1f, 0x1.5350d2p+1f, 0x1.04744ap+1f, 0x1.2d8176p+0f,
+ 0x1.12b41ap-1f },
+ /* 1.5*2^17 + 127, a shift value suitable for FEXPA. */
+ .shift = 0x1.903f8p17f,
+ .log10_2 = 0x1.a934fp+1,
+ .log2_10_hi = 0x1.344136p-2,
+ .log2_10_lo = -0x1.ec10cp-27,
+ .special_bound = SpecialBound,
+};
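+
+/* As in the double-precision SVE routine, the shift fixes the unit in the
+   last place of z at 1/64 (the 1.5 * 2^17 term) and pre-biases the exponent
+   (the 127 term), so FEXPA can read a 6-bit table index and a biased result
+   exponent directly from the low bits of z. */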
+
+static svfloat32_t NOINLINE
+special_case (svfloat32_t x, svfloat32_t y, svbool_t special)
+{
+ return sv_call_f32 (exp10f, x, y, special);
+}
+
+/* Single-precision SVE exp10f routine. Implements the same algorithm
+ as AdvSIMD exp10f.
+ Worst case error is 1.02 ULPs.
+ _ZGVsMxv_exp10f(-0x1.040488p-4) got 0x1.ba5f9ep-1
+ want 0x1.ba5f9cp-1. */
+svfloat32_t SV_NAME_F1 (exp10) (svfloat32_t x, const svbool_t pg)
+{
+ const struct data *d = ptr_barrier (&data);
+  /* exp10(x) = 2^(n/N) * 10^r = 2^(n/N) * (1 + poly (r)),
+     with 10^r in [1/sqrt(2), sqrt(2)] and
+ x = r + n * log10(2) / N, with r in [-log10(2)/2N, log10(2)/2N]. */
+
+ /* Load some constants in quad-word chunks to minimise memory access (last
+ lane is wasted). */
+ svfloat32_t log10_2_and_inv = svld1rq (svptrue_b32 (), &d->log10_2);
+
+ /* n = round(x/(log10(2)/N)). */
+ svfloat32_t shift = sv_f32 (d->shift);
+ svfloat32_t z = svmla_lane (shift, x, log10_2_and_inv, 0);
+ svfloat32_t n = svsub_x (pg, z, shift);
+
+ /* r = x - n*log10(2)/N. */
+ svfloat32_t r = svmls_lane (x, n, log10_2_and_inv, 1);
+ r = svmls_lane (r, n, log10_2_and_inv, 2);
+
+ svbool_t special = svacgt (pg, x, d->special_bound);
+ svfloat32_t scale = svexpa (svreinterpret_u32 (z));
+
+ /* Polynomial evaluation: poly(r) ~ exp10(r)-1. */
+ svfloat32_t r2 = svmul_x (pg, r, r);
+ svfloat32_t poly
+ = svmla_x (pg, svmul_x (pg, r, d->poly[0]),
+ sv_pairwise_poly_3_f32_x (pg, r, r2, d->poly + 1), r2);
+
+ if (__glibc_unlikely (svptest_any (pg, special)))
+ return special_case (x, svmla_x (pg, scale, scale, poly), special);
+
+ return svmla_x (pg, scale, scale, poly);
+}
@@ -25,6 +25,7 @@
VPCS_VECTOR_WRAPPER (cos_advsimd, _ZGVnN2v_cos)
VPCS_VECTOR_WRAPPER (exp_advsimd, _ZGVnN2v_exp)
+VPCS_VECTOR_WRAPPER (exp10_advsimd, _ZGVnN2v_exp10)
VPCS_VECTOR_WRAPPER (exp2_advsimd, _ZGVnN2v_exp2)
VPCS_VECTOR_WRAPPER (log_advsimd, _ZGVnN2v_log)
VPCS_VECTOR_WRAPPER (log10_advsimd, _ZGVnN2v_log10)
@@ -34,6 +34,7 @@
SVE_VECTOR_WRAPPER (cos_sve, _ZGVsMxv_cos)
SVE_VECTOR_WRAPPER (exp_sve, _ZGVsMxv_exp)
+SVE_VECTOR_WRAPPER (exp10_sve, _ZGVsMxv_exp10)
SVE_VECTOR_WRAPPER (exp2_sve, _ZGVsMxv_exp2)
SVE_VECTOR_WRAPPER (log_sve, _ZGVsMxv_log)
SVE_VECTOR_WRAPPER (log10_sve, _ZGVsMxv_log10)
@@ -25,6 +25,7 @@
VPCS_VECTOR_WRAPPER (cosf_advsimd, _ZGVnN4v_cosf)
VPCS_VECTOR_WRAPPER (expf_advsimd, _ZGVnN4v_expf)
+VPCS_VECTOR_WRAPPER (exp10f_advsimd, _ZGVnN4v_exp10f)
VPCS_VECTOR_WRAPPER (exp2f_advsimd, _ZGVnN4v_exp2f)
VPCS_VECTOR_WRAPPER (logf_advsimd, _ZGVnN4v_logf)
VPCS_VECTOR_WRAPPER (log10f_advsimd, _ZGVnN4v_log10f)
@@ -34,6 +34,7 @@
SVE_VECTOR_WRAPPER (cosf_sve, _ZGVsMxv_cosf)
SVE_VECTOR_WRAPPER (expf_sve, _ZGVsMxv_expf)
+SVE_VECTOR_WRAPPER (exp10f_sve, _ZGVsMxv_exp10f)
SVE_VECTOR_WRAPPER (exp2f_sve, _ZGVsMxv_exp2f)
SVE_VECTOR_WRAPPER (logf_sve, _ZGVsMxv_logf)
SVE_VECTOR_WRAPPER (log10f_sve, _ZGVsMxv_log10f)
@@ -970,11 +970,19 @@ double: 2
float: 1
ldouble: 2
+Function: "exp10_advsimd":
+double: 1
+float: 2
+
Function: "exp10_downward":
double: 2
float: 1
ldouble: 3
+Function: "exp10_sve":
+double: 1
+float: 1
+
Function: "exp10_towardzero":
double: 2
float: 1
@@ -14,14 +14,18 @@ GLIBC_2.38 _ZGVsMxv_log F
GLIBC_2.38 _ZGVsMxv_logf F
GLIBC_2.38 _ZGVsMxv_sin F
GLIBC_2.38 _ZGVsMxv_sinf F
+GLIBC_2.39 _ZGVnN2v_exp10 F
GLIBC_2.39 _ZGVnN2v_exp2 F
GLIBC_2.39 _ZGVnN2v_log10 F
GLIBC_2.39 _ZGVnN2v_log2 F
GLIBC_2.39 _ZGVnN2v_tan F
+GLIBC_2.39 _ZGVnN4v_exp10f F
GLIBC_2.39 _ZGVnN4v_exp2f F
GLIBC_2.39 _ZGVnN4v_log10f F
GLIBC_2.39 _ZGVnN4v_log2f F
GLIBC_2.39 _ZGVnN4v_tanf F
+GLIBC_2.39 _ZGVsMxv_exp10 F
+GLIBC_2.39 _ZGVsMxv_exp10f F
GLIBC_2.39 _ZGVsMxv_exp2 F
GLIBC_2.39 _ZGVsMxv_exp2f F
GLIBC_2.39 _ZGVsMxv_log10 F