new file mode 100644
@@ -0,0 +1,290 @@
+/*
+ * QEMU TILE-Gx helpers
+ *
+ * Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "cpu.h"
+#include "qemu-common.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+#include "fpu.h"
+
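+/*
+ * TILE-Gx implements double-precision arithmetic as a multi-instruction
+ * sequence: fdouble_unpack_min/max extract aligned mantissas, the
+ * fdouble_*_flags instructions analyse the operands, fdouble_addsub does
+ * the wide integer arithmetic, and fdouble_pack1/pack2 rebuild an IEEE
+ * double.  A plausible guest sequence for "fd = fa + fb" (operand roles
+ * are inferred from the helpers below; treat this as a sketch, not a
+ * quote from the ISA manual):
+ *
+ *     fdouble_unpack_max  max, fa, fb
+ *     fdouble_unpack_min  min, fa, fb
+ *     fdouble_add_flags   flags, fa, fb
+ *     fdouble_addsub      max, min, flags
+ *     fdouble_pack1       fd, flags
+ *     fdouble_pack2       fd, max, zero
+ *
+ * The working mantissa is 60 bits wide, with the implicit integer bit
+ * kept at bit 59:
+ */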
+#define TILEGX_F_MAN_HBIT (1ULL << 59)
+
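+/* Bit-level view of an IEEE-754 double: 1 sign, 11 exponent, 52 fraction. */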
+#pragma pack(push, 1)
+typedef union F64Fmt {
+ float64 d;
+ struct {
+#if defined(HOST_WORDS_BIGENDIAN)
+ uint64_t sign : 1;
+ uint64_t exp : 11;
+ uint64_t frac : 52;
+#else
+ uint64_t frac : 52;
+ uint64_t exp : 11;
+ uint64_t sign : 1;
+#endif
+ } bits;
+} F64Fmt;
+#pragma pack(pop)
+
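+/*
+ * Convert the 52-bit fraction into the 60-bit working mantissa: shift in
+ * 7 guard bits and, for normal numbers (non-zero exponent), set the
+ * implicit integer bit.
+ */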
+static uint64_t fr_to_man(F64Fmt v)
+{
+ uint64_t val = (uint64_t)v.bits.frac << 7;
+
+    if (v.bits.exp) {
+        val |= TILEGX_F_MAN_HBIT;
+    }
+
+ return val;
+}
+
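+/*
+ * Return the working mantissa of the operand with the smaller magnitude,
+ * shifted right so that it is aligned with the larger operand's exponent.
+ * NaN and Inf inputs leave the mantissa zero; pack2 later regenerates the
+ * special value from the flags word instead.
+ */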
+uint64_t helper_fdouble_unpack_min(CPUTLGState *env,
+ uint64_t srca, uint64_t srcb)
+{
+ F64Fmt va, vb;
+ TileGXFPDFmtV v;
+
+ va.d = make_float64(srca);
+ vb.d = make_float64(srcb);
+    v.ll = 0; /* zeroing the whole word also clears v.fmt.overflow */
+
+    if (float64_is_any_nan(va.d) || float64_is_any_nan(vb.d)
+        || float64_is_infinity(va.d) || float64_is_infinity(vb.d)) {
+        /* nothing to do: the result mantissa stays zero for NaN/Inf */
+    } else if (va.bits.exp > vb.bits.exp) {
+        if (va.bits.exp - vb.bits.exp < 64) {
+            v.fmt.mantissa = fr_to_man(vb) >> (va.bits.exp - vb.bits.exp);
+        }
+    } else if (va.bits.exp < vb.bits.exp) {
+        if (vb.bits.exp - va.bits.exp < 64) {
+            v.fmt.mantissa = fr_to_man(va) >> (vb.bits.exp - va.bits.exp);
+        }
+    } else if (va.bits.frac > vb.bits.frac) {
+        v.fmt.mantissa = fr_to_man(vb);
+    } else {
+        v.fmt.mantissa = fr_to_man(va);
+    }
+
+ return v.ll;
+}
+
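+/* Return the working mantissa of the larger-magnitude operand (zero for
+ * NaN/Inf inputs, as in fdouble_unpack_min above). */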
+uint64_t helper_fdouble_unpack_max(CPUTLGState *env,
+ uint64_t srca, uint64_t srcb)
+{
+ F64Fmt va, vb;
+ TileGXFPDFmtV v;
+
+ va.d = make_float64(srca);
+ vb.d = make_float64(srcb);
+    v.ll = 0; /* zeroing the whole word also clears v.fmt.overflow */
+
+    if (float64_is_any_nan(va.d) || float64_is_any_nan(vb.d)
+        || float64_is_infinity(va.d) || float64_is_infinity(vb.d)) {
+        /* nothing to do: the result mantissa stays zero for NaN/Inf */
+    } else if (va.bits.exp > vb.bits.exp) {
+        v.fmt.mantissa = fr_to_man(va);
+    } else if (va.bits.exp < vb.bits.exp) {
+        v.fmt.mantissa = fr_to_man(vb);
+    } else if (va.bits.frac > vb.bits.frac) {
+        v.fmt.mantissa = fr_to_man(va);
+    } else {
+        v.fmt.mantissa = fr_to_man(vb);
+    }
+
+ return v.ll;
+}
+
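+/*
+ * Add or subtract the two unpacked mantissas.  Per the fdouble sequence,
+ * dest and srca carry the unpack_max and unpack_min results and srcb is
+ * the flags word that selects the operation (an inference from how
+ * fdouble_pack2 consumes the result, not from the ISA manual).
+ */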
+uint64_t helper_fdouble_addsub(CPUTLGState *env,
+ uint64_t dest, uint64_t srca, uint64_t srcb)
+{
+ TileGXFPDFmtF flags;
+ TileGXFPDFmtV v;
+
+ flags.ll = srcb;
+    if (flags.fmt.calc == TILEGX_F_CALC_ADD) {
+        v.ll = dest + srca; /* may set the addsub overflow bit */
+    } else {
+        v.ll = dest - srca;
+    }
+
+ return v.ll;
+}
+
+/*
+ * An absolute add or mul can carry out of the 60-bit mantissa.  If so,
+ * shift the wide (mantissa:srcb) value right by one bit, bump the
+ * exponent, and report whether the exponent has now overflowed.
+ */
+static bool proc_oflow(TileGXFPDFmtF *flags, TileGXFPDFmtV *v, uint64_t *srcb)
+{
+    if (v->fmt.overflow) {
+        flags->fmt.vexp++;
+        *srcb >>= 1;
+        *srcb |= (uint64_t)v->ll << 63;
+        v->ll >>= 1;
+        v->fmt.overflow = 0;
+    }
+    return flags->fmt.vexp > TILEGX_F_EXP_DMAX;
+}
+
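+/*
+ * Repack the wide mantissa (srca, with extra low-order bits in srcb) and
+ * the flags word (dest) into an IEEE double: emit NaN/Inf/zero for the
+ * special cases, renormalize so that the integer bit lands at bit 59,
+ * then splice the biased exponent back into the result.
+ */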
+uint64_t helper_fdouble_pack2(CPUTLGState *env,
+ uint64_t dest, uint64_t srca, uint64_t srcb)
+{
+ TileGXFPDFmtF flags;
+ TileGXFPDFmtV v;
+ F64Fmt d;
+
+ flags.ll = dest;
+ v.ll = srca;
+
+    d.d = float64_zero;
+ d.bits.sign = flags.fmt.sign;
+
+    /*
+     * fdouble_add_flags, fdouble_sub_flags, or fdouble_mul_flags has
+     * already raised any floating-point exceptions, so there is no need
+     * to touch fp_status again here.
+     */
+
+    if (flags.fmt.nan) {
+        return float64_val(float64_default_nan);
+    } else if (flags.fmt.inf) {
+        return float64_val(d.d |= float64_infinity);
+    }
+
+    /* an absolute mul needs a left shift of 4 + 1 bits to line up with
+     * the real mantissa position */
+    if (flags.fmt.calc == TILEGX_F_CALC_MUL) {
+        v.ll <<= 5;
+        v.ll |= srcb >> 59;
+        srcb <<= 5;
+    }
+
+    if (flags.fmt.vexp & TILEGX_F_EXP_DUF) { /* must check underflow first */
+        return float64_val(d.d);
+    }
+
+    if (proc_oflow(&flags, &v, &srcb)) {
+        return float64_val(d.d |= float64_infinity);
+    }
+
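+    /* normalize: shift left until the integer bit reaches bit 59, pulling
+     * guard bits up from srcb */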
+ while (!(v.fmt.mantissa & TILEGX_F_MAN_HBIT) && (v.fmt.mantissa | srcb)) {
+ flags.fmt.vexp--;
+ v.fmt.mantissa <<= 1;
+        v.fmt.mantissa |= srcb >> 63;
+ srcb <<= 1;
+ }
+
+    /* check underflow again after normalization */
+    if ((flags.fmt.vexp & TILEGX_F_EXP_DUF) || !v.fmt.mantissa) {
+        return float64_val(d.d);
+    }
+
+    if (flags.fmt.sign) {
+        d.d = int64_to_float64(0 - (int64_t)v.fmt.mantissa, &env->fp_status);
+    } else {
+        d.d = uint64_to_float64((uint64_t)v.fmt.mantissa, &env->fp_status);
+    }
+
+    if (d.bits.exp == 59 + TILEGX_F_EXP_DZERO) { /* no rounding carry */
+        d.bits.exp = flags.fmt.vexp;
+    } else { /* the conversion carried: bump the exponent and recheck */
+        d.bits.exp = flags.fmt.vexp + 1;
+        if (d.bits.exp == TILEGX_F_EXP_DMAX) {
+            d.d = float64_infinity;
+        }
+    }
+
+ d.bits.sign = flags.fmt.sign;
+
+ return float64_val(d.d);
+}
+
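+/* Set the comparison bits (eq/neq/lt/le/gt/ge/unordered) in the flags word. */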
+static void ana_bits(float_status *fp_status,
+ float64 fsrca, float64 fsrcb, TileGXFPDFmtF *dfmt)
+{
+ if (float64_eq(fsrca, fsrcb, fp_status)) {
+ dfmt->fmt.eq = 1;
+ } else {
+ dfmt->fmt.neq = 1;
+ }
+
+ if (float64_lt(fsrca, fsrcb, fp_status)) {
+ dfmt->fmt.lt = 1;
+ }
+ if (float64_le(fsrca, fsrcb, fp_status)) {
+ dfmt->fmt.le = 1;
+ }
+
+ if (float64_lt(fsrcb, fsrca, fp_status)) {
+ dfmt->fmt.gt = 1;
+ }
+ if (float64_le(fsrcb, fsrca, fp_status)) {
+ dfmt->fmt.ge = 1;
+ }
+
+ if (float64_unordered(fsrca, fsrcb, fp_status)) {
+ dfmt->fmt.unordered = 1;
+ }
+}
+
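+/*
+ * Build the flags word for a double-precision operation: the comparison
+ * bits, the result's sign, NaN/Inf markers, the tentative exponent (the
+ * larger operand exponent for add/sub, the sum of the unbiased exponents
+ * for mul) and the calculation kind for fdouble_addsub.
+ */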
+static uint64_t main_calc(float_status *fp_status,
+ float64 fsrca, float64 fsrcb,
+ float64 (*calc)(float64, float64, float_status *))
+{
+ F64Fmt va, vb, vf;
+ TileGXFPDFmtF flags;
+
+ flags.ll = 0;
+ ana_bits(fp_status, fsrca, fsrcb, &flags);
+
+    vf.d = calc(fsrca, fsrcb, fp_status); /* also raises any exceptions */
+ flags.fmt.sign = vf.bits.sign;
+
+ va.d = fsrca;
+ vb.d = fsrcb;
+    if (float64_is_any_nan(vf.d)) {
+        flags.fmt.nan = 1;
+    } else if (float64_is_infinity(vf.d)) {
+        flags.fmt.inf = 1;
+    } else if (calc == float64_add) {
+        flags.fmt.vexp = (va.bits.exp > vb.bits.exp)
+                         ? va.bits.exp : vb.bits.exp;
+        flags.fmt.calc = (va.bits.sign == vb.bits.sign)
+                         ? TILEGX_F_CALC_ADD : TILEGX_F_CALC_SUB;
+    } else if (calc == float64_sub) {
+        flags.fmt.vexp = (va.bits.exp > vb.bits.exp)
+                         ? va.bits.exp : vb.bits.exp;
+        flags.fmt.calc = (va.bits.sign != vb.bits.sign)
+                         ? TILEGX_F_CALC_ADD : TILEGX_F_CALC_SUB;
+    } else {
+        flags.fmt.vexp = (int64_t)(va.bits.exp - TILEGX_F_EXP_DZERO)
+                         + (int64_t)(vb.bits.exp - TILEGX_F_EXP_DZERO)
+                         + TILEGX_F_EXP_DZERO;
+        flags.fmt.calc = TILEGX_F_CALC_MUL;
+    }
+
+ return flags.ll;
+}
+
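+/* Flags builders for the guest fdouble_{add,sub,mul}_flags instructions. */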
+uint64_t helper_fdouble_add_flags(CPUTLGState *env,
+ uint64_t srca, uint64_t srcb)
+{
+ return main_calc(&env->fp_status,
+ make_float64(srca), make_float64(srcb), float64_add);
+}
+
+uint64_t helper_fdouble_sub_flags(CPUTLGState *env,
+ uint64_t srca, uint64_t srcb)
+{
+ return main_calc(&env->fp_status,
+ make_float64(srca), make_float64(srcb), float64_sub);
+}
+
+uint64_t helper_fdouble_mul_flags(CPUTLGState *env,
+ uint64_t srca, uint64_t srcb)
+{
+ return main_calc(&env->fp_status,
+ make_float64(srca), make_float64(srcb), float64_mul);
+}