--- a/target/loongarch/disas.c
+++ b/target/loongarch/disas.c
@@ -1890,6 +1890,44 @@ INSN_LASX(xvmini_hu, vv_i)
INSN_LASX(xvmini_wu, vv_i)
INSN_LASX(xvmini_du, vv_i)
+INSN_LASX(xvmul_b, vvv)
+INSN_LASX(xvmul_h, vvv)
+INSN_LASX(xvmul_w, vvv)
+INSN_LASX(xvmul_d, vvv)
+INSN_LASX(xvmuh_b, vvv)
+INSN_LASX(xvmuh_h, vvv)
+INSN_LASX(xvmuh_w, vvv)
+INSN_LASX(xvmuh_d, vvv)
+INSN_LASX(xvmuh_bu, vvv)
+INSN_LASX(xvmuh_hu, vvv)
+INSN_LASX(xvmuh_wu, vvv)
+INSN_LASX(xvmuh_du, vvv)
+
+INSN_LASX(xvmulwev_h_b, vvv)
+INSN_LASX(xvmulwev_w_h, vvv)
+INSN_LASX(xvmulwev_d_w, vvv)
+INSN_LASX(xvmulwev_q_d, vvv)
+INSN_LASX(xvmulwod_h_b, vvv)
+INSN_LASX(xvmulwod_w_h, vvv)
+INSN_LASX(xvmulwod_d_w, vvv)
+INSN_LASX(xvmulwod_q_d, vvv)
+INSN_LASX(xvmulwev_h_bu, vvv)
+INSN_LASX(xvmulwev_w_hu, vvv)
+INSN_LASX(xvmulwev_d_wu, vvv)
+INSN_LASX(xvmulwev_q_du, vvv)
+INSN_LASX(xvmulwod_h_bu, vvv)
+INSN_LASX(xvmulwod_w_hu, vvv)
+INSN_LASX(xvmulwod_d_wu, vvv)
+INSN_LASX(xvmulwod_q_du, vvv)
+INSN_LASX(xvmulwev_h_bu_b, vvv)
+INSN_LASX(xvmulwev_w_hu_h, vvv)
+INSN_LASX(xvmulwev_d_wu_w, vvv)
+INSN_LASX(xvmulwev_q_du_d, vvv)
+INSN_LASX(xvmulwod_h_bu_b, vvv)
+INSN_LASX(xvmulwod_w_hu_h, vvv)
+INSN_LASX(xvmulwod_d_wu_w, vvv)
+INSN_LASX(xvmulwod_q_du_d, vvv)
+
INSN_LASX(xvreplgr2vr_b, vr)
INSN_LASX(xvreplgr2vr_h, vr)
INSN_LASX(xvreplgr2vr_w, vr)
INSN_LASX(xvreplgr2vr_d, vr)
--- a/target/loongarch/insn_trans/trans_lasx.c.inc
+++ b/target/loongarch/insn_trans/trans_lasx.c.inc
@@ -207,6 +207,74 @@ TRANS(xvmaxi_hu, gvec_vv_i, 32, MO_16, do_vmaxi_u)
TRANS(xvmaxi_wu, gvec_vv_i, 32, MO_32, do_vmaxi_u)
TRANS(xvmaxi_du, gvec_vv_i, 32, MO_64, do_vmaxi_u)
+TRANS(xvmul_b, gvec_vvv, 32, MO_8, tcg_gen_gvec_mul)
+TRANS(xvmul_h, gvec_vvv, 32, MO_16, tcg_gen_gvec_mul)
+TRANS(xvmul_w, gvec_vvv, 32, MO_32, tcg_gen_gvec_mul)
+TRANS(xvmul_d, gvec_vvv, 32, MO_64, tcg_gen_gvec_mul)
+TRANS(xvmuh_b, gvec_vvv, 32, MO_8, do_vmuh_s)
+TRANS(xvmuh_h, gvec_vvv, 32, MO_16, do_vmuh_s)
+TRANS(xvmuh_w, gvec_vvv, 32, MO_32, do_vmuh_s)
+TRANS(xvmuh_d, gvec_vvv, 32, MO_64, do_vmuh_s)
+TRANS(xvmuh_bu, gvec_vvv, 32, MO_8, do_vmuh_u)
+TRANS(xvmuh_hu, gvec_vvv, 32, MO_16, do_vmuh_u)
+TRANS(xvmuh_wu, gvec_vvv, 32, MO_32, do_vmuh_u)
+TRANS(xvmuh_du, gvec_vvv, 32, MO_64, do_vmuh_u)
+
+TRANS(xvmulwev_h_b, gvec_vvv, 32, MO_8, do_vmulwev_s)
+TRANS(xvmulwev_w_h, gvec_vvv, 32, MO_16, do_vmulwev_s)
+TRANS(xvmulwev_d_w, gvec_vvv, 32, MO_32, do_vmulwev_s)
+
+#define XVMUL_Q(NAME, FN, idx1, idx2) \
+static bool trans_## NAME(DisasContext *ctx, arg_vvv *a) \
+{ \
+ TCGv_i64 rh, rl, arg1, arg2; \
+ int i; \
+ \
+ CHECK_VEC; \
+ \
+ rh = tcg_temp_new_i64(); \
+ rl = tcg_temp_new_i64(); \
+ arg1 = tcg_temp_new_i64(); \
+ arg2 = tcg_temp_new_i64(); \
+ \
+ for (i = 0; i < 2; i++) { \
+ get_vreg64(arg1, a->vj, idx1 + i * 2); \
+ get_vreg64(arg2, a->vk, idx2 + i * 2); \
+ \
+ tcg_gen_## FN ##_i64(rl, rh, arg1, arg2); \
+ \
+ set_vreg64(rh, a->vd, 1 + i * 2); \
+ set_vreg64(rl, a->vd, 0 + i * 2); \
+ } \
+ \
+ return true; \
+}
+
+XVMUL_Q(xvmulwev_q_d, muls2, 0, 0)
+XVMUL_Q(xvmulwod_q_d, muls2, 1, 1)
+XVMUL_Q(xvmulwev_q_du, mulu2, 0, 0)
+XVMUL_Q(xvmulwod_q_du, mulu2, 1, 1)
+XVMUL_Q(xvmulwev_q_du_d, mulus2, 0, 0)
+XVMUL_Q(xvmulwod_q_du_d, mulus2, 1, 1)
+
+TRANS(xvmulwod_h_b, gvec_vvv, 32, MO_8, do_vmulwod_s)
+TRANS(xvmulwod_w_h, gvec_vvv, 32, MO_16, do_vmulwod_s)
+TRANS(xvmulwod_d_w, gvec_vvv, 32, MO_32, do_vmulwod_s)
+
+TRANS(xvmulwev_h_bu, gvec_vvv, 32, MO_8, do_vmulwev_u)
+TRANS(xvmulwev_w_hu, gvec_vvv, 32, MO_16, do_vmulwev_u)
+TRANS(xvmulwev_d_wu, gvec_vvv, 32, MO_32, do_vmulwev_u)
+TRANS(xvmulwod_h_bu, gvec_vvv, 32, MO_8, do_vmulwod_u)
+TRANS(xvmulwod_w_hu, gvec_vvv, 32, MO_16, do_vmulwod_u)
+TRANS(xvmulwod_d_wu, gvec_vvv, 32, MO_32, do_vmulwod_u)
+
+TRANS(xvmulwev_h_bu_b, gvec_vvv, 32, MO_8, do_vmulwev_u_s)
+TRANS(xvmulwev_w_hu_h, gvec_vvv, 32, MO_16, do_vmulwev_u_s)
+TRANS(xvmulwev_d_wu_w, gvec_vvv, 32, MO_32, do_vmulwev_u_s)
+TRANS(xvmulwod_h_bu_b, gvec_vvv, 32, MO_8, do_vmulwod_u_s)
+TRANS(xvmulwod_w_hu_h, gvec_vvv, 32, MO_16, do_vmulwod_u_s)
+TRANS(xvmulwod_d_wu_w, gvec_vvv, 32, MO_32, do_vmulwod_u_s)
+
TRANS(xvreplgr2vr_b, gvec_dup, 32, MO_8)
TRANS(xvreplgr2vr_h, gvec_dup, 32, MO_16)
TRANS(xvreplgr2vr_w, gvec_dup, 32, MO_32)
TRANS(xvreplgr2vr_d, gvec_dup, 32, MO_64)
--- a/target/loongarch/insn_trans/trans_lsx.c.inc
+++ b/target/loongarch/insn_trans/trans_lsx.c.inc
@@ -1730,6 +1730,8 @@ static bool trans_## NAME (DisasContext *ctx, arg_vvv *a) \
{ \
TCGv_i64 rh, rl, arg1, arg2; \
\
+ CHECK_VEC; \
+ \
rh = tcg_temp_new_i64(); \
rl = tcg_temp_new_i64(); \
arg1 = tcg_temp_new_i64(); \
--- a/target/loongarch/insns.decode
+++ b/target/loongarch/insns.decode
@@ -1473,6 +1473,44 @@ xvmini_hu 0111 01101001 01101 ..... ..... ..... @vv_ui5
xvmini_wu 0111 01101001 01110 ..... ..... ..... @vv_ui5
xvmini_du 0111 01101001 01111 ..... ..... ..... @vv_ui5
+xvmul_b 0111 01001000 01000 ..... ..... ..... @vvv
+xvmul_h 0111 01001000 01001 ..... ..... ..... @vvv
+xvmul_w 0111 01001000 01010 ..... ..... ..... @vvv
+xvmul_d 0111 01001000 01011 ..... ..... ..... @vvv
+xvmuh_b 0111 01001000 01100 ..... ..... ..... @vvv
+xvmuh_h 0111 01001000 01101 ..... ..... ..... @vvv
+xvmuh_w 0111 01001000 01110 ..... ..... ..... @vvv
+xvmuh_d 0111 01001000 01111 ..... ..... ..... @vvv
+xvmuh_bu 0111 01001000 10000 ..... ..... ..... @vvv
+xvmuh_hu 0111 01001000 10001 ..... ..... ..... @vvv
+xvmuh_wu 0111 01001000 10010 ..... ..... ..... @vvv
+xvmuh_du 0111 01001000 10011 ..... ..... ..... @vvv
+
+xvmulwev_h_b 0111 01001001 00000 ..... ..... ..... @vvv
+xvmulwev_w_h 0111 01001001 00001 ..... ..... ..... @vvv
+xvmulwev_d_w 0111 01001001 00010 ..... ..... ..... @vvv
+xvmulwev_q_d 0111 01001001 00011 ..... ..... ..... @vvv
+xvmulwod_h_b 0111 01001001 00100 ..... ..... ..... @vvv
+xvmulwod_w_h 0111 01001001 00101 ..... ..... ..... @vvv
+xvmulwod_d_w 0111 01001001 00110 ..... ..... ..... @vvv
+xvmulwod_q_d 0111 01001001 00111 ..... ..... ..... @vvv
+xvmulwev_h_bu 0111 01001001 10000 ..... ..... ..... @vvv
+xvmulwev_w_hu 0111 01001001 10001 ..... ..... ..... @vvv
+xvmulwev_d_wu 0111 01001001 10010 ..... ..... ..... @vvv
+xvmulwev_q_du 0111 01001001 10011 ..... ..... ..... @vvv
+xvmulwod_h_bu 0111 01001001 10100 ..... ..... ..... @vvv
+xvmulwod_w_hu 0111 01001001 10101 ..... ..... ..... @vvv
+xvmulwod_d_wu 0111 01001001 10110 ..... ..... ..... @vvv
+xvmulwod_q_du 0111 01001001 10111 ..... ..... ..... @vvv
+xvmulwev_h_bu_b 0111 01001010 00000 ..... ..... ..... @vvv
+xvmulwev_w_hu_h 0111 01001010 00001 ..... ..... ..... @vvv
+xvmulwev_d_wu_w 0111 01001010 00010 ..... ..... ..... @vvv
+xvmulwev_q_du_d 0111 01001010 00011 ..... ..... ..... @vvv
+xvmulwod_h_bu_b 0111 01001010 00100 ..... ..... ..... @vvv
+xvmulwod_w_hu_h 0111 01001010 00101 ..... ..... ..... @vvv
+xvmulwod_d_wu_w 0111 01001010 00110 ..... ..... ..... @vvv
+xvmulwod_q_du_d 0111 01001010 00111 ..... ..... ..... @vvv
+
xvreplgr2vr_b 0111 01101001 11110 00000 ..... ..... @vr
xvreplgr2vr_h 0111 01101001 11110 00001 ..... ..... @vr
xvreplgr2vr_w 0111 01101001 11110 00010 ..... ..... @vr
xvreplgr2vr_d 0111 01101001 11110 00011 ..... ..... @vr
--- a/target/loongarch/vec.h
+++ b/target/loongarch/vec.h
@@ -60,4 +60,6 @@
#define DO_MIN(a, b) (a < b ? a : b)
#define DO_MAX(a, b) (a > b ? a : b)

+#define DO_MUL(a, b) (a * b)
+
#endif /* LOONGARCH_VEC_H */
--- a/target/loongarch/vec_helper.c
+++ b/target/loongarch/vec_helper.c
@@ -434,29 +434,31 @@ VMINMAXI(vmaxi_du, 64, UD, DO_MAX)
#define DO_VMUH(NAME, BIT, E1, E2, DO_OP) \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{ \
- int i; \
+ int i, len; \
VReg *Vd = (VReg *)vd; \
VReg *Vj = (VReg *)vj; \
VReg *Vk = (VReg *)vk; \
typedef __typeof(Vd->E1(0)) T; \
\
- for (i = 0; i < LSX_LEN/BIT; i++) { \
+ len = (simd_oprsz(v) == 16) ? LSX_LEN : LASX_LEN; \
+ for (i = 0; i < len / BIT; i++) { \
Vd->E2(i) = ((T)Vj->E2(i)) * ((T)Vk->E2(i)) >> BIT; \
} \
}

void HELPER(vmuh_d)(void *vd, void *vj, void *vk, uint32_t v)
{
- uint64_t l, h1, h2;
+ int i, len;
+ uint64_t l, h;
VReg *Vd = (VReg *)vd;
VReg *Vj = (VReg *)vj;
VReg *Vk = (VReg *)vk;

- muls64(&l, &h1, Vj->D(0), Vk->D(0));
- muls64(&l, &h2, Vj->D(1), Vk->D(1));
-
- Vd->D(0) = h1;
- Vd->D(1) = h2;
+ len = (simd_oprsz(v) == 16) ? LSX_LEN : LASX_LEN;
+ for (i = 0; i < len / 64; i++) {
+ muls64(&l, &h, Vj->D(i), Vk->D(i));
+ Vd->D(i) = h;
+ }
}

DO_VMUH(vmuh_b, 8, H, B, DO_MUH)
@@ -465,24 +467,23 @@ DO_VMUH(vmuh_w, 32, D, W, DO_MUH)
void HELPER(vmuh_du)(void *vd, void *vj, void *vk, uint32_t v)
{
- uint64_t l, h1, h2;
+ int i, len;
+ uint64_t l, h;
VReg *Vd = (VReg *)vd;
VReg *Vj = (VReg *)vj;
VReg *Vk = (VReg *)vk;

- mulu64(&l, &h1, Vj->D(0), Vk->D(0));
- mulu64(&l, &h2, Vj->D(1), Vk->D(1));
-
- Vd->D(0) = h1;
- Vd->D(1) = h2;
+ len = (simd_oprsz(v) == 16) ? LSX_LEN : LASX_LEN;
+ for (i = 0; i < len / 64; i++) {
+ mulu64(&l, &h, Vj->D(i), Vk->D(i));
+ Vd->D(i) = h;
+ }
}

DO_VMUH(vmuh_bu, 8, UH, UB, DO_MUH)
DO_VMUH(vmuh_hu, 16, UW, UH, DO_MUH)
DO_VMUH(vmuh_wu, 32, UD, UW, DO_MUH)

-#define DO_MUL(a, b) (a * b)
-
DO_EVEN(vmulwev_h_b, 16, H, B, DO_MUL)
DO_EVEN(vmulwev_w_h, 32, W, H, DO_MUL)
DO_EVEN(vmulwev_d_w, 64, D, W, DO_MUL)
This patch includes: - XVMUL.{B/H/W/D}; - XVMUH.{B/H/W/D}[U]; - XVMULW{EV/OD}.{H.B/W.H/D.W/Q.D}[U]; - XVMULW{EV/OD}.{H.BU.B/W.HU.H/D.WU.W/Q.DU.D}. Signed-off-by: Song Gao <gaosong@loongson.cn> --- target/loongarch/disas.c | 38 +++++++++++ target/loongarch/insn_trans/trans_lasx.c.inc | 68 ++++++++++++++++++++ target/loongarch/insn_trans/trans_lsx.c.inc | 2 + target/loongarch/insns.decode | 38 +++++++++++ target/loongarch/vec.h | 2 + target/loongarch/vec_helper.c | 33 +++++----- 6 files changed, 165 insertions(+), 16 deletions(-)