@@ -1311,6 +1311,15 @@ xvsub_w 0111 01000000 11010 ..... ..... ..... @vvv
xvsub_d 0111 01000000 11011 ..... ..... ..... @vvv
xvsub_q 0111 01010010 11011 ..... ..... ..... @vvv

+xvaddi_bu 0111 01101000 10100 ..... ..... ..... @vv_ui5
+xvaddi_hu 0111 01101000 10101 ..... ..... ..... @vv_ui5
+xvaddi_wu 0111 01101000 10110 ..... ..... ..... @vv_ui5
+xvaddi_du 0111 01101000 10111 ..... ..... ..... @vv_ui5
+xvsubi_bu 0111 01101000 11000 ..... ..... ..... @vv_ui5
+xvsubi_hu 0111 01101000 11001 ..... ..... ..... @vv_ui5
+xvsubi_wu 0111 01101000 11010 ..... ..... ..... @vv_ui5
+xvsubi_du 0111 01101000 11011 ..... ..... ..... @vv_ui5
+
xvreplgr2vr_b 0111 01101001 11110 00000 ..... ..... @vr
xvreplgr2vr_h 0111 01101001 11110 00001 ..... ..... @vr
xvreplgr2vr_w 0111 01101001 11110 00010 ..... ..... @vr
@@ -1713,6 +1713,11 @@ static void output_vr_x(DisasContext *ctx, arg_vr *a, const char *mnemonic)
    output(ctx, mnemonic, "x%d, r%d", a->vd, a->rj);
}

+static void output_vv_i_x(DisasContext *ctx, arg_vv_i *a, const char *mnemonic)
+{
+    output(ctx, mnemonic, "x%d, x%d, 0x%x", a->vd, a->vj, a->imm);
+}
+
INSN_LASX(xvadd_b, vvv)
INSN_LASX(xvadd_h, vvv)
INSN_LASX(xvadd_w, vvv)
@@ -1724,6 +1729,15 @@ INSN_LASX(xvsub_w, vvv)
INSN_LASX(xvsub_d, vvv)
INSN_LASX(xvsub_q, vvv)

+INSN_LASX(xvaddi_bu, vv_i)
+INSN_LASX(xvaddi_hu, vv_i)
+INSN_LASX(xvaddi_wu, vv_i)
+INSN_LASX(xvaddi_du, vv_i)
+INSN_LASX(xvsubi_bu, vv_i)
+INSN_LASX(xvsubi_hu, vv_i)
+INSN_LASX(xvsubi_wu, vv_i)
+INSN_LASX(xvsubi_du, vv_i)
+
INSN_LASX(xvreplgr2vr_b, vr)
INSN_LASX(xvreplgr2vr_h, vr)
INSN_LASX(xvreplgr2vr_w, vr)
@@ -247,6 +247,10 @@ static bool gvec_vv_i_vl(DisasContext *ctx, arg_vv_i *a,
    uint32_t vd_ofs = vec_full_offset(a->vd);
    uint32_t vj_ofs = vec_full_offset(a->vj);

+    if (!check_vec(ctx, oprsz)) {
+        return true;
+    }
+
    func(mop, vd_ofs, vj_ofs, a->imm, oprsz, ctx->vl / 8);
    return true;
}
@@ -255,32 +259,40 @@ static bool gvec_vv_i(DisasContext *ctx, arg_vv_i *a, MemOp mop,
                      void (*func)(unsigned, uint32_t, uint32_t,
                                   int64_t, uint32_t, uint32_t))
{
-    if (!check_vec(ctx, 16)) {
-        return true;
-    }
-
    return gvec_vv_i_vl(ctx, a, 16, mop, func);
}

+static bool gvec_xx_i(DisasContext *ctx, arg_vv_i *a, MemOp mop,
+                      void (*func)(unsigned, uint32_t, uint32_t,
+                                   int64_t, uint32_t, uint32_t))
+{
+    return gvec_vv_i_vl(ctx, a, 32, mop, func);
+}
+
static bool gvec_subi_vl(DisasContext *ctx, arg_vv_i *a,
                         uint32_t oprsz, MemOp mop)
{
    uint32_t vd_ofs = vec_full_offset(a->vd);
    uint32_t vj_ofs = vec_full_offset(a->vj);

+    if (!check_vec(ctx, oprsz)) {
+        return true;
+    }
+
    tcg_gen_gvec_addi(mop, vd_ofs, vj_ofs, -a->imm, oprsz, ctx->vl / 8);
    return true;
}

static bool gvec_subi(DisasContext *ctx, arg_vv_i *a, MemOp mop)
{
-    if (!check_vec(ctx, 16)) {
-        return true;
-    }
-
    return gvec_subi_vl(ctx, a, 16, mop);
}

+static bool gvec_xsubi(DisasContext *ctx, arg_vv_i *a, MemOp mop)
+{
+    return gvec_subi_vl(ctx, a, 32, mop);
+}
+
TRANS(vadd_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_add)
TRANS(vadd_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_add)
TRANS(vadd_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_add)
@@ -358,6 +370,14 @@ TRANS(vsubi_bu, LSX, gvec_subi, MO_8)
TRANS(vsubi_hu, LSX, gvec_subi, MO_16)
TRANS(vsubi_wu, LSX, gvec_subi, MO_32)
TRANS(vsubi_du, LSX, gvec_subi, MO_64)
+TRANS(xvaddi_bu, LASX, gvec_xx_i, MO_8, tcg_gen_gvec_addi)
+TRANS(xvaddi_hu, LASX, gvec_xx_i, MO_16, tcg_gen_gvec_addi)
+TRANS(xvaddi_wu, LASX, gvec_xx_i, MO_32, tcg_gen_gvec_addi)
+TRANS(xvaddi_du, LASX, gvec_xx_i, MO_64, tcg_gen_gvec_addi)
+TRANS(xvsubi_bu, LASX, gvec_xsubi, MO_8)
+TRANS(xvsubi_hu, LASX, gvec_xsubi, MO_16)
+TRANS(xvsubi_wu, LASX, gvec_xsubi, MO_32)
+TRANS(xvsubi_du, LASX, gvec_xsubi, MO_64)
TRANS(vneg_b, LSX, gvec_vv, MO_8, tcg_gen_gvec_neg)
TRANS(vneg_h, LSX, gvec_vv, MO_16, tcg_gen_gvec_neg)