@@ -89,11 +89,56 @@ static void decode_addi_opcode_y0(struct DisasContext *dc,
static void decode_rrr_1_opcode_y0(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_RRROpcodeExtension_Y0(bundle)) {
+ case UNARY_RRR_1_OPCODE_Y0:
+ switch (get_UnaryOpcodeExtension_Y0(bundle)) {
+ case NOP_UNARY_OPCODE_Y0:
+ case FNOP_UNARY_OPCODE_Y0:
+ if (!get_SrcA_Y0(bundle) && !get_Dest_Y0(bundle)) {
+ return;
+ }
+ break;
+ case CNTLZ_UNARY_OPCODE_Y0:
+ case CNTTZ_UNARY_OPCODE_Y0:
+ case FSINGLE_PACK1_UNARY_OPCODE_Y0:
+ case PCNT_UNARY_OPCODE_Y0:
+ case REVBITS_UNARY_OPCODE_Y0:
+ case REVBYTES_UNARY_OPCODE_Y0:
+ case TBLIDXB0_UNARY_OPCODE_Y0:
+ case TBLIDXB1_UNARY_OPCODE_Y0:
+ case TBLIDXB2_UNARY_OPCODE_Y0:
+ case TBLIDXB3_UNARY_OPCODE_Y0:
+ default:
+ break;
+ }
+ break;
+ case SHL1ADD_RRR_1_OPCODE_Y0:
+ case SHL2ADD_RRR_1_OPCODE_Y0:
+ case SHL3ADD_RRR_1_OPCODE_Y0:
+ default:
+ break;
+ }
+
+ qemu_log_mask(LOG_UNIMP, "UNIMP rrr_1_opcode_y0, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_rrr_5_opcode_y0(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_RRROpcodeExtension_Y0(bundle)) {
+ case OR_RRR_5_OPCODE_Y0:
+ return;
+ case AND_RRR_5_OPCODE_Y0:
+ case NOR_RRR_5_OPCODE_Y0:
+ case XOR_RRR_5_OPCODE_Y0:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP rrr_5_opcode_y0, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_addi_opcode_y1(struct DisasContext *dc,
@@ -104,31 +149,116 @@ static void decode_addi_opcode_y1(struct DisasContext *dc,
static void decode_rrr_1_opcode_y1(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_RRROpcodeExtension_Y1(bundle)) {
+ case UNARY_RRR_1_OPCODE_Y1:
+ switch (get_UnaryOpcodeExtension_Y1(bundle)) {
+ case NOP_UNARY_OPCODE_Y1:
+ case FNOP_UNARY_OPCODE_Y1:
+ if (!get_SrcA_Y1(bundle) && !get_Dest_Y1(bundle)) {
+ return;
+ }
+ break;
+ case ILL_UNARY_OPCODE_Y1:
+ case JALRP_UNARY_OPCODE_Y1:
+ case JALR_UNARY_OPCODE_Y1:
+ case JRP_UNARY_OPCODE_Y1:
+ case JR_UNARY_OPCODE_Y1:
+ case LNK_UNARY_OPCODE_Y1:
+ default:
+ break;
+ }
+ break;
+ case SHL1ADD_RRR_1_OPCODE_Y1:
+ case SHL2ADD_RRR_1_OPCODE_Y1:
+ case SHL3ADD_RRR_1_OPCODE_Y1:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP rrr_1_opcode_y1, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_rrr_5_opcode_y1(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_RRROpcodeExtension_Y1(bundle)) {
+ case OR_RRR_5_OPCODE_Y1:
+ return;
+ case AND_RRR_5_OPCODE_Y1:
+ case NOR_RRR_5_OPCODE_Y1:
+ case XOR_RRR_5_OPCODE_Y1:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP rrr_5_opcode_y1, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_ldst0_opcode_y2(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_Mode(bundle)) {
+ case MODE_OPCODE_YA2:
+ return;
+ case MODE_OPCODE_YB2:
+ case MODE_OPCODE_YC2:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP ldst0_opcode_y2, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_ldst1_opcode_y2(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_Mode(bundle)) {
+ case MODE_OPCODE_YB2:
+ return;
+ case MODE_OPCODE_YA2:
+ case MODE_OPCODE_YC2:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP ldst1_opcode_y2, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_ldst2_opcode_y2(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_Mode(bundle)) {
+ case MODE_OPCODE_YC2:
+ return;
+ case MODE_OPCODE_YA2:
+ case MODE_OPCODE_YB2:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP ldst2_opcode_y2, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_ldst3_opcode_y2(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_Mode(bundle)) {
+ case MODE_OPCODE_YB2:
+ return;
+ case MODE_OPCODE_YC2:
+ return;
+ case MODE_OPCODE_YA2:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP ldst3_opcode_y2, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_addli_opcode_x0(struct DisasContext *dc,
@@ -139,21 +269,290 @@ static void decode_addli_opcode_x0(struct DisasContext *dc,
static void decode_bf_opcode_x0(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_BFOpcodeExtension_X0(bundle)) {
+ case BFEXTU_BF_OPCODE_X0:
+ return;
+ case BFEXTS_BF_OPCODE_X0:
+ case BFINS_BF_OPCODE_X0:
+ case MM_BF_OPCODE_X0:
+ default:
+ break;
+ }
+
+ qemu_log_mask(LOG_UNIMP, "UNIMP bf_opcode_x0, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_imm8_opcode_x0(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_Imm8OpcodeExtension_X0(bundle)) {
+ case ADDI_IMM8_OPCODE_X0:
+ return;
+ case ADDXI_IMM8_OPCODE_X0:
+ return;
+ case ANDI_IMM8_OPCODE_X0:
+ return;
+ case CMPEQI_IMM8_OPCODE_X0:
+ return;
+ case ORI_IMM8_OPCODE_X0:
+ return;
+ case CMPLTSI_IMM8_OPCODE_X0:
+ case CMPLTUI_IMM8_OPCODE_X0:
+ case V1ADDI_IMM8_OPCODE_X0:
+ case V1CMPEQI_IMM8_OPCODE_X0:
+ case V1CMPLTSI_IMM8_OPCODE_X0:
+ case V1CMPLTUI_IMM8_OPCODE_X0:
+ case V1MAXUI_IMM8_OPCODE_X0:
+ case V1MINUI_IMM8_OPCODE_X0:
+ case V2ADDI_IMM8_OPCODE_X0:
+ case V2CMPEQI_IMM8_OPCODE_X0:
+ case V2CMPLTSI_IMM8_OPCODE_X0:
+ case V2CMPLTUI_IMM8_OPCODE_X0:
+ case V2MAXSI_IMM8_OPCODE_X0:
+ case V2MINSI_IMM8_OPCODE_X0:
+ case XORI_IMM8_OPCODE_X0:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP imm8_opcode_x0, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_rrr_0_opcode_x0(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ uint8_t rsrc = (uint8_t)get_SrcA_X0(bundle);
+ uint8_t rdst = (uint8_t)get_Dest_X0(bundle);
+
+ switch (get_RRROpcodeExtension_X0(bundle)) {
+ case ADD_RRR_0_OPCODE_X0:
+ return;
+ case CMOVEQZ_RRR_0_OPCODE_X0:
+ return;
+ case CMOVNEZ_RRR_0_OPCODE_X0:
+ return;
+ case CMPNE_RRR_0_OPCODE_X0:
+ return;
+ case MULX_RRR_0_OPCODE_X0:
+ return;
+ case OR_RRR_0_OPCODE_X0:
+ return;
+ case SHL3ADD_RRR_0_OPCODE_X0:
+ return;
+ case SHLX_RRR_0_OPCODE_X0:
+ return;
+ case SUBX_RRR_0_OPCODE_X0:
+ return;
+ case UNARY_RRR_0_OPCODE_X0:
+ switch (get_UnaryOpcodeExtension_X0(bundle)) {
+ case FNOP_UNARY_OPCODE_X0:
+ case NOP_UNARY_OPCODE_X0:
+ if (!rsrc && !rdst) {
+ return;
+ }
+ break;
+ case CNTTZ_UNARY_OPCODE_X0:
+ case FSINGLE_PACK1_UNARY_OPCODE_X0:
+ case PCNT_UNARY_OPCODE_X0:
+ case REVBITS_UNARY_OPCODE_X0:
+ case REVBYTES_UNARY_OPCODE_X0:
+ case TBLIDXB0_UNARY_OPCODE_X0:
+ case TBLIDXB1_UNARY_OPCODE_X0:
+ case TBLIDXB2_UNARY_OPCODE_X0:
+ case TBLIDXB3_UNARY_OPCODE_X0:
+ default:
+ break;
+ }
+ break;
+ case XOR_RRR_0_OPCODE_X0:
+ return;
+ case ADDXSC_RRR_0_OPCODE_X0:
+ case ADDX_RRR_0_OPCODE_X0:
+ case AND_RRR_0_OPCODE_X0:
+ case CMPEQ_RRR_0_OPCODE_X0:
+ case CMPLES_RRR_0_OPCODE_X0:
+ case CMPLEU_RRR_0_OPCODE_X0:
+ case CMPLTS_RRR_0_OPCODE_X0:
+ case CMPLTU_RRR_0_OPCODE_X0:
+ case CMULAF_RRR_0_OPCODE_X0:
+ case CMULA_RRR_0_OPCODE_X0:
+ case CMULFR_RRR_0_OPCODE_X0:
+ case CMULF_RRR_0_OPCODE_X0:
+ case CMULHR_RRR_0_OPCODE_X0:
+ case CMULH_RRR_0_OPCODE_X0:
+ case CMUL_RRR_0_OPCODE_X0:
+ case CRC32_32_RRR_0_OPCODE_X0:
+ case CRC32_8_RRR_0_OPCODE_X0:
+ case DBLALIGN2_RRR_0_OPCODE_X0:
+ case DBLALIGN4_RRR_0_OPCODE_X0:
+ case DBLALIGN6_RRR_0_OPCODE_X0:
+ case DBLALIGN_RRR_0_OPCODE_X0:
+ case FDOUBLE_ADDSUB_RRR_0_OPCODE_X0:
+ case FDOUBLE_ADD_FLAGS_RRR_0_OPCODE_X0:
+ case FDOUBLE_MUL_FLAGS_RRR_0_OPCODE_X0:
+ case FDOUBLE_PACK1_RRR_0_OPCODE_X0:
+ case FDOUBLE_PACK2_RRR_0_OPCODE_X0:
+ case FDOUBLE_SUB_FLAGS_RRR_0_OPCODE_X0:
+ case FDOUBLE_UNPACK_MAX_RRR_0_OPCODE_X0:
+ case FDOUBLE_UNPACK_MIN_RRR_0_OPCODE_X0:
+ case FSINGLE_ADD1_RRR_0_OPCODE_X0:
+ case FSINGLE_ADDSUB2_RRR_0_OPCODE_X0:
+ case FSINGLE_MUL1_RRR_0_OPCODE_X0:
+ case FSINGLE_MUL2_RRR_0_OPCODE_X0:
+ case FSINGLE_PACK2_RRR_0_OPCODE_X0:
+ case FSINGLE_SUB1_RRR_0_OPCODE_X0:
+ case MNZ_RRR_0_OPCODE_X0:
+ case MULAX_RRR_0_OPCODE_X0:
+ case MULA_HS_HS_RRR_0_OPCODE_X0:
+ case MULA_HS_HU_RRR_0_OPCODE_X0:
+ case MULA_HS_LS_RRR_0_OPCODE_X0:
+ case MULA_HS_LU_RRR_0_OPCODE_X0:
+ case MULA_HU_HU_RRR_0_OPCODE_X0:
+ case MULA_HU_LS_RRR_0_OPCODE_X0:
+ case MULA_HU_LU_RRR_0_OPCODE_X0:
+ case MULA_LS_LS_RRR_0_OPCODE_X0:
+ case MULA_LS_LU_RRR_0_OPCODE_X0:
+ case MULA_LU_LU_RRR_0_OPCODE_X0:
+ case MUL_HS_HS_RRR_0_OPCODE_X0:
+ case MUL_HS_HU_RRR_0_OPCODE_X0:
+ case MUL_HS_LS_RRR_0_OPCODE_X0:
+ case MUL_HS_LU_RRR_0_OPCODE_X0:
+ case MUL_HU_HU_RRR_0_OPCODE_X0:
+ case MUL_HU_LS_RRR_0_OPCODE_X0:
+ case MUL_HU_LU_RRR_0_OPCODE_X0:
+ case MUL_LS_LS_RRR_0_OPCODE_X0:
+ case MUL_LS_LU_RRR_0_OPCODE_X0:
+ case MUL_LU_LU_RRR_0_OPCODE_X0:
+ case MZ_RRR_0_OPCODE_X0:
+ case NOR_RRR_0_OPCODE_X0:
+ case ROTL_RRR_0_OPCODE_X0:
+ case SHL1ADDX_RRR_0_OPCODE_X0:
+ case SHL1ADD_RRR_0_OPCODE_X0:
+ case SHL2ADDX_RRR_0_OPCODE_X0:
+ case SHL2ADD_RRR_0_OPCODE_X0:
+ case SHL3ADDX_RRR_0_OPCODE_X0:
+ case SHL_RRR_0_OPCODE_X0:
+ case SHRS_RRR_0_OPCODE_X0:
+ case SHRUX_RRR_0_OPCODE_X0:
+ case SHRU_RRR_0_OPCODE_X0:
+ case SHUFFLEBYTES_RRR_0_OPCODE_X0:
+ case SUBXSC_RRR_0_OPCODE_X0:
+ case SUB_RRR_0_OPCODE_X0:
+ case V1ADDUC_RRR_0_OPCODE_X0:
+ case V1ADD_RRR_0_OPCODE_X0:
+ case V1ADIFFU_RRR_0_OPCODE_X0:
+ case V1AVGU_RRR_0_OPCODE_X0:
+ case V1CMPEQ_RRR_0_OPCODE_X0:
+ case V1CMPLES_RRR_0_OPCODE_X0:
+ case V1CMPLEU_RRR_0_OPCODE_X0:
+ case V1CMPLTS_RRR_0_OPCODE_X0:
+ case V1CMPLTU_RRR_0_OPCODE_X0:
+ case V1CMPNE_RRR_0_OPCODE_X0:
+ case V1DDOTPUSA_RRR_0_OPCODE_X0:
+ case V1DDOTPUS_RRR_0_OPCODE_X0:
+ case V1DOTPA_RRR_0_OPCODE_X0:
+ case V1DOTPUSA_RRR_0_OPCODE_X0:
+ case V1DOTPUS_RRR_0_OPCODE_X0:
+ case V1DOTP_RRR_0_OPCODE_X0:
+ case V1INT_H_RRR_0_OPCODE_X0:
+ case V1INT_L_RRR_0_OPCODE_X0:
+ case V1MAXU_RRR_0_OPCODE_X0:
+ case V1MINU_RRR_0_OPCODE_X0:
+ case V1MNZ_RRR_0_OPCODE_X0:
+ case V1MULTU_RRR_0_OPCODE_X0:
+ case V1MULUS_RRR_0_OPCODE_X0:
+ case V1MULU_RRR_0_OPCODE_X0:
+ case V1MZ_RRR_0_OPCODE_X0:
+ case V1SADAU_RRR_0_OPCODE_X0:
+ case V1SADU_RRR_0_OPCODE_X0:
+ case V1SHL_RRR_0_OPCODE_X0:
+ case V1SHRS_RRR_0_OPCODE_X0:
+ case V1SHRU_RRR_0_OPCODE_X0:
+ case V1SUBUC_RRR_0_OPCODE_X0:
+ case V1SUB_RRR_0_OPCODE_X0:
+ case V2ADDSC_RRR_0_OPCODE_X0:
+ case V2ADD_RRR_0_OPCODE_X0:
+ case V2ADIFFS_RRR_0_OPCODE_X0:
+ case V2AVGS_RRR_0_OPCODE_X0:
+ case V2CMPEQ_RRR_0_OPCODE_X0:
+ case V2CMPLES_RRR_0_OPCODE_X0:
+ case V2CMPLEU_RRR_0_OPCODE_X0:
+ case V2CMPLTS_RRR_0_OPCODE_X0:
+ case V2CMPLTU_RRR_0_OPCODE_X0:
+ case V2CMPNE_RRR_0_OPCODE_X0:
+ case V2DOTPA_RRR_0_OPCODE_X0:
+ case V2DOTP_RRR_0_OPCODE_X0:
+ case V2INT_H_RRR_0_OPCODE_X0:
+ case V2INT_L_RRR_0_OPCODE_X0:
+ case V2MAXS_RRR_0_OPCODE_X0:
+ case V2MINS_RRR_0_OPCODE_X0:
+ case V2MNZ_RRR_0_OPCODE_X0:
+ case V2MULFSC_RRR_0_OPCODE_X0:
+ case V2MULS_RRR_0_OPCODE_X0:
+ case V2MULTS_RRR_0_OPCODE_X0:
+ case V2MZ_RRR_0_OPCODE_X0:
+ case V2PACKH_RRR_0_OPCODE_X0:
+ case V2PACKL_RRR_0_OPCODE_X0:
+ case V2PACKUC_RRR_0_OPCODE_X0:
+ case V2SADAS_RRR_0_OPCODE_X0:
+ case V2SADAU_RRR_0_OPCODE_X0:
+ case V2SADS_RRR_0_OPCODE_X0:
+ case V2SADU_RRR_0_OPCODE_X0:
+ case V2SHLSC_RRR_0_OPCODE_X0:
+ case V2SHL_RRR_0_OPCODE_X0:
+ case V2SHRS_RRR_0_OPCODE_X0:
+ case V2SHRU_RRR_0_OPCODE_X0:
+ case V2SUBSC_RRR_0_OPCODE_X0:
+ case V2SUB_RRR_0_OPCODE_X0:
+ case V4ADDSC_RRR_0_OPCODE_X0:
+ case V4ADD_RRR_0_OPCODE_X0:
+ case V4INT_H_RRR_0_OPCODE_X0:
+ case V4INT_L_RRR_0_OPCODE_X0:
+ case V4PACKSC_RRR_0_OPCODE_X0:
+ case V4SHLSC_RRR_0_OPCODE_X0:
+ case V4SHL_RRR_0_OPCODE_X0:
+ case V4SHRS_RRR_0_OPCODE_X0:
+ case V4SHRU_RRR_0_OPCODE_X0:
+ case V4SUBSC_RRR_0_OPCODE_X0:
+ case V4SUB_RRR_0_OPCODE_X0:
+ case V1DDOTPUA_RRR_0_OPCODE_X0:
+ case V1DDOTPU_RRR_0_OPCODE_X0:
+ case V1DOTPUA_RRR_0_OPCODE_X0:
+ case V1DOTPU_RRR_0_OPCODE_X0:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP rrr_0_opcode_x0, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_shift_opcode_x0(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_ShiftOpcodeExtension_X0(bundle)) {
+ case SHLXI_SHIFT_OPCODE_X0:
+ return;
+ case ROTLI_SHIFT_OPCODE_X0:
+ case SHLI_SHIFT_OPCODE_X0:
+ case SHRSI_SHIFT_OPCODE_X0:
+ case SHRUI_SHIFT_OPCODE_X0:
+ case SHRUXI_SHIFT_OPCODE_X0:
+ case V1SHLI_SHIFT_OPCODE_X0:
+ case V1SHRSI_SHIFT_OPCODE_X0:
+ case V1SHRUI_SHIFT_OPCODE_X0:
+ case V2SHLI_SHIFT_OPCODE_X0:
+ case V2SHRSI_SHIFT_OPCODE_X0:
+ case V2SHRUI_SHIFT_OPCODE_X0:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP shift_opcode_x0, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_shl16insli_opcode_x0(struct DisasContext *dc,
@@ -169,26 +568,331 @@ static void decode_addli_opcode_x1(struct DisasContext *dc,
static void decode_branch_opcode_x1(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_BrType_X1(bundle)) {
+ case BEQZT_BRANCH_OPCODE_X1:
+ case BEQZ_BRANCH_OPCODE_X1:
+ return;
+ case BNEZT_BRANCH_OPCODE_X1:
+ case BNEZ_BRANCH_OPCODE_X1:
+ return;
+ case BLBC_BRANCH_OPCODE_X1:
+ return;
+ case BGEZT_BRANCH_OPCODE_X1:
+ case BGEZ_BRANCH_OPCODE_X1:
+ case BGTZT_BRANCH_OPCODE_X1:
+ case BGTZ_BRANCH_OPCODE_X1:
+ case BLBCT_BRANCH_OPCODE_X1:
+ case BLBST_BRANCH_OPCODE_X1:
+ case BLBS_BRANCH_OPCODE_X1:
+ case BLEZT_BRANCH_OPCODE_X1:
+ case BLEZ_BRANCH_OPCODE_X1:
+ case BLTZT_BRANCH_OPCODE_X1:
+ case BLTZ_BRANCH_OPCODE_X1:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP branch_opcode_x1, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_imm8_opcode_x1(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_Imm8OpcodeExtension_X1(bundle)) {
+ case ADDI_IMM8_OPCODE_X1:
+ return;
+ case ADDXI_IMM8_OPCODE_X1:
+ return;
+ case CMPEQI_IMM8_OPCODE_X1:
+ return;
+ case CMPLTUI_IMM8_OPCODE_X1:
+ return;
+ case ANDI_IMM8_OPCODE_X1:
+ case CMPLTSI_IMM8_OPCODE_X1:
+ case LD1S_ADD_IMM8_OPCODE_X1:
+ case LD1U_ADD_IMM8_OPCODE_X1:
+ case LD2S_ADD_IMM8_OPCODE_X1:
+ case LD2U_ADD_IMM8_OPCODE_X1:
+ case LD4S_ADD_IMM8_OPCODE_X1:
+ case LD4U_ADD_IMM8_OPCODE_X1:
+ case LDNT1S_ADD_IMM8_OPCODE_X1:
+ case LDNT1U_ADD_IMM8_OPCODE_X1:
+ case LDNT2S_ADD_IMM8_OPCODE_X1:
+ case LDNT2U_ADD_IMM8_OPCODE_X1:
+ case LDNT4S_ADD_IMM8_OPCODE_X1:
+ case LDNT4U_ADD_IMM8_OPCODE_X1:
+ case LDNT_ADD_IMM8_OPCODE_X1:
+ case LD_ADD_IMM8_OPCODE_X1:
+ case LWNA_ADD_IMM8_OPCODE_X1:
+ case MFSPR_IMM8_OPCODE_X1:
+ case MTSPR_IMM8_OPCODE_X1:
+ case ORI_IMM8_OPCODE_X1:
+ case ST1_ADD_IMM8_OPCODE_X1:
+ case ST2_ADD_IMM8_OPCODE_X1:
+ case ST4_ADD_IMM8_OPCODE_X1:
+ case STNT1_ADD_IMM8_OPCODE_X1:
+ case STNT2_ADD_IMM8_OPCODE_X1:
+ case STNT4_ADD_IMM8_OPCODE_X1:
+ case STNT_ADD_IMM8_OPCODE_X1:
+ case ST_ADD_IMM8_OPCODE_X1:
+ case V1ADDI_IMM8_OPCODE_X1:
+ case V1CMPEQI_IMM8_OPCODE_X1:
+ case V1CMPLTSI_IMM8_OPCODE_X1:
+ case V1CMPLTUI_IMM8_OPCODE_X1:
+ case V1MAXUI_IMM8_OPCODE_X1:
+ case V1MINUI_IMM8_OPCODE_X1:
+ case V2ADDI_IMM8_OPCODE_X1:
+ case V2CMPEQI_IMM8_OPCODE_X1:
+ case V2CMPLTSI_IMM8_OPCODE_X1:
+ case V2CMPLTUI_IMM8_OPCODE_X1:
+ case V2MAXSI_IMM8_OPCODE_X1:
+ case V2MINSI_IMM8_OPCODE_X1:
+ case XORI_IMM8_OPCODE_X1:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP imm8_opcode_x1, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_jump_opcode_x1(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_JumpOpcodeExtension_X1(bundle)) {
+ case JAL_JUMP_OPCODE_X1:
+ return;
+ case J_JUMP_OPCODE_X1:
+ return;
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP jump_opcode_x1, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_rrr_0_opcode_x1(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ uint8_t rsrc = (uint8_t)get_SrcA_X1(bundle);
+ uint8_t rdst = (uint8_t)get_Dest_X1(bundle);
+
+ switch (get_RRROpcodeExtension_X1(bundle)) {
+ case ADDX_RRR_0_OPCODE_X1:
+ return;
+ case ADD_RRR_0_OPCODE_X1:
+ return;
+ case AND_RRR_0_OPCODE_X1:
+ return;
+ case OR_RRR_0_OPCODE_X1:
+ return;
+ case CMPNE_RRR_0_OPCODE_X1:
+ return;
+ case SHL3ADD_RRR_0_OPCODE_X1:
+ return;
+ case ST4_RRR_0_OPCODE_X1:
+ if (!rdst) {
+ return;
+ }
+ break;
+ case ST_RRR_0_OPCODE_X1:
+ if (!rdst) {
+ return;
+ }
+ break;
+ case UNARY_RRR_0_OPCODE_X1:
+ switch (get_UnaryOpcodeExtension_X1(bundle)) {
+ case NOP_UNARY_OPCODE_X1:
+ case FNOP_UNARY_OPCODE_X1:
+ if (!rdst && !rsrc) {
+ return;
+ }
+ break;
+ case JRP_UNARY_OPCODE_X1:
+ case JR_UNARY_OPCODE_X1:
+ if (!rdst) {
+ return;
+ }
+ break;
+ case LD4S_UNARY_OPCODE_X1:
+ return;
+ case LD_UNARY_OPCODE_X1:
+ return;
+ case LNK_UNARY_OPCODE_X1:
+ if (!rsrc) {
+ return;
+ }
+ break;
+ case SWINT1_UNARY_OPCODE_X1:
+ if (!rsrc && !rdst) {
+ return;
+ }
+ break;
+ case DRAIN_UNARY_OPCODE_X1:
+ case DTLBPR_UNARY_OPCODE_X1:
+ case FINV_UNARY_OPCODE_X1:
+ case FLUSHWB_UNARY_OPCODE_X1:
+ case FLUSH_UNARY_OPCODE_X1:
+ case ICOH_UNARY_OPCODE_X1:
+ case ILL_UNARY_OPCODE_X1:
+ case INV_UNARY_OPCODE_X1:
+ case IRET_UNARY_OPCODE_X1:
+ case JALRP_UNARY_OPCODE_X1:
+ case JALR_UNARY_OPCODE_X1:
+ case LD1S_UNARY_OPCODE_X1:
+ case LD1U_UNARY_OPCODE_X1:
+ case LD2S_UNARY_OPCODE_X1:
+ case LD2U_UNARY_OPCODE_X1:
+ case LD4U_UNARY_OPCODE_X1:
+ case LDNA_UNARY_OPCODE_X1:
+ case LDNT1S_UNARY_OPCODE_X1:
+ case LDNT1U_UNARY_OPCODE_X1:
+ case LDNT2S_UNARY_OPCODE_X1:
+ case LDNT2U_UNARY_OPCODE_X1:
+ case LDNT4S_UNARY_OPCODE_X1:
+ case LDNT4U_UNARY_OPCODE_X1:
+ case LDNT_UNARY_OPCODE_X1:
+ case MF_UNARY_OPCODE_X1:
+ case NAP_UNARY_OPCODE_X1:
+ case SWINT0_UNARY_OPCODE_X1:
+ case SWINT2_UNARY_OPCODE_X1:
+ case SWINT3_UNARY_OPCODE_X1:
+ case WH64_UNARY_OPCODE_X1:
+ default:
+ break;
+ }
+ break;
+ case ADDXSC_RRR_0_OPCODE_X1:
+ case CMPEQ_RRR_0_OPCODE_X1:
+ case CMPEXCH4_RRR_0_OPCODE_X1:
+ case CMPEXCH_RRR_0_OPCODE_X1:
+ case CMPLES_RRR_0_OPCODE_X1:
+ case CMPLEU_RRR_0_OPCODE_X1:
+ case CMPLTS_RRR_0_OPCODE_X1:
+ case CMPLTU_RRR_0_OPCODE_X1:
+ case DBLALIGN2_RRR_0_OPCODE_X1:
+ case DBLALIGN4_RRR_0_OPCODE_X1:
+ case DBLALIGN6_RRR_0_OPCODE_X1:
+ case EXCH4_RRR_0_OPCODE_X1:
+ case EXCH_RRR_0_OPCODE_X1:
+ case FETCHADD4_RRR_0_OPCODE_X1:
+ case FETCHADDGEZ4_RRR_0_OPCODE_X1:
+ case FETCHADDGEZ_RRR_0_OPCODE_X1:
+ case FETCHADD_RRR_0_OPCODE_X1:
+ case FETCHAND4_RRR_0_OPCODE_X1:
+ case FETCHAND_RRR_0_OPCODE_X1:
+ case FETCHOR4_RRR_0_OPCODE_X1:
+ case FETCHOR_RRR_0_OPCODE_X1:
+ case MNZ_RRR_0_OPCODE_X1:
+ case MZ_RRR_0_OPCODE_X1:
+ case NOR_RRR_0_OPCODE_X1:
+ case ROTL_RRR_0_OPCODE_X1:
+ case SHL1ADDX_RRR_0_OPCODE_X1:
+ case SHL1ADD_RRR_0_OPCODE_X1:
+ case SHL2ADDX_RRR_0_OPCODE_X1:
+ case SHL2ADD_RRR_0_OPCODE_X1:
+ case SHL3ADDX_RRR_0_OPCODE_X1:
+ case SHLX_RRR_0_OPCODE_X1:
+ case SHL_RRR_0_OPCODE_X1:
+ case SHRS_RRR_0_OPCODE_X1:
+ case SHRUX_RRR_0_OPCODE_X1:
+ case SHRU_RRR_0_OPCODE_X1:
+ case ST1_RRR_0_OPCODE_X1:
+ case ST2_RRR_0_OPCODE_X1:
+ case STNT1_RRR_0_OPCODE_X1:
+ case STNT2_RRR_0_OPCODE_X1:
+ case STNT4_RRR_0_OPCODE_X1:
+ case STNT_RRR_0_OPCODE_X1:
+ case SUBXSC_RRR_0_OPCODE_X1:
+ case SUBX_RRR_0_OPCODE_X1:
+ case SUB_RRR_0_OPCODE_X1:
+ case V1ADDUC_RRR_0_OPCODE_X1:
+ case V1ADD_RRR_0_OPCODE_X1:
+ case V1CMPEQ_RRR_0_OPCODE_X1:
+ case V1CMPLES_RRR_0_OPCODE_X1:
+ case V1CMPLEU_RRR_0_OPCODE_X1:
+ case V1CMPLTS_RRR_0_OPCODE_X1:
+ case V1CMPLTU_RRR_0_OPCODE_X1:
+ case V1CMPNE_RRR_0_OPCODE_X1:
+ case V1INT_H_RRR_0_OPCODE_X1:
+ case V1INT_L_RRR_0_OPCODE_X1:
+ case V1MAXU_RRR_0_OPCODE_X1:
+ case V1MINU_RRR_0_OPCODE_X1:
+ case V1MNZ_RRR_0_OPCODE_X1:
+ case V1MZ_RRR_0_OPCODE_X1:
+ case V1SHL_RRR_0_OPCODE_X1:
+ case V1SHRS_RRR_0_OPCODE_X1:
+ case V1SHRU_RRR_0_OPCODE_X1:
+ case V1SUBUC_RRR_0_OPCODE_X1:
+ case V1SUB_RRR_0_OPCODE_X1:
+ case V2ADDSC_RRR_0_OPCODE_X1:
+ case V2ADD_RRR_0_OPCODE_X1:
+ case V2CMPEQ_RRR_0_OPCODE_X1:
+ case V2CMPLES_RRR_0_OPCODE_X1:
+ case V2CMPLEU_RRR_0_OPCODE_X1:
+ case V2CMPLTS_RRR_0_OPCODE_X1:
+ case V2CMPLTU_RRR_0_OPCODE_X1:
+ case V2CMPNE_RRR_0_OPCODE_X1:
+ case V2INT_H_RRR_0_OPCODE_X1:
+ case V2INT_L_RRR_0_OPCODE_X1:
+ case V2MAXS_RRR_0_OPCODE_X1:
+ case V2MINS_RRR_0_OPCODE_X1:
+ case V2MNZ_RRR_0_OPCODE_X1:
+ case V2MZ_RRR_0_OPCODE_X1:
+ case V2PACKH_RRR_0_OPCODE_X1:
+ case V2PACKL_RRR_0_OPCODE_X1:
+ case V2PACKUC_RRR_0_OPCODE_X1:
+ case V2SHLSC_RRR_0_OPCODE_X1:
+ case V2SHL_RRR_0_OPCODE_X1:
+ case V2SHRS_RRR_0_OPCODE_X1:
+ case V2SHRU_RRR_0_OPCODE_X1:
+ case V2SUBSC_RRR_0_OPCODE_X1:
+ case V2SUB_RRR_0_OPCODE_X1:
+ case V4ADDSC_RRR_0_OPCODE_X1:
+ case V4ADD_RRR_0_OPCODE_X1:
+ case V4INT_H_RRR_0_OPCODE_X1:
+ case V4INT_L_RRR_0_OPCODE_X1:
+ case V4PACKSC_RRR_0_OPCODE_X1:
+ case V4SHLSC_RRR_0_OPCODE_X1:
+ case V4SHL_RRR_0_OPCODE_X1:
+ case V4SHRS_RRR_0_OPCODE_X1:
+ case V4SHRU_RRR_0_OPCODE_X1:
+ case V4SUBSC_RRR_0_OPCODE_X1:
+ case V4SUB_RRR_0_OPCODE_X1:
+ case XOR_RRR_0_OPCODE_X1:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP rrr_0_opcode_x1, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_shift_opcode_x1(struct DisasContext *dc,
tilegx_bundle_bits bundle)
{
+ switch (get_ShiftOpcodeExtension_X1(bundle)) {
+ case SHLXI_SHIFT_OPCODE_X1:
+ return;
+ case ROTLI_SHIFT_OPCODE_X1:
+ case SHLI_SHIFT_OPCODE_X1:
+ case SHRSI_SHIFT_OPCODE_X1:
+ case SHRUI_SHIFT_OPCODE_X1:
+ case SHRUXI_SHIFT_OPCODE_X1:
+ case V1SHLI_SHIFT_OPCODE_X1:
+ case V1SHRSI_SHIFT_OPCODE_X1:
+ case V1SHRUI_SHIFT_OPCODE_X1:
+ case V2SHLI_SHIFT_OPCODE_X1:
+ case V2SHRSI_SHIFT_OPCODE_X1:
+ case V2SHRUI_SHIFT_OPCODE_X1:
+ default:
+ break;
+ }
+ qemu_log_mask(LOG_UNIMP, "UNIMP shift_opcode_x1, [" FMT64X "]\n",
+ (uint64_t)bundle);
+ dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
}
static void decode_shl16insli_opcode_x1(struct DisasContext *dc,
For the instructions which need tcg generation, the decoding functions
return directly; otherwise they fall through to log the unimplemented
opcode and raise TILEGX_EXCP_OPCODE_UNIMPLEMENTED.

Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
---
 target-tilegx/translate.c | 704 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 704 insertions(+)