diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -54,10 +54,7 @@ extern void kvmppc_handler_highmem(void);
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
- int is_default_endian);
-extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int rt, unsigned int bytes,
- int is_default_endian);
+ int is_default_endian, int sign_extend);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes,
int is_default_endian);
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -129,24 +129,6 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case 31:
switch (get_xop(inst)) {
- case OP_31_XOP_MFMSR:
- kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
- break;
- case OP_31_XOP_MTMSRD:
- {
- ulong rs_val = kvmppc_get_gpr(vcpu, rs);
- if (inst & 0x10000) {
- ulong new_msr = kvmppc_get_msr(vcpu);
- new_msr &= ~(MSR_RI | MSR_EE);
- new_msr |= rs_val & (MSR_RI | MSR_EE);
- kvmppc_set_msr_fast(vcpu, new_msr);
- } else
- kvmppc_set_msr(vcpu, rs_val);
- break;
- }
- case OP_31_XOP_MTMSR:
- kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
- break;
case OP_31_XOP_MFSR:
{
int srnum;
@@ -189,6 +171,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.mmu.tlbie(vcpu, addr, large);
break;
}
+ case OP_31_XOP_TLBSYNC:
+ break;
#ifdef CONFIG_PPC_BOOK3S_64
case OP_31_XOP_FAKE_SC1:
{
@@ -217,8 +201,6 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
#endif
- case OP_31_XOP_EIOIO:
- break;
case OP_31_XOP_SLBMTE:
if (!vcpu->arch.mmu.slbmte)
return EMULATE_FAIL;
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -200,7 +200,7 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
goto done_load;
} else if (r == EMULATE_DO_MMIO) {
emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
- len, 1);
+ len, 1, 0);
goto done_load;
}
@@ -291,12 +291,12 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
goto done_load;
} else if ((r == EMULATE_DO_MMIO) && w) {
emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
- 4, 1);
+ 4, 1, 0);
vcpu->arch.qpr[rs] = tmp[1];
goto done_load;
} else if (r == EMULATE_DO_MMIO) {
emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs,
- 8, 1);
+ 8, 1, 0);
goto done_load;
}
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -74,11 +74,6 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
case 31:
switch (get_xop(inst)) {
- case OP_31_XOP_MFMSR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
- kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
- break;
-
case OP_31_XOP_MTMSR:
kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
@@ -96,6 +91,9 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
break;
+ case OP_31_XOP_TLBSYNC:
+ break;
+
default:
emulated = EMULATE_FAIL;
}
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -31,6 +31,7 @@
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
+#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"
@@ -90,10 +91,9 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
return vcpu->arch.dec - jd;
}
-static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
enum emulation_result emulated = EMULATE_DONE;
- ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_SRR0:
@@ -207,255 +207,135 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
return emulated;
}
+static ulong maybe_truncate(struct kvm_vcpu *vcpu, ulong addr)
+{
+ if (!(kvmppc_get_msr(vcpu) & MSR_64BIT))
+ addr &= 0xffffffffUL;
+ return addr;
+}
+
+#ifdef CONFIG_PPC_BOOK3S
+static enum emulation_result deliver_interrupt(struct kvm_vcpu *vcpu,
+ struct instruction_op *op)
+{
+ ulong vec = op->type & ~INSTR_TYPE_MASK;
+
+ if (vec == BOOK3S_INTERRUPT_PROGRAM)
+ kvmppc_core_queue_program(vcpu, op->val);
+ else
+ kvmppc_book3s_queue_irqprio(vcpu, vec);
+ return EMULATE_DONE;
+}
+#else
+static enum emulation_result deliver_interrupt(struct kvm_vcpu *vcpu,
+ struct instruction_op *op)
+{
+ ulong vec = op->type & ~INSTR_TYPE_MASK;
+ ulong esr = 0;
+
+ switch (vec) {
+ case BOOK3S_INTERRUPT_PROGRAM:
+ if (op->val == SRR1_PROGTRAP)
+ esr = ESR_PTR;
+ else if (op->val == SRR1_PROGPRIV)
+ esr = ESR_PPR;
+ else
+ esr = 0;
+ kvmppc_core_queue_program(vcpu, esr);
+ break;
+ case BOOK3S_INTERRUPT_FP_UNAVAIL:
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
+ break;
+ default:
+ return EMULATE_FAIL;
+ }
+ return EMULATE_DONE;
+}
+#endif /* CONFIG_PPC_BOOK3S */
+
/* XXX to do:
- * lhax
- * lhaux
* lswx
* lswi
* stswx
* stswi
- * lha
- * lhau
* lmw
* stmw
*
*/
-/* XXX Should probably auto-generate instruction decoding for a particular core
- * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
u32 inst = kvmppc_get_last_inst(vcpu);
- int ra = get_ra(inst);
- int rs = get_rs(inst);
- int rt = get_rt(inst);
- int sprn = get_sprn(inst);
enum emulation_result emulated = EMULATE_DONE;
int advance = 1;
+ ulong pc;
+ ulong val;
+ struct instruction_op op;
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
- switch (get_op(inst)) {
- case OP_TRAP:
-#ifdef CONFIG_PPC_BOOK3S
- case OP_TRAP_64:
- kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
-#else
- kvmppc_core_queue_program(vcpu,
- vcpu->arch.shared->esr | ESR_PTR);
-#endif
- advance = 0;
- break;
-
- case 31:
- switch (get_xop(inst)) {
-
- case OP_31_XOP_TRAP:
-#ifdef CONFIG_64BIT
- case OP_31_XOP_TRAP_64:
-#endif
-#ifdef CONFIG_PPC_BOOK3S
- kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
-#else
- kvmppc_core_queue_program(vcpu,
- vcpu->arch.shared->esr | ESR_PTR);
-#endif
- advance = 0;
- break;
- case OP_31_XOP_LWZX:
- emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
- break;
-
- case OP_31_XOP_LBZX:
- emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
- break;
-
- case OP_31_XOP_LBZUX:
- emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
- break;
-
- case OP_31_XOP_STWX:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 4, 1);
- break;
-
- case OP_31_XOP_STBX:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 1, 1);
- break;
-
- case OP_31_XOP_STBUX:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 1, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
- break;
-
- case OP_31_XOP_LHAX:
- emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
- break;
-
- case OP_31_XOP_LHZX:
- emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
- break;
-
- case OP_31_XOP_LHZUX:
- emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
- break;
-
- case OP_31_XOP_MFSPR:
- emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
- break;
-
- case OP_31_XOP_STHX:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 2, 1);
- break;
-
- case OP_31_XOP_STHUX:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 2, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
- break;
-
- case OP_31_XOP_MTSPR:
- emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
- break;
-
- case OP_31_XOP_DCBST:
- case OP_31_XOP_DCBF:
- case OP_31_XOP_DCBI:
- /* Do nothing. The guest is performing dcbi because
- * hardware DMA is not snooped by the dcache, but
- * emulated DMA either goes through the dcache as
- * normal writes, or the host kernel has handled dcache
- * coherence. */
- break;
-
- case OP_31_XOP_LWBRX:
- emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
- break;
-
- case OP_31_XOP_TLBSYNC:
- break;
-
- case OP_31_XOP_STWBRX:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 4, 0);
- break;
-
- case OP_31_XOP_LHBRX:
- emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
- break;
-
- case OP_31_XOP_STHBRX:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 2, 0);
- break;
-
- default:
- /* Attempt core-specific emulation below. */
- emulated = EMULATE_FAIL;
- }
- break;
-
- case OP_LWZ:
- emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
- break;
-
- /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
- case OP_LD:
- rt = get_rt(inst);
- emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
- break;
-
- case OP_LWZU:
- emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
- break;
-
- case OP_LBZ:
- emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
- break;
-
- case OP_LBZU:
- emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
- break;
-
- case OP_STW:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 4, 1);
- break;
-
- /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
- case OP_STD:
- rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 8, 1);
- break;
-
- case OP_STWU:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 4, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
- break;
+ pc = kvmppc_get_pc(vcpu);
+ vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
+ if (analyse_instr(&op, &vcpu->arch.regs, inst)) {
+ /* Instruction has been executed by updating vcpu->arch.regs */
+ advance = 0; /* already advanced */
+ goto out;
+ }
- case OP_STB:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 1, 1);
+ switch (op.type & INSTR_TYPE_MASK) {
+ case INTERRUPT:
+ emulated = deliver_interrupt(vcpu, &op);
+ advance = 0;
break;
- case OP_STBU:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 1, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ case LOAD:
+ /* address already in vcpu->arch.paddr_accessed */
+ emulated = kvmppc_handle_load(run, vcpu, op.reg,
+ GETSIZE(op.type),
+ !(op.type & BYTEREV),
+ !!(op.type & SIGNEXT));
+ if (op.type & UPDATE)
+ kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
break;
- case OP_LHZ:
- emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ case STORE:
+ /* address already in vcpu->arch.paddr_accessed */
+ emulated = kvmppc_handle_store(run, vcpu, op.val,
+ GETSIZE(op.type), 1);
+ if (op.type & UPDATE)
+ kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
break;
- case OP_LHZU:
- emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ case MFSPR:
+ emulated = kvmppc_emulate_mfspr(vcpu, op.spr, op.reg);
break;
- case OP_LHA:
- emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+ case MTSPR:
+ emulated = kvmppc_emulate_mtspr(vcpu, op.spr, op.val);
break;
- case OP_LHAU:
- emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ case MFMSR:
+ kvmppc_set_gpr(vcpu, op.reg, kvmppc_get_msr(vcpu));
+ kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
break;
- case OP_STH:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 2, 1);
+#ifdef CONFIG_PPC_BOOK3S
+ case MTMSR:
+ val = kvmppc_get_gpr(vcpu, op.reg);
+ val = (val & op.val) | (kvmppc_get_msr(vcpu) & ~op.val);
+ kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
+ kvmppc_set_msr(vcpu, val);
break;
+#endif
- case OP_STHU:
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 2, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ case CACHEOP:
+ /* Do nothing. The guest is performing dcbi because
+ * hardware DMA is not snooped by the dcache, but
+ * emulated DMA either goes through the dcache as
+ * normal writes, or the host kernel has handled dcache
+ * coherence. */
break;
default:
@@ -475,11 +355,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
}
- trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
+ out:
+ trace_kvm_ppc_instr(inst, pc, emulated);
/* Advance past emulated instruction. */
if (advance)
- kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+ kvmppc_set_pc(vcpu, maybe_truncate(vcpu, pc + 4));
return emulated;
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -729,7 +729,7 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
- int is_default_endian)
+ int is_default_endian, int sign_extend)
{
int idx, ret;
int is_bigendian;
@@ -755,7 +755,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.mmio_is_bigendian = is_bigendian;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 0;
- vcpu->arch.mmio_sign_extend = 0;
+ vcpu->arch.mmio_sign_extend = sign_extend;
idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -774,19 +774,6 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);
-/* Same as above, but sign extends */
-int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int rt, unsigned int bytes,
- int is_default_endian)
-{
- int r;
-
- vcpu->arch.mmio_sign_extend = 1;
- r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);
-
- return r;
-}
-
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes, int is_default_endian)
{

This changes kvmppc_emulate_instruction() to use the common instruction
decoding code from arch/powerpc/lib/sstep.c.  This expands the set of
instructions that we recognize to include all of the integer load and
store instructions except for the string (lsw*, stsw*) and multiple
(lmw, stmw) instructions, and reduces the total amount of code.

This removes kvmppc_handle_loads() and instead adds a 'sign_extend'
parameter to kvmppc_handle_load().  (In fact kvmppc_handle_loads()
could not have worked previously; it sets vcpu->arch.mmio_sign_extend
to 1 before calling kvmppc_handle_load(), which sets it back to 0.)

The instruction emulation for specific CPU flavours is largely
unchanged, except that emulation of mfmsr, eieio, and (for Book3S)
mtmsr[d] has moved into the generic code, and emulation of tlbsync
has moved into the CPU-specific code.

At this point the code still assumes that the instruction caused the
most recent guest exit, and that if the instruction is a load or
store, it must have been an emulated MMIO access and
vcpu->arch.paddr_accessed already contains the guest physical address
being accessed.

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/include/asm/kvm_ppc.h       |   5 +-
 arch/powerpc/kvm/book3s_emulate.c        |  22 +--
 arch/powerpc/kvm/book3s_paired_singles.c |   6 +-
 arch/powerpc/kvm/booke_emulate.c         |   8 +-
 arch/powerpc/kvm/emulate.c               | 317 ++++++++++---------------------
 arch/powerpc/kvm/powerpc.c               |  17 +-
 6 files changed, 110 insertions(+), 265 deletions(-)
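
For illustration, the kvmppc_handle_loads() bug noted above boils down
to an ordering problem.  The sketch below is a minimal standalone demo,
not the real KVM code: the demo_* names and struct are hypothetical
stand-ins for kvmppc_handle_load{,s}() and struct kvm_vcpu.

#include <stdio.h>

/* Hypothetical stand-in for struct kvm_vcpu; illustration only. */
struct demo_vcpu {
	int mmio_sign_extend;
};

/* Models the old kvmppc_handle_load(): unconditionally zeroes the flag. */
static int demo_handle_load(struct demo_vcpu *vcpu)
{
	vcpu->mmio_sign_extend = 0;
	return 0;
}

/* Models the old kvmppc_handle_loads(): sets the flag first, but the
 * callee above immediately clears it, so the request is lost. */
static int demo_handle_loads(struct demo_vcpu *vcpu)
{
	vcpu->mmio_sign_extend = 1;
	return demo_handle_load(vcpu);
}

int main(void)
{
	struct demo_vcpu vcpu = { .mmio_sign_extend = -1 };

	demo_handle_loads(&vcpu);
	/* Prints 0, not 1: the sign-extend request was never honoured. */
	printf("mmio_sign_extend = %d\n", vcpu.mmio_sign_extend);
	return 0;
}

Passing sign_extend as an explicit argument to kvmppc_handle_load(), as
this patch does, removes that ordering hazard entirely.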