@@ -615,6 +615,18 @@ int cpu_exec(CPUArchState *env)
cc->do_interrupt(cpu);
next_tb = 0;
}
+ if (interrupt_request & CPU_INTERRUPT_VIRQ
+ && arm_excp_unmasked(cpu, EXCP_VIRQ)) {
+ cpu->exception_index = EXCP_VIRQ;
+ cc->do_interrupt(cpu);
+ next_tb = 0;
+ }
+ if (interrupt_request & CPU_INTERRUPT_VFIQ
+ && arm_excp_unmasked(cpu, EXCP_VFIQ)) {
+ cpu->exception_index = EXCP_VFIQ;
+ cc->do_interrupt(cpu);
+ next_tb = 0;
+ }
#elif defined(TARGET_UNICORE32)
if (interrupt_request & CPU_INTERRUPT_HARD
&& !(env->uncached_asr & ASR_I)) {
@@ -179,20 +179,22 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
ARMCPU *cpu = opaque;
CPUState *cs = CPU(cpu);
+ static const int mask[] = {
+ [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
+ [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
+ [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
+ [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
+ };
switch (irq) {
case ARM_CPU_IRQ:
- if (level) {
- cpu_interrupt(cs, CPU_INTERRUPT_HARD);
- } else {
- cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
- }
- break;
case ARM_CPU_FIQ:
+ case ARM_CPU_VIRQ:
+ case ARM_CPU_VFIQ:
if (level) {
- cpu_interrupt(cs, CPU_INTERRUPT_FIQ);
+ cpu_interrupt(cs, mask[irq]);
} else {
- cpu_reset_interrupt(cs, CPU_INTERRUPT_FIQ);
+ cpu_reset_interrupt(cs, mask[irq]);
}
break;
default:
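With the table-driven handler above, the two new inputs behave exactly like the existing IRQ and FIQ lines from a caller's point of view. As a minimal usage sketch (not part of this patch; the cpu pointer stands for whichever ARMCPU the board created), an interrupt controller model could drive the virtual IRQ line like this:

/* Sketch only: assert, then deassert, the CPU's VIRQ input from a device
 * model.  qdev_get_gpio_in() and qemu_set_irq() are existing qdev helpers. */
qemu_irq virq = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_VIRQ);
qemu_set_irq(virq, 1);   /* level high: arm_cpu_set_irq() sets CPU_INTERRUPT_VIRQ */
qemu_set_irq(virq, 0);   /* level low: arm_cpu_set_irq() clears it again */
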
@@ -242,9 +244,12 @@ static void arm_cpu_initfn(Object *obj)
#ifndef CONFIG_USER_ONLY
/* Our inbound IRQ and FIQ lines */
if (kvm_enabled()) {
- qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 2);
+ /* VIRQ and VFIQ are unused with KVM but we add them to maintain
+ * the same interface as non-KVM CPUs.
+ */
+ qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
} else {
- qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 2);
+ qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
}
cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
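Since both the KVM and TCG paths now register four inbound GPIO lines, board code can wire them without checking which accelerator is in use. A hedged sketch of such wiring (the interrupt controller device and its output ordering are assumptions for illustration, not part of this patch):

/* Sketch only: connect a controller's per-CPU outputs 0..3 to the CPU's
 * IRQ, FIQ, VIRQ and VFIQ inputs, which this patch numbers 0..3 as well. */
for (int i = 0; i < 4; i++) {
    qdev_connect_gpio_out(intc_dev, i, qdev_get_gpio_in(DEVICE(cpu), i));
}
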
@@ -53,6 +53,8 @@
#define EXCP_STREX 10
#define EXCP_HVC 11 /* HyperVisor Call */
#define EXCP_SMC 12 /* Secure Monitor Call */
+#define EXCP_VIRQ 13
+#define EXCP_VFIQ 14
#define ARMV7M_EXCP_RESET 1
#define ARMV7M_EXCP_NMI 2
@@ -67,6 +69,8 @@
/* ARM-specific interrupt pending bits. */
#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
/* The usual mapping for an AArch64 system register to its AArch32
* counterpart is for the 32 bit world to have access to the lower
@@ -82,9 +86,12 @@
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif
-/* Meanings of the ARMCPU object's two inbound GPIO lines */
+/* Meanings of the ARMCPU object's four inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
+#define ARM_CPU_VIRQ 2
+#define ARM_CPU_VFIQ 3
+
typedef void ARMWriteCPFunc(void *opaque, int cp_info,
int srcreg, int operand, uint32_t value);
@@ -1159,6 +1166,18 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx)
* EL2 if we are in NS EL0/1.
*/
bool irq_can_hyp = !secure && cur_el < 2 && target_el == 2;
+ /* ARMv7-M interrupt return works by loading a magic value
+ * into the PC. On real hardware the load causes the
+ * return to occur. The qemu implementation performs the
+ * jump normally, then does the exception return when the
+ * CPU tries to execute code at the magic address.
+ * This will cause the magic PC value to be pushed to
+ * the stack if an interrupt occurred at the wrong time.
+ * We avoid this by disabling interrupts when
+ * pc contains a magic address.
+ */
+ bool irq_unmasked = ((IS_M(env) && env->regs[15] < 0xfffffff0)
+ || !(env->daif & PSTATE_I));
/* Don't take exceptions if they target a lower EL. */
if (cur_el > target_el) {
@@ -1175,8 +1194,19 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx)
if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_IMO)) {
return true;
}
- return ((IS_M(env) && env->regs[15] < 0xfffffff0)
- || !(env->daif & PSTATE_I));
+ return irq_unmasked;
+ case EXCP_VFIQ:
+ if (!secure && !(env->cp15.hcr_el2 & HCR_FMO)) {
+ /* VFIQs are only taken when non-secure and HCR_EL2.FMO routes FIQs to EL2. */
+ return false;
+ }
+ return !(env->daif & PSTATE_F);
+ case EXCP_VIRQ:
+ if (!secure && !(env->cp15.hcr_el2 & HCR_IMO)) {
+ /* VIRQs are only taken when non-secure and HCR_EL2.IMO routes IRQs to EL2. */
+ return false;
+ }
+ return irq_unmasked;
default:
g_assert_not_reached();
}
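Taken together with the cpu-exec.c hunk at the top, the delivery path for a virtual interrupt under TCG is: the GPIO handler latches CPU_INTERRUPT_VIRQ or CPU_INTERRUPT_VFIQ, the main loop consults the masking logic above, and do_interrupt() vectors the exception. A condensed restatement (variable names abridged from the hunks above and below, not a literal excerpt):

/* Sketch only: end-to-end VIRQ delivery as added by this patch. */
cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);      /* from arm_cpu_set_irq()            */
if (arm_excp_unmasked(cs, EXCP_VIRQ)) {     /* non-secure, HCR_EL2.IMO, !PSTATE_I */
    cs->exception_index = EXCP_VIRQ;
    cc->do_interrupt(cs);                   /* AArch64: vectored at +0x80, see below */
}
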
@@ -481,9 +481,11 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
env->cp15.esr_el[new_el] = env->exception.syndrome;
break;
case EXCP_IRQ:
+ case EXCP_VIRQ:
addr += 0x80;
break;
case EXCP_FIQ:
+ case EXCP_VFIQ:
addr += 0x100;
break;
default:
@@ -3353,6 +3353,10 @@ unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
}
break;
}
+ case EXCP_VIRQ:
+ case EXCP_VFIQ:
+ target_el = 1;
+ break;
}
return target_el;
}
@@ -55,6 +55,8 @@ static const char * const excnames[] = {
[EXCP_STREX] = "QEMU intercept of STREX",
[EXCP_HVC] = "Hypervisor Call",
[EXCP_SMC] = "Secure Monitor Call",
+ [EXCP_VIRQ] = "Virtual IRQ",
+ [EXCP_VFIQ] = "Virtual FIQ",
};
static inline void arm_log_exception(int idx)