--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -288,6 +288,12 @@ int cpu_exec(CPUArchState *env)
                     }
 #endif
 #if defined(TARGET_I386)
+#if !defined(CONFIG_USER_ONLY)
+                    if (interrupt_request & CPU_INTERRUPT_POLL) {
+                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
+                        apic_poll_irq(env->apic_state);
+                    }
+#endif
                     if (interrupt_request & CPU_INTERRUPT_INIT) {
                         cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                       0);
--- a/hw/apic.c
+++ b/hw/apic.c
@@ -16,6 +16,7 @@
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, see <http://www.gnu.org/licenses/>
  */
+#include "qemu-thread.h"
 #include "apic_internal.h"
 #include "apic.h"
 #include "ioapic.h"
@@ -361,7 +362,9 @@ static void apic_update_irq(APICCommonState *s)
     if (!(s->spurious_vec & APIC_SV_ENABLE)) {
         return;
     }
-    if (apic_irq_pending(s) > 0) {
+    if (!qemu_cpu_is_self(s->cpu_env)) {
+        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_POLL);
+    } else if (apic_irq_pending(s) > 0) {
         cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
     }
 }
--- a/hw/apic.h
+++ b/hw/apic.h
@@ -20,6 +20,7 @@ void apic_init_reset(DeviceState *s);
 void apic_sipi(DeviceState *s);
 void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
                                    TPRAccess access);
+void apic_poll_irq(DeviceState *d);
 
 /* pc.c */
 int cpu_is_bsp(CPUX86State *env);
--- a/hw/apic_internal.h
+++ b/hw/apic_internal.h
@@ -141,7 +141,6 @@ void apic_report_irq_delivered(int delivered);
 bool apic_next_timer(APICCommonState *s, int64_t current_time);
 void apic_enable_tpr_access_reporting(DeviceState *d, bool enable);
 void apic_enable_vapic(DeviceState *d, target_phys_addr_t paddr);
-void apic_poll_irq(DeviceState *d);
 
 void vapic_report_tpr_access(DeviceState *dev, void *cpu, target_ulong ip,
                              TPRAccess access);
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -477,6 +477,7 @@
                                  for syscall instruction */
 
 /* i386-specific interrupt pending bits. */
+#define CPU_INTERRUPT_POLL      CPU_INTERRUPT_TGT_EXT_1
 #define CPU_INTERRUPT_SMI       CPU_INTERRUPT_TGT_EXT_2
 #define CPU_INTERRUPT_NMI       CPU_INTERRUPT_TGT_EXT_3
 #define CPU_INTERRUPT_MCE       CPU_INTERRUPT_TGT_EXT_4
@@ -1047,7 +1048,8 @@ static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
 
 static inline bool cpu_has_work(CPUX86State *env)
 {
-    return ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
+    return ((env->interrupt_request & (CPU_INTERRUPT_HARD |
+                                       CPU_INTERRUPT_POLL)) &&
             (env->eflags & IF_MASK)) ||
            (env->interrupt_request & (CPU_INTERRUPT_NMI |
                                       CPU_INTERRUPT_INIT |
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -1727,6 +1727,10 @@ int kvm_arch_process_async_events(CPUX86State *env)
         return 0;
     }
 
+    if (env->interrupt_request & CPU_INTERRUPT_POLL) {
+        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        apic_poll_irq(env->apic_state);
+    }
     if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) ||
         (env->interrupt_request & CPU_INTERRUPT_NMI)) {
KVM performs TPR raising asynchronously to QEMU, specifically outside
QEMU's global lock. When an interrupt is injected into the APIC and the
TPR is consulted to decide whether it can be delivered, a stale TPR
value may be used, which can result in spurious interrupts.

Fix this by deferring apic_update_irq to the context of the target
VCPU. We introduce a new interrupt flag for this, CPU_INTERRUPT_POLL:
when it is set, the VCPU calls apic_poll_irq before checking for
further pending interrupts. To avoid special-casing KVM, this logic is
also implemented for TCG mode.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
 cpu-exec.c         |    6 ++++++
 hw/apic.c          |    5 ++++-
 hw/apic.h          |    1 +
 hw/apic_internal.h |    1 -
 target-i386/cpu.h  |    4 +++-
 target-i386/kvm.c  |    4 ++++
 6 files changed, 18 insertions(+), 3 deletions(-)
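
Below is a minimal, standalone sketch (not QEMU code) of the pattern the
patch relies on: the device side never inspects the TPR itself, it only
flags the target CPU, and the CPU re-evaluates its local APIC state from
its own thread, where the TPR cannot change underneath it. All names
here (toy_cpu, CPU_INTR_POLL, toy_apic_poll_irq, ...) are invented for
the illustration and are not QEMU APIs.

/* Toy model of deferring interrupt evaluation to the CPU's own thread.
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define CPU_INTR_POLL 0x1u   /* "please re-check your APIC state" */
#define CPU_INTR_HARD 0x2u   /* a deliverable interrupt is pending */

struct toy_cpu {
    atomic_uint interrupt_request;
    atomic_uint tpr;              /* task priority class, owned by the CPU thread */
    atomic_uint pending_vector;   /* highest pending vector, 0 = none */
};

/* Runs in a device thread: do NOT look at the TPR here, it may be stale.
 * Just record the interrupt and ask the CPU to poll. */
static void toy_apic_set_irq(struct toy_cpu *cpu, unsigned vector)
{
    atomic_store(&cpu->pending_vector, vector);
    atomic_fetch_or(&cpu->interrupt_request, CPU_INTR_POLL);
    /* a real implementation would also kick/wake the CPU thread here */
}

/* Runs only in the CPU thread: the priority comparison cannot race
 * with a concurrent TPR update because the CPU thread owns the TPR. */
static void toy_apic_poll_irq(struct toy_cpu *cpu)
{
    unsigned vector = atomic_load(&cpu->pending_vector);

    if (vector && (vector >> 4) > atomic_load(&cpu->tpr)) {
        atomic_fetch_or(&cpu->interrupt_request, CPU_INTR_HARD);
    }
}

static void *cpu_thread(void *opaque)
{
    struct toy_cpu *cpu = opaque;

    for (int i = 0; i < 100; i++) {
        unsigned req = atomic_load(&cpu->interrupt_request);

        if (req & CPU_INTR_POLL) {
            atomic_fetch_and(&cpu->interrupt_request, ~CPU_INTR_POLL);
            toy_apic_poll_irq(cpu);
            req = atomic_load(&cpu->interrupt_request);
        }
        if (req & CPU_INTR_HARD) {
            atomic_fetch_and(&cpu->interrupt_request, ~CPU_INTR_HARD);
            printf("delivering vector 0x%x\n",
                   atomic_load(&cpu->pending_vector));
            atomic_store(&cpu->pending_vector, 0);
        }
        usleep(1000);   /* stand-in for executing guest code */
    }
    return NULL;
}

int main(void)
{
    struct toy_cpu cpu = { .tpr = 2 };
    pthread_t tid;

    pthread_create(&tid, NULL, cpu_thread, &cpu);
    toy_apic_set_irq(&cpu, 0x41);   /* priority 4 > TPR 2: deliverable */
    pthread_join(tid, NULL);
    return 0;
}

The essential property is that toy_apic_poll_irq, and with it the
priority comparison, only ever runs in the CPU's own thread; a foreign
thread merely requests a re-evaluation, which mirrors what
apic_update_irq does via CPU_INTERRUPT_POLL when qemu_cpu_is_self()
returns false.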