@@ -636,6 +636,7 @@ struct kvm_vcpu_arch {
spinlock_t tbacct_lock;
u64 busy_stolen;
u64 busy_preempt;
+ unsigned long intr_msr;
#endif
};
@@ -480,6 +480,7 @@ int main(void)
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+ DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
#endif
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
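The new asm-offsets entry is what lets the real-mode delivery paths further down load the value with a single ld rN, VCPU_INTR_MSR(rM): at build time the DEFINE() becomes an assembler-visible constant in the generated asm-offsets.h, roughly of the form below (the numeric offset is invented for illustration).

	#define VCPU_INTR_MSR	5280	/* offsetof(struct kvm_vcpu, arch.intr_msr) */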
@@ -262,7 +262,7 @@ int kvmppc_mmu_hv_init(void)
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
- kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+ kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
}
/*
@@ -788,6 +788,27 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
spin_lock(&vc->lock);
+ /*
+ * If ILE (interrupt little-endian) has changed, update the
+ * MSR_LE bit in the intr_msr for each vcpu in this vcore.
+ */
+ if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->arch.vcore != vc)
+ continue;
+ if (new_lpcr & LPCR_ILE)
+ vcpu->arch.intr_msr |= MSR_LE;
+ else
+ vcpu->arch.intr_msr &= ~MSR_LE;
+ }
+ mutex_unlock(&kvm->lock);
+ }
+
/*
* Userspace can only modify DPFD (default prefetch depth),
* ILE (interrupt little-endian) and TC (translation control).
* On POWER8 userspace can also modify AIL (alt. interrupt loc.)
@@ -1155,6 +1176,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
spin_lock_init(&vcpu->arch.vpa_update_lock);
spin_lock_init(&vcpu->arch.tbacct_lock);
vcpu->arch.busy_preempt = TB_NIL;
+ vcpu->arch.intr_msr = MSR_SF | MSR_ME;
kvmppc_mmu_book3s_hv_init(vcpu);
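Taken together, the C-side hunks give arch.intr_msr a simple lifecycle: it defaults to MSR_SF | MSR_ME at vcpu creation, its MSR_LE bit tracks the vcore's LPCR[ILE] setting, and it is what kvmppc_mmu_book3s_64_hv_reset_msr() now hands to kvmppc_set_msr(). A condensed sketch of that flow, using only names from the patch (the standalone helper and its signature are illustrative, not part of the patch):

	/* Illustrative condensation of the patch's intr_msr handling. */
	static void intr_msr_lifecycle_sketch(struct kvm_vcpu *vcpu, u64 new_lpcr)
	{
		/* 1. Default at vcpu creation: 64-bit mode, machine checks enabled. */
		vcpu->arch.intr_msr = MSR_SF | MSR_ME;

		/* 2. Keep MSR_LE in step with the vcore's LPCR_ILE bit. */
		if (new_lpcr & LPCR_ILE)
			vcpu->arch.intr_msr |= MSR_LE;
		else
			vcpu->arch.intr_msr &= ~MSR_LE;

		/* 3. Deliver interrupts with this MSR instead of a hard-coded value. */
		kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
	}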
@@ -815,8 +815,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
12: mtspr SPRN_SRR0, r10
mr r10,r0
mtspr SPRN_SRR1, r11
- li r11,(MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
- rotldi r11,r11,63
+ ld r11, VCPU_INTR_MSR(r4)
5:
/*
@@ -1554,8 +1553,7 @@ kvmppc_hdsi:
mtspr SPRN_SRR0, r10
mtspr SPRN_SRR1, r11
li r10, BOOK3S_INTERRUPT_DATA_STORAGE
- li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
- rotldi r11, r11, 63
+ ld r11, VCPU_INTR_MSR(r9)
fast_interrupt_c_return:
6: ld r7, VCPU_CTR(r9)
lwz r8, VCPU_XER(r9)
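For reference, the two instructions removed at each delivery site built the old constant without a memory access: MSR_ME is 0x1000, so the li/rotldi pair works out to

	(MSR_ME << 1) | 1        = 0x0000000000002001
	rotldi by 63 (= right 1) = 0x8000000000001000 = MSR_SF | MSR_ME

A fixed immediate cannot encode a per-vcpu MSR_LE choice, so each site now loads the prepared value with a single ld from VCPU_INTR_MSR instead.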
@@ -1624,8 +1622,7 @@ kvmppc_hisi:
1: mtspr SPRN_SRR0, r10
mtspr SPRN_SRR1, r11
li r10, BOOK3S_INTERRUPT_INST_STORAGE
- li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
- rotldi r11, r11, 63
+ ld r11, VCPU_INTR_MSR(r9)
b fast_interrupt_c_return
3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
@@ -1668,8 +1665,7 @@ sc_1_fast_return:
mtspr SPRN_SRR0,r10
mtspr SPRN_SRR1,r11
li r10, BOOK3S_INTERRUPT_SYSCALL
- li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
- rotldi r11, r11, 63
+ ld r11, VCPU_INTR_MSR(r9)
mr r4,r9
b fast_guest_return
@@ -1997,8 +1993,7 @@ machine_check_realmode:
beq mc_cont
/* If not, deliver a machine check. SRR0/1 are already set */
li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
- li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
- rotldi r11, r11, 63
+ ld r11, VCPU_INTR_MSR(r9)
b fast_interrupt_c_return
/*