@@ -1128,6 +1128,7 @@ long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	unsigned long *rmapp;
 	struct kvm_vcpu *vcpu;
 
+	mutex_lock(&kvm->arch.resize_hpt_mutex); /* exclude a concurrent HPT resize */
 	preempt_disable();
 	rmapp = memslot->arch.rmap;
 	for (i = 0; i < memslot->npages; ++i) {
@@ -1152,6 +1153,7 @@ long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		spin_unlock(&vcpu->arch.vpa_update_lock);
 	}
 	preempt_enable();
+	mutex_unlock(&kvm->arch.resize_hpt_mutex);
 	return 0;
 }
While there is an active HPT resize in progress, working out which guest pages are dirty is rather more complicated, because depending on exactly which phase the resize is in, the information could be in the current, tentative or previous HPT or reverse map of the guest. To avoid this problem, for now we simply exclude collecting the dirty map while a resize is in progress, blocking the dirty map operation until the resize is complete.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
 arch/powerpc/kvm/book3s_64_mmu_hv.c | 2 ++
 1 file changed, 2 insertions(+)
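
For context, a minimal sketch of the other side of this exclusion: the resize path holding kvm->arch.resize_hpt_mutex across all of its phases, so that kvmppc_hv_get_dirty_log() can never observe a half-migrated HPT. The phase helpers (resize_hpt_allocate(), resize_hpt_rehash(), resize_hpt_pivot()), the driver function and the struct kvm_resize_hpt type are assumptions about the rest of the series, not part of this patch:

/*
 * Illustrative sketch only -- not from this patch.  Shows how a resize
 * could take the same mutex around its phases; the helper names and the
 * kvm_resize_hpt structure are assumed for the sake of the example.
 */
static int resize_hpt_run(struct kvm *kvm, struct kvm_resize_hpt *resize)
{
	int rc;

	/* Block kvmppc_hv_get_dirty_log() for the whole resize. */
	mutex_lock(&kvm->arch.resize_hpt_mutex);

	rc = resize_hpt_allocate(resize);	/* build the tentative HPT */
	if (!rc)
		rc = resize_hpt_rehash(resize);	/* migrate entries across */
	if (!rc)
		resize_hpt_pivot(resize);	/* switch the guest over */

	mutex_unlock(&kvm->arch.resize_hpt_mutex);
	return rc;
}

Once the unlock runs, the dirty state lives entirely in a single HPT and its reverse map again, so the dirty log code, having waited on the mutex, always sees a consistent view.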