@@ -26,6 +26,7 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_INCOHERENT_MEM
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -486,6 +486,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
return ret;
}
+ kvm_flush_incoherent_memory_regions(vcpu->kvm);
+
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
@@ -556,6 +558,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = handle_exit(vcpu, run, ret);
}
+ kvm_invalidate_incoherent_memory_regions(vcpu->kvm);
+
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
return ret;
@@ -1161,6 +1161,28 @@ static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
 	__coherent_cache_guest_page(pfn, size, need_flush, false);
 }
 
+/*
+ * Walk every page backing @slot and perform cache maintenance on it;
+ * @flush selects the operation passed to __coherent_cache_guest_page().
+ * Pages that cannot be mapped are reported and skipped.
+ */
+static void coherent_cache_memslot(struct kvm_memory_slot *slot, bool flush)
+{
+	gfn_t gfn, end = slot->base_gfn + slot->npages;
+	pfn_t pfn;
+
+	for (gfn = slot->base_gfn; gfn < end; ++gfn) {
+		pfn = gfn_to_pfn_memslot(slot, gfn);
+		if (is_error_pfn(pfn)) {
+			pr_err("%s: Bad pfn: gfn=%llx, pfn=%llx, userspace_addr=%lx\n",
+			       __func__, gfn, pfn, slot->userspace_addr);
+			continue;
+		}
+		__coherent_cache_guest_page(pfn, PAGE_SIZE, flush, !flush);
+		kvm_release_pfn_clean(pfn);
+	}
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -1802,6 +1820,45 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 	return 0;
 }
 
+/* Clean the data cache for every page of an incoherent memslot. */
+void kvm_arch_flush_incoherent(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+	if (slot->flags & KVM_MEM_READONLY) {
+		/*
+		 * Readonly memory shouldn't be changing, and we do a
+		 * clean+invalidate for KVM_MEM_INCOHERENT memory when
+		 * faulting it in. So, there's nothing to do now.
+		 */
+		return;
+	}
+
+	/*
+	 * Ideally, we would further filter out all pages not touched by
+	 * userspace on the last exit. No way to know those though, unless
+	 * we force userspace to fault on all pages in the incoherent
+	 * memory regions, but even then, I don't see any sane way for
+	 * do_wp_page to handle the faults without modification. So, sigh...
+	 */
+
+	coherent_cache_memslot(slot, true);
+}
+
+/* Invalidate the data cache for every page of an incoherent memslot. */
+void kvm_arch_invalidate_incoherent(struct kvm *kvm,
+				    struct kvm_memory_slot *slot)
+{
+	if (slot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+		/*
+		 * We fault each write when logging is enabled, and do a
+		 * clean+invalidate on KVM_MEM_INCOHERENT memory while
+		 * handling the fault. So, there's nothing to do now.
+		 */
+		return;
+	}
+
+	coherent_cache_memslot(slot, false);
+}
+
 void kvm_arch_memslots_updated(struct kvm *kvm)
 {
 }
@@ -38,6 +38,7 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_INCOHERENT_MEM
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
Add the kvm_*_incoherent_memory_regions calls to arm's
kvm_arch_vcpu_ioctl_run and implement the corresponding arch
flush/invalidate functions.

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 arch/arm/include/uapi/asm/kvm.h   |  1 +
 arch/arm/kvm/arm.c                |  4 +++
 arch/arm/kvm/mmu.c                | 54 +++++++++++++++++++++++++++++++++++++++
 arch/arm64/include/uapi/asm/kvm.h |  1 +
 4 files changed, 60 insertions(+)