
[6/6] KVM: MMU: Avoid handling same rmap_pde in kvm_handle_hva_range()

Message ID 20120621175238.705e6188.yoshikawa.takuya@oss.ntt.co.jp
State New, archived

Commit Message

Takuya Yoshikawa June 21, 2012, 8:52 a.m. UTC
When we invalidate a THP page, we call the handler with the same
rmap_pde argument 512 times in the following loop:

  for each guest page in the range
    for each level
      unmap using rmap

This patch avoids these extra handler calls by changing the loop order
like this:

  for each level
    for each rmap in the range
      unmap using rmap
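
To see why the old order calls the handler with the same rmap_pde 512
times: for levels above PT_PAGE_TABLE_LEVEL, gfn_to_index() drops 9 gfn
bits per level, so every small gfn backed by one 2M THP page collapses
to the same rmap slot. A minimal user-space sketch of that collapse
(the constants mirror the x86 KVM definitions; base_gfn 0 and level 2
are just example values):

  #include <assert.h>

  #define PT_PAGE_TABLE_LEVEL    1
  #define KVM_HPAGE_GFN_SHIFT(x) (((x) - PT_PAGE_TABLE_LEVEL) * 9)

  typedef unsigned long gfn_t;

  /* Same computation as the kernel's gfn_to_index(). */
  static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
  {
          return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                 (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
  }

  int main(void)
  {
          gfn_t gfn;

          /* All 512 gfns of one 2M THP page share one level-2 index. */
          for (gfn = 0; gfn < 512; ++gfn)
                  assert(gfn_to_index(gfn, 0, 2) == 0);
          return 0;
  }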

With the preceding patches in this series, this made THP page
invalidation more than 5 times faster on our x86 host: as a result, the
host became more responsive while the guest's memory was being swapped
out.

Note: in the new code we could not use trace_kvm_age_page(), so we just
dropped the tracepoint from kvm_handle_hva_range().

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
---
 arch/x86/kvm/mmu.c |   39 ++++++++++++++++++++-------------------
 1 files changed, 20 insertions(+), 19 deletions(-)

Comments

Takuya Yoshikawa June 22, 2012, 2:33 p.m. UTC | #1
On Thu, 21 Jun 2012 17:52:38 +0900
Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp> wrote:

...
> +			/* Handle the first one even if idx == idx_end. */
> +			do {
> +				ret |= handler(kvm, rmapp++, data);
> +			} while (++idx < idx_end);

This is unclear and... maybe wrong.


I will change this as follows:
---
  hva_start = max(start, memslot->userspace_addr);
  hva_end = min(end, memslot->userspace_addr +
                          (memslot->npages << PAGE_SHIFT));
  if (hva_start >= hva_end)
          continue;
  /*
   * { gfn(page) | page intersects with [hva_start, hva_end) }
   * = [gfn_start, gfn_end).
   */
  gfn_start = hva_to_gfn_memslot(hva_start, memslot);
  gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

  for (j = PT_PAGE_TABLE_LEVEL;
       j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
          unsigned long idx, idx_end;
          unsigned long *rmapp;

          /*
           * { idx(page_j) | page_j intersects with [hva_start, hva_end) }
           * = [idx, idx_end].
           */
          idx = gfn_to_index(gfn_start, memslot->base_gfn, j);
          idx_end = gfn_to_index(gfn_end - 1, memslot->base_gfn, j);

          rmapp = __gfn_to_rmap(gfn_start, j, memslot);

          for (; idx <= idx_end; ++idx)
                  ret |= handler(kvm, rmapp++, data);
  }
---

This way, we can much more clearly handle exactly the same rmaps as before.
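
For the record, here is a user-space sketch of why the do/while version
can miss an rmap when the range's last gfn shares a large-page index
with gfns past the end of the range (the constants mirror the x86 KVM
definitions; gfns [500, 600) and level 2 are just example values):
---
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL	1
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - PT_PAGE_TABLE_LEVEL) * 9)

typedef unsigned long gfn_t;

static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

int main(void)
{
	gfn_t gfn_start = 500, gfn_end = 600;	/* spans two 2M regions */
	gfn_t idx, idx_end;
	int old_calls = 0, new_calls = 0;

	/* Old bound: exclusive, taken from gfn_end itself. */
	idx = gfn_to_index(gfn_start, 0, 2);
	idx_end = gfn_to_index(gfn_end, 0, 2);
	do {
		old_calls++;
	} while (++idx < idx_end);

	/* New bound: inclusive, taken from the last gfn in the range. */
	idx = gfn_to_index(gfn_start, 0, 2);
	idx_end = gfn_to_index(gfn_end - 1, 0, 2);
	for (; idx <= idx_end; ++idx)
		new_calls++;

	/* Prints "old: 1, new: 2": the old loop misses rmap index 1. */
	printf("old: %d, new: %d\n", old_calls, new_calls);
	return 0;
}
---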

I also need to change the ppc code in patch 3.

Thanks,
	Takuya

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 306711a..462becb 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1192,16 +1192,15 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 					       unsigned long data))
 {
 	int j;
-	int ret;
-	int retval = 0;
+	int ret = 0;
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 
 	slots = kvm_memslots(kvm);
 
 	kvm_for_each_memslot(memslot, slots) {
-		gfn_t gfn;
 		unsigned long hva_start, hva_end;
+		gfn_t gfn_start, gfn_end;
 
 		hva_start = max(start, memslot->userspace_addr);
 		hva_end = min(end, memslot->userspace_addr +
@@ -1209,25 +1208,27 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 		if (hva_start >= hva_end)
 			continue;
 
-		for (gfn = hva_to_gfn_memslot(hva_start, memslot);
-		     gfn < hva_to_gfn_memslot(hva_end, memslot); gfn++) {
-			ret = 0;
+		gfn_start = hva_to_gfn_memslot(hva_start, memslot);
+		gfn_end = hva_to_gfn_memslot(hva_end, memslot);
 
-			for (j = PT_PAGE_TABLE_LEVEL;
-			     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
-				unsigned long *rmapp;
+		for (j = PT_PAGE_TABLE_LEVEL;
+		     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
+			unsigned long idx, idx_end;
+			unsigned long *rmapp;
 
-				rmapp = __gfn_to_rmap(gfn, j, memslot);
-				ret |= handler(kvm, rmapp, data);
-			}
-			trace_kvm_age_page(memslot->userspace_addr +
-					(gfn - memslot->base_gfn) * PAGE_SIZE,
-					memslot, ret);
-			retval |= ret;
+			idx = gfn_to_index(gfn_start, memslot->base_gfn, j);
+			idx_end = gfn_to_index(gfn_end, memslot->base_gfn, j);
+
+			rmapp = __gfn_to_rmap(gfn_start, j, memslot);
+
+			/* Handle the first one even if idx == idx_end. */
+			do {
+				ret |= handler(kvm, rmapp++, data);
+			} while (++idx < idx_end);
 		}
 	}
 
-	return retval;
+	return ret;
 }
 
 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,