[2/4] KVM: Introduce hva_to_gfn() for kvm_handle_hva()

Message ID 20120615203144.2cbcd88f.yoshikawa.takuya@oss.ntt.co.jp
State New, archived

Commit Message

Takuya Yoshikawa June 15, 2012, 11:31 a.m. UTC
This restricts hva handling in the mmu code and makes it easier to extend
kvm_handle_hva() so that it can take a range of addresses later in this
patch series.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Cc: Alexander Graf <agraf@suse.de>
Cc: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/kvm/book3s_64_mmu_hv.c |   12 +++++-------
 arch/x86/kvm/mmu.c                  |   10 +++-------
 include/linux/kvm_host.h            |    7 +++++++
 3 files changed, 15 insertions(+), 14 deletions(-)

Comments

Takuya Yoshikawa June 15, 2012, 9:49 p.m. UTC | #1
On Fri, 15 Jun 2012 20:31:44 +0900
Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp> wrote:

...

> diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
> index d03eb6f..53716dd 100644
> --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
> +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
> @@ -767,15 +767,13 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
>  
>  	slots = kvm_memslots(kvm);
>  	kvm_for_each_memslot(memslot, slots) {
> -		unsigned long start = memslot->userspace_addr;
> -		unsigned long end;
> +		gfn_t gfn = hva_to_gfn(hva, memslot);
>  
> -		end = start + (memslot->npages << PAGE_SHIFT);
> -		if (hva >= start && hva < end) {
> -			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
> +		if (gfn >= memslot->base_gfn &&
> +		    gfn < memslot->base_gfn + memslot->npages) {

Here
...

> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index a2f3969..ba57b3b 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1199,14 +1199,10 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
>  	slots = kvm_memslots(kvm);
>  
>  	kvm_for_each_memslot(memslot, slots) {
> -		unsigned long start = memslot->userspace_addr;
> -		unsigned long end;
> -
> -		end = start + (memslot->npages << PAGE_SHIFT);
> -		if (hva >= start && hva < end) {
> -			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
> -			gfn_t gfn = memslot->base_gfn + gfn_offset;
> +		gfn_t gfn = hva_to_gfn(hva, memslot);
>  
> +		if (gfn >= memslot->base_gfn &&
> +		    gfn < memslot->base_gfn + memslot->npages) {

and here
...

> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 27ac8a4..92b2029 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -740,6 +740,13 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
>  		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
>  }
>  
> +static inline gfn_t hva_to_gfn(unsigned long hva, struct kvm_memory_slot *slot)
> +{
> +	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
> +
> +	return slot->base_gfn + gfn_offset;
> +}

Something may go wrong when hva < slot->userspace_addr: the unsigned
subtraction wraps around and the helper returns a bogus gfn.

I will fix this after I get some feedback on the other parts.

	Takuya
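
As a rough sketch of the concern above: since hva and slot->userspace_addr
are both unsigned, the subtraction wraps when the hva is below the slot, so
a conversion that validates the hva first cannot produce a bogus gfn. The
helper name and the bool-plus-output-parameter shape below are illustrative
only, not part of the posted patch:

/*
 * Illustrative only: refuse hvas outside the memslot so the unsigned
 * subtraction in the hva -> gfn conversion can never wrap.
 */
static inline bool hva_to_gfn_checked(unsigned long hva,
				      struct kvm_memory_slot *slot,
				      gfn_t *gfn)
{
	unsigned long start = slot->userspace_addr;
	unsigned long end = start + (slot->npages << PAGE_SHIFT);

	if (hva < start || hva >= end)
		return false;

	*gfn = slot->base_gfn + ((hva - start) >> PAGE_SHIFT);
	return true;
}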
Avi Kivity June 18, 2012, 11:59 a.m. UTC | #2
On 06/15/2012 02:31 PM, Takuya Yoshikawa wrote:
> This restricts hva handling in the mmu code and makes it easier to extend
> kvm_handle_hva() so that it can take a range of addresses later in this
> patch series.
> 
> 
>  
>  	kvm_for_each_memslot(memslot, slots) {
> -		unsigned long start = memslot->userspace_addr;
> -		unsigned long end;
> -
> -		end = start + (memslot->npages << PAGE_SHIFT);
> -		if (hva >= start && hva < end) {
> -			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
> -			gfn_t gfn = memslot->base_gfn + gfn_offset;
> +		gfn_t gfn = hva_to_gfn(hva, memslot);
>  
> +		if (gfn >= memslot->base_gfn &&
> +		    gfn < memslot->base_gfn + memslot->npages) {

First you convert it, then you check if the conversion worked?  Let's
make it a straightforward check-then-convert (or check-and-convert).

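A minimal sketch of the check-then-convert style being suggested here; the
hva_in_memslot() predicate and its placement are illustrative, not part of
the posted patch:

/* Illustrative helper, not from the patch. */
static inline bool hva_in_memslot(unsigned long hva,
				  struct kvm_memory_slot *slot)
{
	return hva >= slot->userspace_addr &&
	       hva < slot->userspace_addr + (slot->npages << PAGE_SHIFT);
}

	kvm_for_each_memslot(memslot, slots) {
		gfn_t gfn;

		if (!hva_in_memslot(hva, memslot))
			continue;

		/* The hva is known to be inside the slot here. */
		gfn = hva_to_gfn(hva, memslot);
		/* ... per-gfn handling as in the patch ... */
	}
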
>  			ret = 0;
>  
>  			for (j = PT_PAGE_TABLE_LEVEL;
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 27ac8a4..92b2029 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -740,6 +740,13 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
>  		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
>  }
>  
> +static inline gfn_t hva_to_gfn(unsigned long hva, struct kvm_memory_slot *slot)
> +{
> +	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
> +
> +	return slot->base_gfn + gfn_offset;
> +}

Should be named hva_to_gfn_memslot(), like gfn_to_hva_memslot() below, to
emphasise that it isn't generic.

> +
>  static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
>  					       gfn_t gfn)
>  {
>
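
For reference, the rename suggested above would make the helper mirror
gfn_to_hva_memslot(); a sketch, not necessarily the exact form that was
eventually merged:

static inline gfn_t hva_to_gfn_memslot(unsigned long hva,
				       struct kvm_memory_slot *slot)
{
	/* Same body as the posted patch, only the name changes. */
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}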

Patch

diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index d03eb6f..53716dd 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -767,15 +767,13 @@  static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 
 	slots = kvm_memslots(kvm);
 	kvm_for_each_memslot(memslot, slots) {
-		unsigned long start = memslot->userspace_addr;
-		unsigned long end;
+		gfn_t gfn = hva_to_gfn(hva, memslot);
 
-		end = start + (memslot->npages << PAGE_SHIFT);
-		if (hva >= start && hva < end) {
-			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+		if (gfn >= memslot->base_gfn &&
+		    gfn < memslot->base_gfn + memslot->npages) {
+			gfn_t gfn_offset = gfn - memslot->base_gfn;
 
-			ret = handler(kvm, &memslot->rmap[gfn_offset],
-				      memslot->base_gfn + gfn_offset);
+			ret = handler(kvm, &memslot->rmap[gfn_offset], gfn);
 			retval |= ret;
 		}
 	}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a2f3969..ba57b3b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1199,14 +1199,10 @@  static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 	slots = kvm_memslots(kvm);
 
 	kvm_for_each_memslot(memslot, slots) {
-		unsigned long start = memslot->userspace_addr;
-		unsigned long end;
-
-		end = start + (memslot->npages << PAGE_SHIFT);
-		if (hva >= start && hva < end) {
-			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
-			gfn_t gfn = memslot->base_gfn + gfn_offset;
+		gfn_t gfn = hva_to_gfn(hva, memslot);
 
+		if (gfn >= memslot->base_gfn &&
+		    gfn < memslot->base_gfn + memslot->npages) {
 			ret = 0;
 
 			for (j = PT_PAGE_TABLE_LEVEL;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 27ac8a4..92b2029 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -740,6 +740,13 @@  static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
 }
 
+static inline gfn_t hva_to_gfn(unsigned long hva, struct kvm_memory_slot *slot)
+{
+	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
+
+	return slot->base_gfn + gfn_offset;
+}
+
 static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
 					       gfn_t gfn)
 {