From patchwork Wed Nov 16 22:58:39 2011 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Paul Mackerras X-Patchwork-Id: 126067 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id ABA41B72B0 for ; Thu, 17 Nov 2011 10:58:51 +1100 (EST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1754785Ab1KPX6u (ORCPT ); Wed, 16 Nov 2011 18:58:50 -0500 Received: from ozlabs.org ([203.10.76.45]:42654 "EHLO ozlabs.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1753955Ab1KPX6u (ORCPT ); Wed, 16 Nov 2011 18:58:50 -0500 Received: by ozlabs.org (Postfix, from userid 1003) id 0E650B71C6; Thu, 17 Nov 2011 10:58:49 +1100 (EST) Date: Thu, 17 Nov 2011 09:58:39 +1100 From: Paul Mackerras To: kvm-ppc@vger.kernel.org Cc: Alexander Graf , linuxppc-dev@ozlabs.org Subject: [PATCH 04/11] KVM: PPC: Remove io_slot_pfn array Message-ID: <20111116225839.GE26985@bloggs.ozlabs.ibm.com> References: <20111116225055.GA26985@bloggs.ozlabs.ibm.com> MIME-Version: 1.0 Content-Disposition: inline In-Reply-To: <20111116225055.GA26985@bloggs.ozlabs.ibm.com> User-Agent: Mutt/1.5.21 (2010-09-15) Sender: kvm-ppc-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm-ppc@vger.kernel.org This stores the PFNs for I/O mappings in the slot->rmap array, as is now done for system RAM. This simplifies the h_enter code and allows us to remove the io_slot_pfn array. 
Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_host.h | 2 -- arch/powerpc/kvm/book3s_hv.c | 16 +++++++++------- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 22 +++++++++++----------- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 52fd741..e0751e5 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -188,8 +188,6 @@ struct kvm_arch { struct list_head spapr_tce_tables; unsigned short last_vcpu[NR_CPUS]; struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; - unsigned long io_slot_pfn[KVM_MEMORY_SLOTS + - KVM_PRIVATE_MEM_SLOTS]; #endif /* CONFIG_KVM_BOOK3S_64_HV */ }; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index bc512ef..48a0648 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1078,7 +1078,7 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, struct kvmppc_rma_info *ri = NULL; struct vm_area_struct *vma; struct page *page; - unsigned long hva; + unsigned long hva, pfn; unsigned long lpcr; /* @@ -1092,6 +1092,8 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, if (!vma || vma->vm_start > mem->userspace_addr) goto err_unlock; + npages = mem->memory_size >> PAGE_SHIFT; + /* For now require the memory to be in one vma */ if (mem->userspace_addr + mem->memory_size > vma->vm_end) { pr_err("not one vma %llx > %lx\n", @@ -1114,15 +1116,16 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, if ((vma->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE)) goto err_unlock; + up_read(&current->mm->mmap_sem); /* - * Tag the memslot with a private flag and store the pfn - * in a separate array for use by H_ENTER + * Tag the memslot with a private flag and store the pfns + * in the rmap array. 
*/ memslot->flags |= KVM_MEMSLOT_IO; - kvm->arch.io_slot_pfn[memslot->id] = - vma->vm_pgoff + (offset >> PAGE_SHIFT); - up_read(&current->mm->mmap_sem); + pfn = vma->vm_pgoff + (offset >> PAGE_SHIFT); + for (i = 0; i < npages; ++i) + memslot->rmap[i] = pfn++; return 0; } @@ -1218,7 +1221,6 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); } - npages = mem->memory_size >> PAGE_SHIFT; for (i = 0; i < npages; ++i) { hva = mem->userspace_addr + (i << PAGE_SHIFT); page = hva_to_page(hva); diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index b82da85..5a84791 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -121,12 +121,6 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, /* Check for MMIO pass-through */ if (memslot->flags & KVM_MEMSLOT_IO) { - /* check if the start pfn has page size alignment */ - pa = kvm->arch.io_slot_pfn[memslot->id] << PAGE_SHIFT; - pa += gpa - (memslot->base_gfn << PAGE_SHIFT); - if (pa & (psize - 1)) - return H_PARAMETER; - /* Check WIMG */ if ((ptel & HPTE_R_WIMG) != (HPTE_R_I) && (ptel & HPTE_R_WIMG) != (HPTE_R_I | HPTE_R_G)) @@ -135,17 +129,23 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, /* System RAM */ if (porder > kvm->arch.ram_porder) return H_PARAMETER; - rmap_entry = &memslot->rmap[gfn - memslot->base_gfn]; - rmap_entry = real_vmalloc_addr(rmap_entry); - pa = *rmap_entry << PAGE_SHIFT; - if (!pa) - return H_PARAMETER; /* Check WIMG */ if ((ptel & HPTE_R_WIMG) != HPTE_R_M && (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M)) return H_PARAMETER; } + rmap_entry = &memslot->rmap[gfn - memslot->base_gfn]; + rmap_entry = real_vmalloc_addr(rmap_entry); + if (!rmap_entry) + return H_PARAMETER; + pa = *rmap_entry << PAGE_SHIFT; + if (!pa) + return H_PARAMETER; + + /* check if the start pfn has page size alignment */ + if (pa & (psize - 1)) + return H_PARAMETER; ptel &= 
~(HPTE_R_PP0 - psize); ptel |= pa;