From patchwork Fri Mar 27 03:29:56 2015
X-Patchwork-Submitter: Paul Mackerras
X-Patchwork-Id: 455283
From: Paul Mackerras <paulus@samba.org>
To: kvm-ppc@vger.kernel.org, kvm@vger.kernel.org
Cc: Alexander Graf
Subject: [PATCH 12/12] KVM: PPC: Book3S HV: Use msgsnd for signalling threads on POWER8
Date: Fri, 27 Mar 2015 14:29:56 +1100
Message-Id: <1427426996-17651-13-git-send-email-paulus@samba.org>
X-Mailer: git-send-email 2.1.4
In-Reply-To: <1427426996-17651-1-git-send-email-paulus@samba.org>
References: <1427426996-17651-1-git-send-email-paulus@samba.org>

This uses msgsnd where possible for signalling other threads within the
same core on POWER8 systems, rather than IPIs through the XICS interrupt
controller.  This includes waking secondary threads to run the guest,
the interrupts generated by the virtual XICS, and the interrupts to
bring the other threads out of the guest when exiting.

Aggregated statistics from debugfs across vcpus for a guest with 32
vcpus, 8 threads/vcore, running on a POWER8, show this before the change:

  rm_entry:   3387.6ns (228 - 86600, 1008969 samples)
  rm_exit:    4561.5ns (12 - 3477452, 1009402 samples)
  rm_intr:    1660.0ns (12 - 553050, 3600051 samples)

and this after the change:

  rm_entry:   3060.1ns (212 - 65138, 953873 samples)
  rm_exit:    4244.1ns (12 - 9693408, 954331 samples)
  rm_intr:    1342.3ns (12 - 1104718, 3405326 samples)

for a test of booting Fedora 20 big-endian to the login prompt.

The time taken for a H_PROD hcall (which is handled in the host kernel)
went down from about 35 microseconds to about 16 microseconds with this
change.

The noinline added to kvmppc_run_core turned out to be necessary for
good performance, at least with gcc 4.9.2 as packaged with Fedora 21
and a little-endian POWER8 host.

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
Note that this patch depends on the patch "powerpc/powernv: Fixes for
hypervisor doorbell handling", which is now upstream in Linus' tree as
commit 755563bc79c7, for the definition of PPC_MSGCLR().
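For reference, the same-core test and doorbell message layout used by the
new kvmppc_ipi_thread()/kvmhv_rm_send_ipi() code in the diff below reduce
to roughly the following condensed sketch (not part of the patch;
preempt_disable() and the check for a valid xics_phys are left out here).
On POWER8 a msgsnd "server" doorbell can only reach hardware threads of
the sending core, which is why the XICS IPI path is kept as the fallback:

	/* Condensed illustration only -- see kvmppc_ipi_thread() below */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(smp_processor_id())) {
		/* "server" doorbell addressed to one thread of this core */
		unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER) |
				    cpu_thread_in_core(cpu);
		smp_mb();	/* order memory updates vs. the doorbell */
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
	} else {
		xics_wake_cpu(cpu);	/* fall back to an IPI via the XICS */
	}
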
 arch/powerpc/kernel/asm-offsets.c       |  3 ++
 arch/powerpc/kvm/book3s_hv.c            | 51 ++++++++++++++++++++++-----------
 arch/powerpc/kvm/book3s_hv_builtin.c    | 16 +++++++++--
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 22 ++++++++++++--
 4 files changed, 70 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0d07efb..0034b6b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include <asm/dbell.h>
 #ifdef CONFIG_PPC64
 #include
 #include
@@ -759,5 +760,7 @@ int main(void)
 			offsetof(struct paca_struct, subcore_sibling_mask));
 #endif

+	DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
+
 	return 0;
 }
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 1426459..bb29e75 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -51,6 +51,7 @@
 #include
 #include
 #include
+#include <asm/dbell.h>
 #include
 #include
 #include
@@ -84,9 +85,35 @@ static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

+static bool kvmppc_ipi_thread(int cpu)
+{
+	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		preempt_disable();
+		if (cpu_first_thread_sibling(cpu) ==
+		    cpu_first_thread_sibling(smp_processor_id())) {
+			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+			msg |= cpu_thread_in_core(cpu);
+			smp_mb();
+			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+			preempt_enable();
+			return true;
+		}
+		preempt_enable();
+	}
+
+#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
+	if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) {
+		xics_wake_cpu(cpu);
+		return true;
+	}
+#endif
+
+	return false;
+}
+
 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 {
-	int me;
 	int cpu = vcpu->cpu;
 	wait_queue_head_t *wqp;

@@ -96,20 +123,12 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 		++vcpu->stat.halt_wakeup;
 	}

-	me = get_cpu();
+	if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid))
+		return;

 	/* CPU points to the first thread of the core */
-	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
-#ifdef CONFIG_PPC_ICP_NATIVE
-		int real_cpu = cpu + vcpu->arch.ptid;
-		if (paca[real_cpu].kvm_hstate.xics_phys)
-			xics_wake_cpu(real_cpu);
-		else
-#endif
-		if (cpu_online(cpu))
-			smp_send_reschedule(cpu);
-	}
-	put_cpu();
+	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
+		smp_send_reschedule(cpu);
 }

 /*
@@ -1781,10 +1800,8 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
 	/* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */
 	smp_wmb();
 	tpaca->kvm_hstate.kvm_vcpu = vcpu;
-#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
 	if (cpu != smp_processor_id())
-		xics_wake_cpu(cpu);
-#endif
+		kvmppc_ipi_thread(cpu);
 }

 static void kvmppc_wait_for_nap(void)
@@ -1933,7 +1950,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
  * Run a set of guest threads on a physical core.
  * Called with vc->lock held.
  */
-static void kvmppc_run_core(struct kvmppc_vcore *vc)
+static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
 	struct kvm_vcpu *vcpu;
 	int i;
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index c42aa55..ed2589d 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -23,6 +23,8 @@
 #include
 #include
 #include
+#include <asm/dbell.h>
+#include <asm/cputhreads.h>

 #define KVM_CMA_CHUNK_ORDER	18

@@ -193,7 +195,7 @@ static inline void rm_writeb(unsigned long paddr, u8 val)
 }

 /*
- * Send an interrupt to another CPU.
+ * Send an interrupt or message to another CPU.
  * This can only be called in real mode.
  * The caller needs to include any barrier needed to order writes
  * to memory vs. the IPI/message.
@@ -202,7 +204,17 @@ void kvmhv_rm_send_ipi(int cpu)
 {
 	unsigned long xics_phys;

-	/* Poke the target */
+	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+	    cpu_first_thread_sibling(cpu) ==
+	    cpu_first_thread_sibling(raw_smp_processor_id())) {
+		unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+		msg |= cpu_thread_in_core(cpu);
+		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+		return;
+	}
+
+	/* Else poke the target with an IPI */
 	xics_phys = paca[cpu].kvm_hstate.xics_phys;
 	rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
 }
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 13e7523..fb5b19c 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1122,6 +1122,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
 	beq	hcall_try_real_mode

+	/* Hypervisor doorbell - exit only if host IPI flag set */
+	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
+	bne	3f
+	lbz	r0, HSTATE_HOST_IPI(r13)
+	beq	4f
+	b	guest_exit_cont
+3:
 	/* External interrupt ? */
 	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
 	bne+	guest_exit_cont
@@ -1134,7 +1141,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	bgt	guest_exit_cont

 	/* Check if any CPU is heading out to the host, if so head out too */
-	ld	r5, HSTATE_KVM_VCORE(r13)
+4:	ld	r5, HSTATE_KVM_VCORE(r13)
 	lwz	r0, VCORE_ENTRY_EXIT(r5)
 	cmpwi	r0, 0x100
 	mr	r4, r9
@@ -2147,7 +2154,7 @@ _GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
 /*
  * Take a nap until a decrementer or external or doobell interrupt
  * occurs, with PECE1 and PECE0 set in LPCR.
- * On POWER8, if we are ceding, also set PECEDP.
+ * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
  * Also clear the runlatch bit before napping.
  */
 kvm_do_nap:
@@ -2160,6 +2167,7 @@ kvm_do_nap:
 	mfspr	r5,SPRN_LPCR
 	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
 BEGIN_FTR_SECTION
+	ori	r5, r5, LPCR_PECEDH
 	rlwimi	r5, r3, 0, LPCR_PECEDP
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mtspr	SPRN_LPCR,r5
@@ -2298,7 +2306,7 @@ machine_check_realmode:
  * Returns (in r3):
  *	0 if nothing needs to be done
  *	1 if something happened that needs to be handled by the host
- *	-1 if there was a guest wakeup (IPI)
+ *	-1 if there was a guest wakeup (IPI or msgsnd)
  *
  * Also sets r12 to the interrupt vector for any interrupt that needs
  * to be handled now by the host (0x500 for external interrupt), or zero.
@@ -2329,7 +2337,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

 	/* hypervisor doorbell */
 3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
+	/* see if it's a host IPI */
 	li	r3, 1
+	lbz	r0, HSTATE_HOST_IPI(r13)
+	cmpwi	r0, 0
+	bnelr
+	/* if not, clear it and return -1 */
+	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
+	PPC_MSGCLR(6)
+	li	r3, -1
 	blr

 /*