From patchwork Wed Jul 3 12:42:37 2013
X-Patchwork-Submitter: Mihai Caraman
X-Patchwork-Id: 256612
From: Mihai Caraman
Subject: [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support
Date: Wed, 3 Jul 2013 15:42:37 +0300
Message-ID: <1372855359-13452-5-git-send-email-mihai.caraman@freescale.com>
In-Reply-To: <1372855359-13452-1-git-send-email-mihai.caraman@freescale.com>
References: <1372855359-13452-1-git-send-email-mihai.caraman@freescale.com>
X-Mailer: git-send-email 1.7.3.4
X-Mailing-List: kvm-ppc@vger.kernel.org

Add KVM Book3E AltiVec support. KVM Book3E FPU support gracefully reuses
the host infrastructure, so follow the same approach for AltiVec.
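For context, "reuse host infrastructure" here means faking the unit-unavailable
path on the host side: if the current thread does not yet own the unit (its MSR
bit is clear), call the host's own load routine and set the bit, so the host's
usual lazy save/restore machinery keeps working for guest state as well. Below
is a minimal sketch of that pattern for AltiVec, mirroring what the existing
FPU path does with load_up_fpu()/MSR_FP. The helper name is hypothetical and
for illustration only; the patch itself wires this up as
kvmppc_load_guest_altivec():

static inline void lazy_load_guest_vec(void)	/* hypothetical name, sketch only */
{
	/* Nothing to do if the thread already owns the AltiVec unit */
	if (current->thread.regs->msr & MSR_VEC)
		return;

	load_up_altivec(NULL);			/* host's own load routine */
	current->thread.regs->msr |= MSR_VEC;	/* thread owns the unit now */
}

As with the FPU case, this only works with preemption disabled, which is why
the patch calls it right before __kvmppc_vcpu_run() and on the lightweight
re-entry path in kvmppc_handle_exit().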
Signed-off-by: Mihai Caraman
---
 arch/powerpc/kvm/booke.c |   72 ++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 70 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 3cae2e3..c3c3af6 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -98,6 +98,19 @@ static inline bool kvmppc_supports_spe(void)
 	return false;
 }
 
+/*
+ * Always returns true if the AltiVec unit is present, see
+ * kvmppc_core_check_processor_compat().
+ */
+static inline bool kvmppc_supports_altivec(void)
+{
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		return true;
+#endif
+	return false;
+}
+
 #ifdef CONFIG_SPE
 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 {
@@ -151,6 +164,21 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 }
 
 /*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * It has to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+	if (kvmppc_supports_altivec()) {
+		if (!(current->thread.regs->msr & MSR_VEC)) {
+			load_up_altivec(NULL);
+			current->thread.regs->msr |= MSR_VEC;
+		}
+	}
+}
+
+/*
  * Helper function for "full" MSR writes. No need to call this if only
  * EE/CE/ME/DE/RI are changing.
  */
@@ -678,6 +706,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	u64 fpr[32];
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	vector128 vr[32];
+	vector128 vscr;
+	int used_vr = 0;
+#endif
+
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
@@ -716,6 +750,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		/* Save userspace VEC state in stack */
+		enable_kernel_altivec();
+		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+		vscr = current->thread.vscr;
+		used_vr = current->thread.used_vr;
+
+		/* Restore guest VEC state to thread */
+		memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+		current->thread.vscr = vcpu->arch.vscr;
+
+		kvmppc_load_guest_altivec(vcpu);
+	}
+#endif
+
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
 	/* No need for kvm_guest_exit. It's done in handle_exit.
@@ -736,6 +786,23 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	current->thread.fpexc_mode = fpexc_mode;
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		/* Save AltiVec state to thread */
+		if (current->thread.regs->msr & MSR_VEC)
+			giveup_altivec(current);
+
+		/* Save guest state */
+		memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
+		vcpu->arch.vscr = current->thread.vscr;
+
+		/* Restore userspace state */
+		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+		current->thread.vscr = vscr;
+		current->thread.used_vr = used_vr;
+	}
+#endif
+
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;
@@ -961,7 +1028,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 
 	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
-		if (kvmppc_supports_spe()) {
+		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
 			bool enabled = false;
 
 #ifndef CONFIG_KVM_BOOKE_HV
@@ -987,7 +1054,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	}
 
 	case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
-		if (kvmppc_supports_spe()) {
+		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
 			kvmppc_booke_queue_irqprio(vcpu,
 				BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
 			r = RESUME_GUEST;
@@ -1205,6 +1272,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	} else {
 		kvmppc_lazy_ee_enable();
 		kvmppc_load_guest_fp(vcpu);
+		kvmppc_load_guest_altivec(vcpu);
 	}
 }
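To summarize the kvmppc_vcpu_run() changes in one place: the AltiVec handling
brackets __kvmppc_vcpu_run() the same way the existing FPU handling does, by
parking the userspace vector state on the stack, installing the guest state
into the thread struct, running the guest, and then reversing both copies. A
compressed sketch of that flow (field and helper names taken from the patch;
not compilable outside a kernel tree):

#ifdef CONFIG_ALTIVEC
	vector128 vr[32], vscr;		/* userspace state parked on the stack */
	int used_vr = 0;

	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		enable_kernel_altivec();
		memcpy(vr, current->thread.vr, sizeof(vr));	/* user -> stack */
		vscr = current->thread.vscr;
		used_vr = current->thread.used_vr;

		/* guest -> thread, then thread -> AltiVec unit */
		memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		current->thread.vscr = vcpu->arch.vscr;
		kvmppc_load_guest_altivec(vcpu);
	}

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);	/* AltiVec unit -> thread */

		/* thread -> guest */
		memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = current->thread.vscr;

		/* stack -> user */
		memcpy(current->thread.vr, vr, sizeof(vr));
		current->thread.vscr = vscr;
		current->thread.used_vr = used_vr;
	}
#endif

The thread struct is the state the host's low-level AltiVec load/save code
operates on, so userspace and guest vector state time-share it around the run
loop; that appears to be what "gracefully reuse host infrastructure" buys over
adding a separate guest save/restore path.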