From patchwork Thu Feb 28 04:13:13 2013
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Bharat Bhushan
X-Patchwork-Id: 223768
From: Bharat Bhushan
To: , , ,
CC: Bharat Bhushan
Subject: [PATCH 4/7] booke: Save and restore debug registers on guest entry and exit
Date: Thu, 28 Feb 2013 09:43:13 +0530
Message-ID: <1362024796-4237-5-git-send-email-bharat.bhushan@freescale.com>
X-Mailer: git-send-email 1.7.0.4
In-Reply-To: <1362024796-4237-1-git-send-email-bharat.bhushan@freescale.com>
References: <1362024796-4237-1-git-send-email-bharat.bhushan@freescale.com>
X-Mailing-List: kvm-ppc@vger.kernel.org

On guest entry: if the guest wants to use the debug registers, save the
h/w debug registers in host_dbg_reg and load the h/w debug registers
from shadow_dbg_reg. Otherwise leave the h/w debug registers as they are.

On guest exit: if the guest/user-space was using the debug resources,
restore the h/w debug registers from host_dbg_reg. There is no need to
save the guest debug registers, as shadow_dbg_reg already holds the
required values. If the guest was not using the debug resources, there
is no need to restore the h/w registers.
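Illustration (not part of the patch): the entry/exit policy described above
can be summarised by the minimal C sketch below. The structure mirrors
kvmppc_booke_debug_reg and the shadow_dbg_reg/host_dbg_reg fields added in
this patch; read_hw_debug_regs()/write_hw_debug_regs() and the hw_dbg
variable are hypothetical stand-ins for the mfspr/mtspr sequences in
booke_interrupts.S, and the "guest uses debug" test (IDM bit in the shadow
DBCR0) is a simplification of the exact condition used in the assembly.

/* Minimal sketch of the entry/exit policy; not the actual implementation. */
#include <stdbool.h>
#include <stdint.h>

#define DBCR0_IDM 0x40000000u			/* Book-E internal debug mode bit */

struct booke_debug_reg {
	uint32_t dbcr0, dbcr1, dbcr2;
	uint64_t iac[4];
	uint64_t dac[2];
};

struct vcpu_dbg {
	struct booke_debug_reg shadow_dbg_reg;	/* values the guest should run with */
	struct booke_debug_reg host_dbg_reg;	/* host values saved on entry */
};

static struct booke_debug_reg hw_dbg;		/* stand-in for the real debug SPRs */

static void read_hw_debug_regs(struct booke_debug_reg *r)        { *r = hw_dbg; }
static void write_hw_debug_regs(const struct booke_debug_reg *r) { hw_dbg = *r; }

static bool guest_uses_debug(const struct vcpu_dbg *v)
{
	return v->shadow_dbg_reg.dbcr0 & DBCR0_IDM;
}

void on_guest_entry(struct vcpu_dbg *v)
{
	if (!guest_uses_debug(v))
		return;				/* leave h/w debug registers as they are */
	read_hw_debug_regs(&v->host_dbg_reg);	/* save host state */
	write_hw_debug_regs(&v->shadow_dbg_reg);/* load the guest (shadow) state */
}

void on_guest_exit(struct vcpu_dbg *v)
{
	if (!guest_uses_debug(v))
		return;				/* nothing was changed on entry */
	/* no need to save guest state: shadow_dbg_reg already holds it */
	write_hw_debug_regs(&v->host_dbg_reg);	/* restore host state */
}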
Signed-off-by: Bharat Bhushan
---
 arch/powerpc/include/asm/kvm_host.h |    5 ++
 arch/powerpc/kernel/asm-offsets.c   |   26 ++++++++
 arch/powerpc/kvm/booke_interrupts.S |  114 +++++++++++++++++++++++++++++++++++
 3 files changed, 145 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index f4ba881..a9feeb0 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -504,7 +504,12 @@ struct kvm_vcpu_arch {
 	u32 mmucfg;
 	u32 epr;
 	u32 crit_save;
+	/* guest debug registers */
 	struct kvmppc_booke_debug_reg dbg_reg;
+	/* shadow debug registers */
+	struct kvmppc_booke_debug_reg shadow_dbg_reg;
+	/* host debug registers */
+	struct kvmppc_booke_debug_reg host_dbg_reg;
 #endif
 	gpa_t paddr_accessed;
 	gva_t vaddr_accessed;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 02048f3..22deda7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -563,6 +563,32 @@ int main(void)
 	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
 	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
 	DEFINE(VCPU_CRIT_SAVE, offsetof(struct kvm_vcpu, arch.crit_save));
+	DEFINE(VCPU_DBSR, offsetof(struct kvm_vcpu, arch.dbsr));
+	DEFINE(VCPU_SHADOW_DBG, offsetof(struct kvm_vcpu, arch.shadow_dbg_reg));
+	DEFINE(VCPU_HOST_DBG, offsetof(struct kvm_vcpu, arch.host_dbg_reg));
+	DEFINE(KVMPPC_DBG_DBCR0, offsetof(struct kvmppc_booke_debug_reg,
+					  dbcr0));
+	DEFINE(KVMPPC_DBG_DBCR1, offsetof(struct kvmppc_booke_debug_reg,
+					  dbcr1));
+	DEFINE(KVMPPC_DBG_DBCR2, offsetof(struct kvmppc_booke_debug_reg,
+					  dbcr2));
+#ifdef CONFIG_KVM_E500MC
+	DEFINE(KVMPPC_DBG_DBCR4, offsetof(struct kvmppc_booke_debug_reg,
+					  dbcr4));
+#endif
+	DEFINE(KVMPPC_DBG_IAC1, offsetof(struct kvmppc_booke_debug_reg,
+					 iac[0]));
+	DEFINE(KVMPPC_DBG_IAC2, offsetof(struct kvmppc_booke_debug_reg,
+					 iac[1]));
+	DEFINE(KVMPPC_DBG_IAC3, offsetof(struct kvmppc_booke_debug_reg,
+					 iac[2]));
+	DEFINE(KVMPPC_DBG_IAC4, offsetof(struct kvmppc_booke_debug_reg,
+					 iac[3]));
+	DEFINE(KVMPPC_DBG_DAC1, offsetof(struct kvmppc_booke_debug_reg,
+					 dac[0]));
+	DEFINE(KVMPPC_DBG_DAC2, offsetof(struct kvmppc_booke_debug_reg,
+					 dac[1]));
+	DEFINE(VCPU_GUEST_DEBUG, offsetof(struct kvm_vcpu, guest_debug));
 #endif /* CONFIG_PPC_BOOK3S */
 #endif /* CONFIG_KVM */
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 2c6deb5..6d78e01 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -39,6 +39,8 @@
 #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
 #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
+#define DBCR0_AC_BITS (DBCR0_IAC1 | DBCR0_IAC2 | DBCR0_IAC3 | DBCR0_IAC4 | \
+		       DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W)
 #define NEED_INST_MASK ((1<
[...]
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+	PPC_LD(r7, VCPU_HOST_DBG+KVMPPC_DBG_IAC3, r4)
+	PPC_LD(r9, VCPU_HOST_DBG+KVMPPC_DBG_IAC4, r4)
+	mtspr	SPRN_IAC3, r7
+	mtspr	SPRN_IAC4, r9
+#endif
+	PPC_LD(r7, VCPU_HOST_DBG+KVMPPC_DBG_DAC1, r4)
+	PPC_LD(r9, VCPU_HOST_DBG+KVMPPC_DBG_DAC2, r4)
+	mtspr	SPRN_DAC1, r7
+	mtspr	SPRN_DAC2, r9
+skip_load_hw_bkpts:
+	/* Clear the h/w DBSR and save the current (guest) DBSR */
+	mfspr	r9, SPRN_DBSR
+	mtspr	SPRN_DBSR, r9
+	isync
+	andi.	r7, r6, NEED_DEBUG_SAVE
+	beq	skip_dbsr_save
+	/*
+	 * If the vcpu->guest_debug flag is set then do not check
+	 * shared->msr.DE, as this kind of debugging (say, by QEMU) does
+	 * not depend on shared->msr.de. In these scenarios MSR.DE is
+	 * always set via shared_msr and should always be handled.
+	 */
+	lwz	r7, VCPU_GUEST_DEBUG(r4)
+	cmpwi	r7, 0
+	bne	skip_save_trap_event
+	PPC_LL	r3, VCPU_SHARED(r4)
+	PPC_LD(r3, VCPU_SHARED_MSR, r3)
+	andi.	r3, r3, MSR_DE
+	bne	skip_save_trap_event
+	andis.	r9, r9, DBSR_TIE@h
+skip_save_trap_event:
+	stw	r9, VCPU_DBSR(r4)
+skip_dbsr_save:
+	mtspr	SPRN_DBCR0, r8
+skip_load_host_debug:
+
 	/* Save remaining volatile guest register state to vcpu. */
 	stw	r0, VCPU_GPR(R0)(r4)
 	stw	r1, VCPU_GPR(R1)(r4)
@@ -468,6 +525,63 @@ lightweight_exit:
 	PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
 	mtspr	SPRN_SPRG7W, r3
+	mfmsr	r7
+	rlwinm	r7, r7, 0, ~MSR_DE
+	mtmsr	r7
+	lwz	r6, VCPU_SHADOW_DBG+KVMPPC_DBG_DBCR0(r4)
+	rlwinm.	r7, r6, 0, ~DBCR0_IDM
+	beq	skip_load_guest_debug
+	mfspr	r8, SPRN_DBCR0
+	stw	r8, VCPU_HOST_DBG+KVMPPC_DBG_DBCR0(r4)
+	andis.	r3, r6, DBCR0_AC_BITS@h
+	beq	skip_hw_bkpts
+	mfspr	r7, SPRN_DBCR1
+	stw	r7, VCPU_HOST_DBG+KVMPPC_DBG_DBCR1(r4)
+	mfspr	r8, SPRN_DBCR2
+	stw	r8, VCPU_HOST_DBG+KVMPPC_DBG_DBCR2(r4)
+	mfspr	r7, SPRN_IAC1
+	PPC_STD(r7, VCPU_HOST_DBG+KVMPPC_DBG_IAC1, r4)
+	mfspr	r8, SPRN_IAC2
+	PPC_STD(r8, VCPU_HOST_DBG+KVMPPC_DBG_IAC2, r4)
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+	mfspr	r7, SPRN_IAC3
+	PPC_STD(r7, VCPU_HOST_DBG+KVMPPC_DBG_IAC3, r4)
+	mfspr	r8, SPRN_IAC4
+	PPC_STD(r8, VCPU_HOST_DBG+KVMPPC_DBG_IAC4, r4)
+#endif
+	mfspr	r7, SPRN_DAC1
+	PPC_STD(r7, VCPU_HOST_DBG+KVMPPC_DBG_DAC1, r4)
+	mfspr	r8, SPRN_DAC2
+	PPC_STD(r8, VCPU_HOST_DBG+KVMPPC_DBG_DAC2, r4)
+	li	r8, 0
+	mtspr	SPRN_DBCR0, r8	/* disable all debug events */
+	PPC_LD(r7, VCPU_SHADOW_DBG+KVMPPC_DBG_DBCR1, r4)
+	PPC_LD(r8, VCPU_SHADOW_DBG+KVMPPC_DBG_DBCR2, r4)
+	mtspr	SPRN_DBCR1, r7
+	mtspr	SPRN_DBCR2, r8
+	PPC_LD(r7, VCPU_SHADOW_DBG+KVMPPC_DBG_IAC1, r4)
+	PPC_LD(r8, VCPU_SHADOW_DBG+KVMPPC_DBG_IAC2, r4)
+	mtspr	SPRN_IAC1, r7
+	mtspr	SPRN_IAC2, r8
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+	PPC_LD(r7, VCPU_SHADOW_DBG+KVMPPC_DBG_IAC3, r4)
+	PPC_LD(r8, VCPU_SHADOW_DBG+KVMPPC_DBG_IAC4, r4)
+	mtspr	SPRN_IAC3, r7
+	mtspr	SPRN_IAC4, r8
+#endif
+	PPC_LD(r7, VCPU_SHADOW_DBG+KVMPPC_DBG_DAC1, r4)
+	PPC_LD(r8, VCPU_SHADOW_DBG+KVMPPC_DBG_DAC2, r4)
+	mtspr	SPRN_DAC1, r7
+	mtspr	SPRN_DAC2, r8
+skip_hw_bkpts:
+	/* Clear any deferred debug events */
+	mfspr	r8, SPRN_DBSR
+	mtspr	SPRN_DBSR, r8
+	isync
+	/* Load the guest DBCR0 */
+	mtspr	SPRN_DBCR0, r6
+skip_load_guest_debug:
+
 #ifdef CONFIG_KVM_EXIT_TIMING
 	/* save enter time */
 1:
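A note on the DBSR handling in the exit path above: when the exit reason
requires a debug save (NEED_DEBUG_SAVE), the DBSR value recorded in
vcpu->arch.dbsr depends on who owns the debug resources. If
vcpu->guest_debug is set (userspace debugging, e.g. QEMU), the full DBSR is
kept; otherwise the guest's MSR.DE gates it, and with MSR.DE clear only the
trap-event bits survive. A minimal C sketch of that decision follows; the
MSR_DE and DBSR_TIE values are assumed here from the usual Book-E layout in
reg_booke.h and are only illustrative.

/* Sketch of the DBSR-save decision on guest exit; not the actual code. */
#include <stdint.h>

#define MSR_DE    0x00000200u	/* debug interrupt enable */
#define DBSR_TIE  0x01000000u	/* trap instruction debug event (assumed value) */

/* Returns the DBSR value that should be stored into vcpu->arch.dbsr. */
static uint32_t dbsr_to_save(uint32_t dbsr, uint32_t guest_msr,
			     uint32_t guest_debug)
{
	if (guest_debug)
		return dbsr;		/* userspace debugging: keep all events */
	if (guest_msr & MSR_DE)
		return dbsr;		/* guest enabled debug interrupts: keep all */
	return dbsr & DBSR_TIE;		/* otherwise only trap events are recorded */
}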