| Message ID | 20111129012004.GB9130@schlenkerla.am.freescale.net |
| --- | --- |
| State | New, archived |

On 29.11.2011, at 02:20, Scott Wood wrote:

> From: Liu Yu <yu.liu@freescale.com>
> 

Missing patch description.


Alex

> Signed-off-by: Liu Yu <yu.liu@freescale.com>
> [scottwood@freescale.com: made mas2 64-bit, and added mas8 init]
> Signed-off-by: Scott Wood <scottwood@freescale.com>
> ---
>  arch/powerpc/kvm/e500_tlb.c |   10 ++++---
>  arch/powerpc/kvm/trace.h    |   57 +++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 63 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
> index 5073768..d041f5e 100644
> --- a/arch/powerpc/kvm/e500_tlb.c
> +++ b/arch/powerpc/kvm/e500_tlb.c
> @@ -294,6 +294,9 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
>  	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
>  	asm volatile("isync; tlbwe" : : : "memory");
>  	local_irq_restore(flags);
> +
> +	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
> +	                              stlbe->mas2, stlbe->mas7_3);
>  }
> 
>  /* esel is index into set, not whole array */
> @@ -308,8 +311,6 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
>  				  MAS0_TLBSEL(1) |
>  				  MAS0_ESEL(to_htlb1_esel(esel)));
>  	}
> -	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
> -			     (u32)stlbe->mas7_3, (u32)(stlbe->mas7_3 >> 32));
>  }
> 
>  void kvmppc_map_magic(struct kvm_vcpu *vcpu)
> @@ -331,6 +332,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
>  	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
>  	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
>  		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
> +	magic.mas8 = 0;
> 
>  	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
>  	preempt_enable();
> @@ -946,8 +948,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
>  	gtlbe->mas2 = vcpu->arch.shared->mas2;
>  	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
> 
> -	trace_kvm_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, gtlbe->mas2,
> -			     (u32)gtlbe->mas7_3, (u32)(gtlbe->mas7_3 >> 32));
> +	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
> +				      gtlbe->mas2, gtlbe->mas7_3);
> 
>  	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
>  	if (tlbe_is_host_safe(vcpu, gtlbe)) {
> diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
> index b135d3d..f2ea44b 100644
> --- a/arch/powerpc/kvm/trace.h
> +++ b/arch/powerpc/kvm/trace.h
> @@ -337,6 +337,63 @@ TRACE_EVENT(kvm_book3s_slbmte,
> 
>  #endif /* CONFIG_PPC_BOOK3S */
> 
> +
> +/*************************************************************************
> + *                          Book3E trace points                          *
> + *************************************************************************/
> +
> +#ifdef CONFIG_BOOKE
> +
> +TRACE_EVENT(kvm_booke206_stlb_write,
> +	TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
> +	TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
> +
> +	TP_STRUCT__entry(
> +		__field( __u32, mas0 )
> +		__field( __u32, mas8 )
> +		__field( __u32, mas1 )
> +		__field( __u64, mas2 )
> +		__field( __u64, mas7_3 )
> +	),
> +
> +	TP_fast_assign(
> +		__entry->mas0 = mas0;
> +		__entry->mas8 = mas8;
> +		__entry->mas1 = mas1;
> +		__entry->mas2 = mas2;
> +		__entry->mas7_3 = mas7_3;
> +	),
> +
> +	TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
> +		__entry->mas0, __entry->mas8, __entry->mas1,
> +		__entry->mas2, __entry->mas7_3)
> +);
> +
> +TRACE_EVENT(kvm_booke206_gtlb_write,
> +	TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
> +	TP_ARGS(mas0, mas1, mas2, mas7_3),
> +
> +	TP_STRUCT__entry(
> +		__field( __u32, mas0 )
> +		__field( __u32, mas1 )
> +		__field( __u64, mas2 )
> +		__field( __u64, mas7_3 )
> +	),
> +
> +	TP_fast_assign(
> +		__entry->mas0 = mas0;
> +		__entry->mas1 = mas1;
> +		__entry->mas2 = mas2;
> +		__entry->mas7_3 = mas7_3;
> +	),
> +
> +	TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
> +		__entry->mas0, __entry->mas1,
> +		__entry->mas2, __entry->mas7_3)
> +);
> +
> +#endif
> +
>  #endif /* _TRACE_KVM_H */
> 
>  /* This part must be outside protection */
> -- 
> 1.7.7.rc3.4.g8d714
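
As an aside for anyone who wants to try the new events: the sketch below shows one way to enable them at runtime through ftrace. It assumes trace.h's TRACE_SYSTEM is `kvm` (so the events appear under events/kvm/) and that debugfs is mounted at /sys/kernel/debug; the sample record in the final comment uses made-up values and simply follows the TP_printk format strings from the patch.

    # Hedged sketch, not part of the patch: enable the two new Book3E
    # TLB-write tracepoints (paths assume debugfs at /sys/kernel/debug
    # and TRACE_SYSTEM "kvm").
    echo 1 > /sys/kernel/debug/tracing/events/kvm/kvm_booke206_stlb_write/enable
    echo 1 > /sys/kernel/debug/tracing/events/kvm/kvm_booke206_gtlb_write/enable

    # Read the trace buffer; each record follows the TP_printk format above,
    # e.g. (illustrative values only):
    #   kvm_booke206_gtlb_write: mas0=10030000 mas1=c0000500 mas2=fffff00a mas7_3=2000003f
    cat /sys/kernel/debug/tracing/trace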