Message ID | 1593595262-1433-2-git-send-email-atrajeev@linux.vnet.ibm.com (mailing list archive) |
---|---|
State | Changes Requested |
Headers | show |
Series | powerpc/perf: Add support for power10 PMU Hardware | expand |
Athira Rajeev <atrajeev@linux.vnet.ibm.com> writes: ... > diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c > index cd6a742..5c64bd3 100644 > --- a/arch/powerpc/perf/core-book3s.c > +++ b/arch/powerpc/perf/core-book3s.c > @@ -39,10 +39,10 @@ struct cpu_hw_events { > unsigned int flags[MAX_HWEVENTS]; > /* > * The order of the MMCR array is: > - * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2 > + * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2, MMCR3 > * - 32-bit, MMCR0, MMCR1, MMCR2 > */ > - unsigned long mmcr[4]; > + unsigned long mmcr[5]; > struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS]; > u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; > u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; ... > @@ -1310,6 +1326,10 @@ static void power_pmu_enable(struct pmu *pmu) > if (!cpuhw->n_added) { > mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); > mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); > +#ifdef CONFIG_PPC64 > + if (ppmu->flags & PPMU_ARCH_310S) > + mtspr(SPRN_MMCR3, cpuhw->mmcr[4]); > +#endif /* CONFIG_PPC64 */ > goto out_enable; > } > > @@ -1353,6 +1373,11 @@ static void power_pmu_enable(struct pmu *pmu) > if (ppmu->flags & PPMU_ARCH_207S) > mtspr(SPRN_MMCR2, cpuhw->mmcr[3]); > > +#ifdef CONFIG_PPC64 > + if (ppmu->flags & PPMU_ARCH_310S) > + mtspr(SPRN_MMCR3, cpuhw->mmcr[4]); > +#endif /* CONFIG_PPC64 */ I don't think you need the #ifdef CONFIG_PPC64? cheers
> On 08-Jul-2020, at 4:32 PM, Michael Ellerman <mpe@ellerman.id.au> wrote: > > Athira Rajeev <atrajeev@linux.vnet.ibm.com> writes: > ... >> diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c >> index cd6a742..5c64bd3 100644 >> --- a/arch/powerpc/perf/core-book3s.c >> +++ b/arch/powerpc/perf/core-book3s.c >> @@ -39,10 +39,10 @@ struct cpu_hw_events { >> unsigned int flags[MAX_HWEVENTS]; >> /* >> * The order of the MMCR array is: >> - * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2 >> + * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2, MMCR3 >> * - 32-bit, MMCR0, MMCR1, MMCR2 >> */ >> - unsigned long mmcr[4]; >> + unsigned long mmcr[5]; >> struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS]; >> u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; >> u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; > ... >> @@ -1310,6 +1326,10 @@ static void power_pmu_enable(struct pmu *pmu) >> if (!cpuhw->n_added) { >> mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); >> mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); >> +#ifdef CONFIG_PPC64 >> + if (ppmu->flags & PPMU_ARCH_310S) >> + mtspr(SPRN_MMCR3, cpuhw->mmcr[4]); >> +#endif /* CONFIG_PPC64 */ >> goto out_enable; >> } >> >> @@ -1353,6 +1373,11 @@ static void power_pmu_enable(struct pmu *pmu) >> if (ppmu->flags & PPMU_ARCH_207S) >> mtspr(SPRN_MMCR2, cpuhw->mmcr[3]); >> >> +#ifdef CONFIG_PPC64 >> + if (ppmu->flags & PPMU_ARCH_310S) >> + mtspr(SPRN_MMCR3, cpuhw->mmcr[4]); >> +#endif /* CONFIG_PPC64 */ > > I don't think you need the #ifdef CONFIG_PPC64? Hi Michael Thanks for reviewing this series. SPRN_MMCR3 is not defined for PPC32 and we hit build failure for pmac32_defconfig. The #ifdef CONFIG_PPC64 is to address this. Thanks Athira > > cheers
Athira Rajeev <atrajeev@linux.vnet.ibm.com> writes: >> On 08-Jul-2020, at 4:32 PM, Michael Ellerman <mpe@ellerman.id.au> wrote: >> >> Athira Rajeev <atrajeev@linux.vnet.ibm.com> writes: >> ... >>> diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c >>> index cd6a742..5c64bd3 100644 >>> --- a/arch/powerpc/perf/core-book3s.c >>> +++ b/arch/powerpc/perf/core-book3s.c >>> @@ -39,10 +39,10 @@ struct cpu_hw_events { >>> unsigned int flags[MAX_HWEVENTS]; >>> /* >>> * The order of the MMCR array is: >>> - * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2 >>> + * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2, MMCR3 >>> * - 32-bit, MMCR0, MMCR1, MMCR2 >>> */ >>> - unsigned long mmcr[4]; >>> + unsigned long mmcr[5]; >>> struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS]; >>> u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; >>> u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; >> ... >>> @@ -1310,6 +1326,10 @@ static void power_pmu_enable(struct pmu *pmu) >>> if (!cpuhw->n_added) { >>> mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); >>> mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); >>> +#ifdef CONFIG_PPC64 >>> + if (ppmu->flags & PPMU_ARCH_310S) >>> + mtspr(SPRN_MMCR3, cpuhw->mmcr[4]); >>> +#endif /* CONFIG_PPC64 */ >>> goto out_enable; >>> } >>> >>> @@ -1353,6 +1373,11 @@ static void power_pmu_enable(struct pmu *pmu) >>> if (ppmu->flags & PPMU_ARCH_207S) >>> mtspr(SPRN_MMCR2, cpuhw->mmcr[3]); >>> >>> +#ifdef CONFIG_PPC64 >>> + if (ppmu->flags & PPMU_ARCH_310S) >>> + mtspr(SPRN_MMCR3, cpuhw->mmcr[4]); >>> +#endif /* CONFIG_PPC64 */ >> >> I don't think you need the #ifdef CONFIG_PPC64? > > Hi Michael > > Thanks for reviewing this series. > > SPRN_MMCR3 is not defined for PPC32 and we hit build failure for pmac32_defconfig. > The #ifdef CONFIG_PPC64 is to address this. We like to avoid #ifdefs in the body of the code like that. 
There's a bunch of existing #defines near the top of the file to make 32-bit work, I think you should just add another for this, so eg: #ifdef CONFIG_PPC32 ... #define SPRN_MMCR3 0 cheers
> On 13-Jul-2020, at 6:20 PM, Michael Ellerman <mpe@ellerman.id.au> wrote: > > Athira Rajeev <atrajeev@linux.vnet.ibm.com <mailto:atrajeev@linux.vnet.ibm.com>> writes: >>> On 08-Jul-2020, at 4:32 PM, Michael Ellerman <mpe@ellerman.id.au> wrote: >>> >>> Athira Rajeev <atrajeev@linux.vnet.ibm.com> writes: >>> ... >>>> diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c >>>> index cd6a742..5c64bd3 100644 >>>> --- a/arch/powerpc/perf/core-book3s.c >>>> +++ b/arch/powerpc/perf/core-book3s.c >>>> @@ -39,10 +39,10 @@ struct cpu_hw_events { >>>> unsigned int flags[MAX_HWEVENTS]; >>>> /* >>>> * The order of the MMCR array is: >>>> - * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2 >>>> + * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2, MMCR3 >>>> * - 32-bit, MMCR0, MMCR1, MMCR2 >>>> */ >>>> - unsigned long mmcr[4]; >>>> + unsigned long mmcr[5]; >>>> struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS]; >>>> u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; >>>> u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; >>> ... >>>> @@ -1310,6 +1326,10 @@ static void power_pmu_enable(struct pmu *pmu) >>>> if (!cpuhw->n_added) { >>>> mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); >>>> mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); >>>> +#ifdef CONFIG_PPC64 >>>> + if (ppmu->flags & PPMU_ARCH_310S) >>>> + mtspr(SPRN_MMCR3, cpuhw->mmcr[4]); >>>> +#endif /* CONFIG_PPC64 */ >>>> goto out_enable; >>>> } >>>> >>>> @@ -1353,6 +1373,11 @@ static void power_pmu_enable(struct pmu *pmu) >>>> if (ppmu->flags & PPMU_ARCH_207S) >>>> mtspr(SPRN_MMCR2, cpuhw->mmcr[3]); >>>> >>>> +#ifdef CONFIG_PPC64 >>>> + if (ppmu->flags & PPMU_ARCH_310S) >>>> + mtspr(SPRN_MMCR3, cpuhw->mmcr[4]); >>>> +#endif /* CONFIG_PPC64 */ >>> >>> I don't think you need the #ifdef CONFIG_PPC64? >> >> Hi Michael >> >> Thanks for reviewing this series. >> >> SPRN_MMCR3 is not defined for PPC32 and we hit build failure for pmac32_defconfig. >> The #ifdef CONFIG_PPC64 is to address this. 
> > We like to avoid #ifdefs in the body of the code like that. > > There's a bunch of existing #defines near the top of the file to make > 32-bit work, I think you should just add another for this, so eg: > > #ifdef CONFIG_PPC32 > ... > #define SPRN_MMCR3 0 Ok. I found that we currently do it the same way as you mentioned for MMCRA, which is not supported on 32-bit. I will work on this change. Thanks Athira > > cheers
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 3e9703f..895aeaa 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -69,6 +69,7 @@ struct power_pmu { #define PPMU_HAS_SIER 0x00000040 /* Has SIER */ #define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */ #define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */ +#define PPMU_ARCH_310S 0x00000200 /* Has MMCR3, SIER2 and SIER3 */ /* * Values for flags to get_alternatives() diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 52a6783..a466e94 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -272,6 +272,10 @@ struct thread_struct { unsigned mmcr0; unsigned used_ebb; + unsigned long mmcr3; + unsigned long sier2; + unsigned long sier3; + #endif }; diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 88e6c78..21a1b2d 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -876,7 +876,9 @@ #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ #define SPRN_MMCR1 798 #define SPRN_MMCR2 785 +#define SPRN_MMCR3 754 #define SPRN_UMMCR2 769 +#define SPRN_UMMCR3 738 #define SPRN_MMCRA 0x312 #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL @@ -918,6 +920,10 @@ #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */ #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */ +#define SPRN_SIER2 752 +#define SPRN_SIER3 753 +#define SPRN_USIER2 736 +#define SPRN_USIER3 737 #define SPRN_SIAR 796 #define SPRN_SDAR 797 #define SPRN_TACR 888 diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 571b325..46b4ebc 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -622,8 +622,10 @@ void 
ppc_enable_pmcs(void) SYSFS_PMCSETUP(pmc8, SPRN_PMC8); SYSFS_PMCSETUP(mmcra, SPRN_MMCRA); +SYSFS_PMCSETUP(mmcr3, SPRN_MMCR3); static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra); +static DEVICE_ATTR(mmcr3, 0600, show_mmcr3, store_mmcr3); #endif /* HAS_PPC_PMC56 */ @@ -886,6 +888,9 @@ static int register_cpu_online(unsigned int cpu) #ifdef CONFIG_PMU_SYSFS if (cpu_has_feature(CPU_FTR_MMCRA)) device_create_file(s, &dev_attr_mmcra); + + if (cpu_has_feature(CPU_FTR_ARCH_31)) + device_create_file(s, &dev_attr_mmcr3); #endif /* CONFIG_PMU_SYSFS */ if (cpu_has_feature(CPU_FTR_PURR)) { @@ -980,6 +985,9 @@ static int unregister_cpu_online(unsigned int cpu) #ifdef CONFIG_PMU_SYSFS if (cpu_has_feature(CPU_FTR_MMCRA)) device_remove_file(s, &dev_attr_mmcra); + + if (cpu_has_feature(CPU_FTR_ARCH_31)) + device_remove_file(s, &dev_attr_mmcr3); #endif /* CONFIG_PMU_SYSFS */ if (cpu_has_feature(CPU_FTR_PURR)) { diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index cd6a742..5c64bd3 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -39,10 +39,10 @@ struct cpu_hw_events { unsigned int flags[MAX_HWEVENTS]; /* * The order of the MMCR array is: - * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2 + * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2, MMCR3 * - 32-bit, MMCR0, MMCR1, MMCR2 */ - unsigned long mmcr[4]; + unsigned long mmcr[5]; struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS]; u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; @@ -586,6 +586,11 @@ static void ebb_switch_out(unsigned long mmcr0) current->thread.sdar = mfspr(SPRN_SDAR); current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK; current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK; + if (ppmu->flags & PPMU_ARCH_310S) { + current->thread.mmcr3 = mfspr(SPRN_MMCR3); + current->thread.sier2 = mfspr(SPRN_SIER2); + current->thread.sier3 = mfspr(SPRN_SIER3); + } } static unsigned long ebb_switch_in(bool ebb, struct 
cpu_hw_events *cpuhw) @@ -625,6 +630,12 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) * instead manage the MMCR2 entirely by itself. */ mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2); + + if (ppmu->flags & PPMU_ARCH_310S) { + mtspr(SPRN_MMCR3, current->thread.mmcr3); + mtspr(SPRN_SIER2, current->thread.sier2); + mtspr(SPRN_SIER3, current->thread.sier3); + } out: return mmcr0; } @@ -845,6 +856,11 @@ void perf_event_print_debug(void) pr_info("EBBRR: %016lx BESCR: %016lx\n", mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR)); } + + if (ppmu->flags & PPMU_ARCH_310S) { + pr_info("MMCR3: %016lx SIER2: %016lx SIER3: %016lx\n", + mfspr(SPRN_MMCR3), mfspr(SPRN_SIER2), mfspr(SPRN_SIER3)); + } #endif pr_info("SIAR: %016lx SDAR: %016lx SIER: %016lx\n", mfspr(SPRN_SIAR), sdar, sier); @@ -1310,6 +1326,10 @@ static void power_pmu_enable(struct pmu *pmu) if (!cpuhw->n_added) { mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); +#ifdef CONFIG_PPC64 + if (ppmu->flags & PPMU_ARCH_310S) + mtspr(SPRN_MMCR3, cpuhw->mmcr[4]); +#endif /* CONFIG_PPC64 */ goto out_enable; } @@ -1353,6 +1373,11 @@ static void power_pmu_enable(struct pmu *pmu) if (ppmu->flags & PPMU_ARCH_207S) mtspr(SPRN_MMCR2, cpuhw->mmcr[3]); +#ifdef CONFIG_PPC64 + if (ppmu->flags & PPMU_ARCH_310S) + mtspr(SPRN_MMCR3, cpuhw->mmcr[4]); +#endif /* CONFIG_PPC64 */ + /* * Read off any pre-existing events that need to move * to another PMC.