@@ -69,6 +69,7 @@ struct power_pmu {
#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
#define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */
+#define PPMU_ARCH_310S 0x00000200 /* Has MMCR3, SIER2 and SIER3 */
/*
* Values for flags to get_alternatives()
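The new PPMU_ARCH_310S bit is the hook the rest of this patch keys off: a CPU-specific PMU driver sets it in its struct power_pmu flags to tell the core book3s code that MMCR3, SIER2 and SIER3 exist. A reduced sketch of how a driver would opt in follows; the cut-down structure and the example values are illustrative only, since the real struct power_pmu carries many more mandatory fields (n_counter, compute_mmcr, get_constraint, ...).

/* Illustrative only -- a cut-down stand-in for struct power_pmu. */
#define PPMU_HAS_SIER	0x00000040
#define PPMU_ARCH_207S	0x00000080
#define PPMU_ARCH_310S	0x00000200	/* has MMCR3, SIER2 and SIER3 */

struct power_pmu_lite {
	const char	*name;
	unsigned long	flags;
};

/* An ISA v3.1 PMU description would carry the new flag alongside the
 * existing ones, so ebb_switch_*() and power_pmu_enable() below know
 * they may touch the new SPRs.
 */
static struct power_pmu_lite example_isa31_pmu = {
	.name	= "example ISA v3.1 PMU",
	.flags	= PPMU_HAS_SIER | PPMU_ARCH_207S | PPMU_ARCH_310S,
};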
@@ -272,6 +272,10 @@ struct thread_struct {
unsigned mmcr0;
unsigned used_ebb;
+ unsigned long mmcr3;
+ unsigned long sier2;
+ unsigned long sier3;
+
#endif
};
@@ -876,7 +876,9 @@
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
#define SPRN_MMCR2 785
+#define SPRN_MMCR3 754
#define SPRN_UMMCR2 769
+#define SPRN_UMMCR3 738
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -918,6 +920,10 @@
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
+#define SPRN_SIER2 752
+#define SPRN_SIER3 753
+#define SPRN_USIER2 736
+#define SPRN_USIER3 737
#define SPRN_SIAR 796
#define SPRN_SDAR 797
#define SPRN_TACR 888
@@ -622,8 +622,10 @@ void ppc_enable_pmcs(void)
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
+SYSFS_PMCSETUP(mmcr3, SPRN_MMCR3);
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
+static DEVICE_ATTR(mmcr3, 0600, show_mmcr3, store_mmcr3);
#endif /* HAS_PPC_PMC56 */
@@ -886,6 +888,9 @@ static int register_cpu_online(unsigned int cpu)
#ifdef CONFIG_PMU_SYSFS
if (cpu_has_feature(CPU_FTR_MMCRA))
device_create_file(s, &dev_attr_mmcra);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ device_create_file(s, &dev_attr_mmcr3);
#endif /* CONFIG_PMU_SYSFS */
if (cpu_has_feature(CPU_FTR_PURR)) {
@@ -980,6 +985,9 @@ static int unregister_cpu_online(unsigned int cpu)
#ifdef CONFIG_PMU_SYSFS
if (cpu_has_feature(CPU_FTR_MMCRA))
device_remove_file(s, &dev_attr_mmcra);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ device_remove_file(s, &dev_attr_mmcr3);
#endif /* CONFIG_PMU_SYSFS */
if (cpu_has_feature(CPU_FTR_PURR)) {
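The three sysfs hunks above hook MMCR3 into the existing per-CPU PMC attribute machinery: SYSFS_PMCSETUP() generates the show/store helpers, and the attribute file is created and removed only when the CPU reports CPU_FTR_ARCH_31. A minimal userspace sketch for reading it is below; it assumes the file lands next to the existing mmcra attribute at /sys/devices/system/cpu/cpuN/mmcr3 and, like the other SYSFS_PMCSETUP attributes, prints a bare hex value. CONFIG_PMU_SYSFS and root (the file mode is 0600) are required.

/* read_mmcr3.c -- illustrative only, not part of this patch */
#include <stdio.h>

int main(int argc, char **argv)
{
	/* Path assumed from the existing mmcra attribute layout. */
	const char *path = (argc > 1) ? argv[1]
				      : "/sys/devices/system/cpu/cpu0/mmcr3";
	unsigned long long val;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llx", &val) != 1) {
		fprintf(stderr, "unexpected contents in %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("%s: 0x%016llx\n", path, val);
	return 0;
}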
@@ -39,10 +39,10 @@ struct cpu_hw_events {
unsigned int flags[MAX_HWEVENTS];
/*
* The order of the MMCR array is:
- * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2
+ * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2, MMCR3
* - 32-bit, MMCR0, MMCR1, MMCR2
*/
- unsigned long mmcr[4];
+ unsigned long mmcr[5];
struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
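This hunk widens cpu_hw_events::mmcr[] from four to five entries so that, on 64-bit, mmcr[4] can hold an MMCR3 image alongside the existing MMCR0/MMCR1/MMCRA/MMCR2 slots. The bare numeric indices used later in the file (cpuhw->mmcr[3], cpuhw->mmcr[4]) are easy to misread, so the enum below, which is not part of the patch, simply spells out the 64-bit layout that the updated comment documents.

/* Illustrative only -- named indices for the 64-bit layout of
 * cpu_hw_events::mmcr[] described in the comment above, so that
 * accesses such as cpuhw->mmcr[4] read as "MMCR3".
 */
enum pmu_mmcr_idx {
	IDX_MMCR0 = 0,		/* mmcr[0] -> SPRN_MMCR0 */
	IDX_MMCR1 = 1,		/* mmcr[1] -> SPRN_MMCR1 */
	IDX_MMCRA = 2,		/* mmcr[2] -> SPRN_MMCRA */
	IDX_MMCR2 = 3,		/* mmcr[3] -> SPRN_MMCR2 */
	IDX_MMCR3 = 4,		/* mmcr[4] -> SPRN_MMCR3, ISA v3.1 only */
	IDX_MMCR_COUNT		/* == 5, matches the new array size */
};

With names like these, the mtspr(SPRN_MMCR3, cpuhw->mmcr[4]) calls later in this patch would read as mtspr(SPRN_MMCR3, cpuhw->mmcr[IDX_MMCR3]).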
@@ -584,6 +584,11 @@ static void ebb_switch_out(unsigned long mmcr0)
current->thread.sdar = mfspr(SPRN_SDAR);
current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
+ if (ppmu->flags & PPMU_ARCH_310S) {
+ current->thread.mmcr3 = mfspr(SPRN_MMCR3);
+ current->thread.sier2 = mfspr(SPRN_SIER2);
+ current->thread.sier3 = mfspr(SPRN_SIER3);
+ }
}
static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
@@ -623,6 +628,12 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
* instead manage the MMCR2 entirely by itself.
*/
mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2);
+
+ if (ppmu->flags & PPMU_ARCH_310S) {
+ mtspr(SPRN_MMCR3, current->thread.mmcr3);
+ mtspr(SPRN_SIER2, current->thread.sier2);
+ mtspr(SPRN_SIER3, current->thread.sier3);
+ }
out:
return mmcr0;
}
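The two hunks above extend the EBB context-switch path: ebb_switch_out() snapshots MMCR3/SIER2/SIER3 into the new thread_struct fields and ebb_switch_in() writes them back, in both cases only when the registered PMU advertises PPMU_ARCH_310S, so pre-ISA v3.1 cores never touch SPRs they do not implement. The standalone mock below sketches that guard-on-feature-flag save/restore pattern; mfspr/mtspr are stubbed over an array and struct task_pmu_state stands in for the thread_struct fields, so everything here is illustrative rather than kernel code.

/* ebb_mock.c -- standalone sketch, not kernel code */
#include <stdio.h>

#define PPMU_ARCH_310S	0x200

/* Pretend SPR file, indexed by SPR number, standing in for mfspr/mtspr. */
static unsigned long spr[1024];
static unsigned long mfspr(int n)		{ return spr[n]; }
static void mtspr(int n, unsigned long v)	{ spr[n] = v; }

#define SPRN_MMCR3	754
#define SPRN_SIER2	752
#define SPRN_SIER3	753

struct task_pmu_state {
	unsigned long mmcr3, sier2, sier3;
};

static void ebb_switch_out(struct task_pmu_state *t, unsigned long pmu_flags)
{
	if (pmu_flags & PPMU_ARCH_310S) {	/* only on ISA v3.1 PMUs */
		t->mmcr3 = mfspr(SPRN_MMCR3);
		t->sier2 = mfspr(SPRN_SIER2);
		t->sier3 = mfspr(SPRN_SIER3);
	}
}

static void ebb_switch_in(struct task_pmu_state *t, unsigned long pmu_flags)
{
	if (pmu_flags & PPMU_ARCH_310S) {
		mtspr(SPRN_MMCR3, t->mmcr3);
		mtspr(SPRN_SIER2, t->sier2);
		mtspr(SPRN_SIER3, t->sier3);
	}
}

int main(void)
{
	struct task_pmu_state task = { 0 };

	mtspr(SPRN_MMCR3, 0x1234);		/* pretend hardware state */
	ebb_switch_out(&task, PPMU_ARCH_310S);	/* save into the task */
	mtspr(SPRN_MMCR3, 0);			/* another task ran */
	ebb_switch_in(&task, PPMU_ARCH_310S);	/* restore on switch-in */
	printf("restored MMCR3 = 0x%lx\n", mfspr(SPRN_MMCR3));
	return 0;
}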
@@ -843,6 +854,11 @@ void perf_event_print_debug(void)
pr_info("EBBRR: %016lx BESCR: %016lx\n",
mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
}
+
+ if (ppmu->flags & PPMU_ARCH_310S) {
+ pr_info("MMCR3: %016lx SIER2: %016lx SIER3: %016lx\n",
+ mfspr(SPRN_MMCR3), mfspr(SPRN_SIER2), mfspr(SPRN_SIER3));
+ }
#endif
pr_info("SIAR: %016lx SDAR: %016lx SIER: %016lx\n",
mfspr(SPRN_SIAR), sdar, sier);
@@ -1308,6 +1324,10 @@ static void power_pmu_enable(struct pmu *pmu)
if (!cpuhw->n_added) {
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+#ifdef CONFIG_PPC64
+ if (ppmu->flags & PPMU_ARCH_310S)
+ mtspr(SPRN_MMCR3, cpuhw->mmcr[4]);
+#endif /* CONFIG_PPC64 */
goto out_enable;
}
@@ -1351,6 +1371,11 @@ static void power_pmu_enable(struct pmu *pmu)
if (ppmu->flags & PPMU_ARCH_207S)
mtspr(SPRN_MMCR2, cpuhw->mmcr[3]);
+#ifdef CONFIG_PPC64
+ if (ppmu->flags & PPMU_ARCH_310S)
+ mtspr(SPRN_MMCR3, cpuhw->mmcr[4]);
+#endif /* CONFIG_PPC64 */
+
/*
* Read off any pre-existing events that need to move
* to another PMC.