@@ -1162,7 +1162,6 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val,
*/
if (record) {
struct perf_sample_data data = {
- .regs = regs,
.addr = 0,
.period = counter->hw.last_period,
};
@@ -1170,7 +1169,7 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val,
if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
- if (perf_counter_overflow(counter, nmi, &data)) {
+ if (perf_counter_overflow(counter, nmi, &data, regs)) {
/*
* Interrupts are coming too fast - throttle them
* by setting the counter to 0, so it will be
@@ -493,7 +493,6 @@ static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
regs = args->regs;
- data.regs = regs;
data.addr = 0;
cpuc = &__get_cpu_var(cpu_hw_counters);
@@ -513,7 +512,7 @@ static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
if (!sparc_perf_counter_set_period(counter, hwc, idx))
continue;
- if (perf_counter_overflow(counter, 1, &data))
+ if (perf_counter_overflow(counter, 1, &data, regs))
sparc_pmu_disable_counter(hwc, idx);
}
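
For reference, a minimal standalone sketch (not part of the patch) of the calling convention these hunks switch to: the register snapshot is no longer a member of perf_sample_data and is instead handed to the overflow handler as an explicit argument. The two-field structs and the counter_overflow() helper below are simplified stand-ins for the kernel types, chosen only to mirror the perf_counter_overflow(counter, nmi, &data, regs) call sites above.

#include <stdio.h>

struct pt_regs { unsigned long ip; };		/* stand-in register set */

struct perf_sample_data {			/* note: no .regs member */
	unsigned long addr;
	unsigned long long period;
};

/* Mirrors the new shape: regs arrives as a separate parameter rather
 * than being read out of the sample data. */
static int counter_overflow(int nmi, struct perf_sample_data *data,
			    struct pt_regs *regs)
{
	printf("nmi=%d addr=%#lx period=%llu ip=%#lx\n",
	       nmi, data->addr, data->period, regs->ip);
	return 0;				/* nonzero would mean "throttle" */
}

int main(void)
{
	struct pt_regs regs = { .ip = 0xc0000000UL };
	struct perf_sample_data data = {	/* built without regs ... */
		.addr	= 0,
		.period	= 1,
	};

	if (counter_overflow(1, &data, &regs))	/* ... regs passed alongside */
		printf("throttling: interrupts coming too fast\n");

	return 0;
}
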
@@ -849,23 +849,6 @@ static inline void perf_counter_comm(struct task_struct *tsk) { }
static inline void perf_counter_fork(struct task_struct *tsk) { }
static inline void perf_counter_init(void) { }
-static inline int
-perf_output_begin(struct perf_output_handle *handle, struct perf_counter *c,
- unsigned int size, int nmi, int sample) { }
-static inline void perf_output_end(struct perf_output_handle *handle) { }
-static inline void
-perf_output_copy(struct perf_output_handle *handle,
- const void *buf, unsigned int len) { }
-static inline void
-perf_output_sample(struct perf_output_handle *handle,
- struct perf_event_header *header,
- struct perf_sample_data *data,
- struct perf_counter *counter) { }
-static inline void
-perf_prepare_sample(struct perf_event_header *header,
- struct perf_sample_data *data,
- struct perf_counter *counter,
- struct pt_regs *regs) { }
#endif
#define perf_output_put(handle, x) \