@@ -64,6 +64,7 @@ config KPROBES
depends on MODULES
depends on HAVE_KPROBES
select KALLSYMS
+ select TASKS_RCU if PREEMPTION && HAVE_KPROBES_BOOSTER
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
@@ -189,6 +190,9 @@ config HAVE_KPROBES
config HAVE_KRETPROBES
bool
+config HAVE_KPROBES_BOOSTER
+ bool
+
config HAVE_OPTPROBES
bool
@@ -25,6 +25,7 @@ config IA64
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_KPROBES
+ select HAVE_KPROBES_BOOSTER
select HAVE_KRETPROBES
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
@@ -841,7 +841,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
return 1;
}
-#if !defined(CONFIG_PREEMPTION)
if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
ia64_psr(regs)->ri = p->ainsn.slot;
@@ -853,7 +852,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
preempt_enable_no_resched();
return 1;
}
-#endif
+
prepare_ss(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
@@ -181,6 +181,7 @@ config X86
select HAVE_KERNEL_LZO
select HAVE_KERNEL_XZ
select HAVE_KPROBES
+ select HAVE_KPROBES_BOOSTER
select HAVE_KPROBES_ON_FTRACE
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_KRETPROBES
@@ -587,7 +587,6 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
if (setup_detour_execution(p, regs, reenter))
return;
-#if !defined(CONFIG_PREEMPTION)
if (p->ainsn.boostable && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
if (!reenter)
@@ -600,7 +599,6 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
regs->ip = (unsigned long)p->ainsn.insn;
return;
}
-#endif
if (reenter) {
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
@@ -220,8 +220,8 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
struct kprobe_insn_page *kip, *next;
- /* Ensure no-one is interrupted on the garbages */
- synchronize_rcu();
+ /* Ensure no-one is running on the garbages. */
+ synchronize_rcu_tasks();
list_for_each_entry_safe(kip, next, &c->pages, list) {
int i;
As we did in commit a30b85df7d59 ("kprobes: Use synchronize_rcu_tasks() for optprobe with CONFIG_PREEMPT=y"), we can also enable kprobe-booster, which depends on a trampoline execution buffer, the same as optprobe. Before releasing the trampoline buffer (kprobe_insn_page), the garbage collector waits for all potentially preempted tasks on the trampoline buffer using synchronize_rcu_tasks() instead of synchronize_rcu(). This requires enabling CONFIG_TASKS_RCU=y too, so this also introduces HAVE_KPROBES_BOOSTER for the archs that support kprobe-booster (currently only x86 and ia64). If both CONFIG_PREEMPTION and HAVE_KPROBES_BOOSTER are y, CONFIG_KPROBES selects CONFIG_TASKS_RCU=y. Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org> --- arch/Kconfig | 4 ++++ arch/ia64/Kconfig | 1 + arch/ia64/kernel/kprobes.c | 3 +-- arch/x86/Kconfig | 1 + arch/x86/kernel/kprobes/core.c | 2 -- kernel/kprobes.c | 4 ++-- 6 files changed, 9 insertions(+), 6 deletions(-)