--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -19,6 +19,7 @@
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
+#include <linux/bug.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
@@ -138,13 +139,21 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe);
void arch_arm_kprobe(struct kprobe *p)
{
- patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
+ int rc = patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
+
+ if (WARN_ON(rc))
+ pr_err("Failed to patch trap at 0x%pK: %d\n",
+ (void *)p->addr, rc);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
- patch_instruction(p->addr, p->opcode);
+ int rc = patch_instruction(p->addr, p->opcode);
+
+ if (WARN_ON(rc))
+ pr_err("Failed to remove trap at 0x%pK: %d\n",
+ (void *)p->addr, rc);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -139,52 +139,80 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
}
}
+#define PATCH_INSN_OR_GOTO(addr, instr, label) \
+do { \
+ int rc = patch_instruction((unsigned int *)(addr), instr); \
+ if (rc) { \
+ pr_err("%s:%d: Error patching instruction at 0x%pK (%pS): %d\n", \
+ __func__, __LINE__, \
+ (void *)(addr), (void *)(addr), rc); \
+ goto label; \
+ } \
+} while (0)
+
/*
* emulate_step() requires insn to be emulated as
* second parameter. Load register 'r4' with the
* instruction.
*/
-void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
+static int patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
/* addis r4,0,(insn)@h */
- patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
- ((val >> 16) & 0xffff));
+ PATCH_INSN_OR_GOTO(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
+ ((val >> 16) & 0xffff),
+ patch_err);
addr++;
/* ori r4,r4,(insn)@l */
- patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
- ___PPC_RS(4) | (val & 0xffff));
+ PATCH_INSN_OR_GOTO(addr, PPC_INST_ORI | ___PPC_RA(4) |
+ ___PPC_RS(4) | (val & 0xffff),
+ patch_err);
+
+ return 0;
+
+patch_err:
+ return -EFAULT;
}
/*
* Generate instructions to load provided immediate 64-bit value
* to register 'r3' and patch these instructions at 'addr'.
*/
-void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
+static int patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
/* lis r3,(op)@highest */
- patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
- ((val >> 48) & 0xffff));
+ PATCH_INSN_OR_GOTO(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
+ ((val >> 48) & 0xffff),
+ patch_err);
addr++;
/* ori r3,r3,(op)@higher */
- patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
- ___PPC_RS(3) | ((val >> 32) & 0xffff));
+ PATCH_INSN_OR_GOTO(addr, PPC_INST_ORI | ___PPC_RA(3) |
+ ___PPC_RS(3) | ((val >> 32) & 0xffff),
+ patch_err);
addr++;
/* rldicr r3,r3,32,31 */
- patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
- ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
+ PATCH_INSN_OR_GOTO(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
+ ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31),
+ patch_err);
addr++;
/* oris r3,r3,(op)@h */
- patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
- ___PPC_RS(3) | ((val >> 16) & 0xffff));
+ PATCH_INSN_OR_GOTO(addr, PPC_INST_ORIS | ___PPC_RA(3) |
+ ___PPC_RS(3) | ((val >> 16) & 0xffff),
+ patch_err);
addr++;
/* ori r3,r3,(op)@l */
- patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
- ___PPC_RS(3) | (val & 0xffff));
+ PATCH_INSN_OR_GOTO(addr, PPC_INST_ORI | ___PPC_RA(3) |
+ ___PPC_RS(3) | (val & 0xffff),
+ patch_err);
+
+ return 0;
+
+patch_err:
+ return -EFAULT;
}
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
@@ -216,30 +244,33 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
* be within 32MB on either side of the current instruction.
*/
b_offset = (unsigned long)buff - (unsigned long)p->addr;
- if (!is_offset_in_branch_range(b_offset))
+ if (!is_offset_in_branch_range(b_offset)) {
+ rc = -ERANGE;
goto error;
+ }
/* Check if the return address is also within 32MB range */
b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
(unsigned long)nip;
- if (!is_offset_in_branch_range(b_offset))
+ if (!is_offset_in_branch_range(b_offset)) {
+ rc = -ERANGE;
goto error;
+ }
/* Setup template */
/* We can optimize this via patch_instruction_window later */
size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
pr_devel("Copying template to %p, size %lu\n", buff, size);
- for (i = 0; i < size; i++) {
- rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
- if (rc < 0)
- goto error;
- }
+ for (i = 0; i < size; i++)
+ PATCH_INSN_OR_GOTO(buff + i, *(optprobe_template_entry + i),
+ patch_err);
/*
* Fixup the template with instructions to:
* 1. load the address of the actual probepoint
*/
- patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);
+ if (patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX))
+ goto patch_err;
/*
* 2. branch to optimized_callback() and emulate_step()
@@ -248,6 +279,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
if (!op_callback_addr || !emulate_step_addr) {
WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
+ rc = -ERANGE;
goto error;
}
@@ -259,21 +291,29 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
(unsigned long)emulate_step_addr,
BRANCH_SET_LINK);
- if (!branch_op_callback || !branch_emulate_step)
+ if (!branch_op_callback || !branch_emulate_step) {
+ rc = -ERANGE;
goto error;
+ }
+
- patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
- patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
+ PATCH_INSN_OR_GOTO(buff + TMPL_CALL_HDLR_IDX, branch_op_callback,
+ patch_err);
+ PATCH_INSN_OR_GOTO(buff + TMPL_EMULATE_IDX, branch_emulate_step,
+ patch_err);
/*
* 3. load instruction to be emulated into relevant register, and
*/
- patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);
+ if (patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX))
+ goto patch_err;
/*
* 4. branch back from trampoline
*/
- patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);
+ PATCH_INSN_OR_GOTO(buff + TMPL_RET_IDX,
+ create_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0),
+ patch_err);
flush_icache_range((unsigned long)buff,
(unsigned long)(&buff[TMPL_END_IDX]));
@@ -282,9 +322,11 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
return 0;
+patch_err:
+ rc = -EFAULT;
error:
free_ppc_optinsn_slot(buff, 0);
- return -ERANGE;
+ return rc;
}
@@ -307,6 +349,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
{
struct optimized_kprobe *op;
struct optimized_kprobe *tmp;
+ int rc;
list_for_each_entry_safe(op, tmp, oplist, list) {
/*
@@ -315,9 +358,13 @@ void arch_optimize_kprobes(struct list_head *oplist)
*/
memcpy(op->optinsn.copied_insn, op->kp.addr,
RELATIVEJUMP_SIZE);
- patch_instruction(op->kp.addr,
+ rc = patch_instruction(op->kp.addr,
create_branch((unsigned int *)op->kp.addr,
(unsigned long)op->optinsn.insn, 0));
+ if (rc)
+ pr_err("%s:%d: Error patching instruction at 0x%pK: %d\n",
+ __func__, __LINE__,
+ (void *)(op->kp.addr), rc);
list_del_init(&op->list);
}
}
patch_instruction() can fail in some scenarios. Add appropriate error
checking so that such failures are caught and logged, and a suitable
error code is returned.

Fixes: d07df82c43be8 ("powerpc/kprobes: Move kprobes over to patch_instruction()")
Fixes: f3eca95638931 ("powerpc/kprobes/optprobes: Use patch_instruction()")
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/kernel/kprobes.c   |  13 +++-
 arch/powerpc/kernel/optprobes.c | 109 +++++++++++++++++++++++---------
 2 files changed, 89 insertions(+), 33 deletions(-)
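For reviewers unfamiliar with the idioms above, two userspace sketches
follow. These are illustrative only: try_patch() and TRY_PATCH_OR_GOTO()
are hypothetical stand-ins for patch_instruction() and the new
PATCH_INSN_OR_GOTO() macro, not kernel interfaces.

	#include <stdio.h>

	/* Stand-in for patch_instruction(): 0 on success, negative on error. */
	static int try_patch(unsigned int *addr, unsigned int insn)
	{
		*addr = insn;
		return 0;
	}

	/*
	 * The do-while(0) wrapper makes the macro a single statement, so it
	 * is safe as the body of the unbraced for loop in
	 * arch_prepare_optimized_kprobe(). On failure it logs the failing
	 * call site and jumps to the caller-supplied cleanup label. (This
	 * sketch uses __rc to avoid shadowing a caller's variable; the
	 * in-kernel macro's plain rc is harmless since its callers only
	 * assign rc on the error path.)
	 */
	#define TRY_PATCH_OR_GOTO(addr, insn, label)			\
	do {								\
		int __rc = try_patch(addr, insn);			\
		if (__rc) {						\
			fprintf(stderr, "%s:%d: patch failed: %d\n",	\
				__func__, __LINE__, __rc);		\
			goto label;					\
		}							\
	} while (0)

	int main(void)
	{
		unsigned int slot[2];

		/* 0x60000000 is a powerpc nop, used here as dummy data. */
		TRY_PATCH_OR_GOTO(&slot[0], 0x60000000, patch_err);
		TRY_PATCH_OR_GOTO(&slot[1], 0x60000000, patch_err);
		return 0;

	patch_err:
		return 1;
	}

And a C model of the five-instruction sequence that
patch_imm64_load_insns() emits, one line per patched instruction
(assumes a 64-bit unsigned long, as on ppc64):

	#include <assert.h>

	int main(void)
	{
		unsigned long val = 0x1122334455667788UL;
		unsigned long r3;

		r3 = ((val >> 48) & 0xffff) << 16;	/* lis    r3,val@highest */
		r3 |= (val >> 32) & 0xffff;		/* ori    r3,r3,val@higher */
		r3 <<= 32;				/* rldicr r3,r3,32,31 */
		r3 |= ((val >> 16) & 0xffff) << 16;	/* oris   r3,r3,val@h */
		r3 |= val & 0xffff;			/* ori    r3,r3,val@l */

		assert(r3 == val);
		return 0;
	}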