Message ID | 20200226155714.43937-1-broonie@kernel.org |
---|---|
State | New |
Headers | show |
On Wed, Feb 26, 2020 at 03:57:14PM +0000, Mark Brown wrote: > v8.5-BTI introduces the GP field in stage 1 translation tables which > indicates that blocks and pages with it set are guarded pages for which > branch target identification checks should be performed. Decode this > when dumping the page tables to aid debugging. > > Signed-off-by: Mark Brown <broonie@kernel.org> Reviewed-by: Kees Cook <keescook@chromium.org> -Kees > --- > arch/arm64/mm/dump.c | 5 +++++ > 1 file changed, 5 insertions(+) > > diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c > index 860c00ec8bd3..78163b7a7dde 100644 > --- a/arch/arm64/mm/dump.c > +++ b/arch/arm64/mm/dump.c > @@ -145,6 +145,11 @@ static const struct prot_bits pte_bits[] = { > .val = PTE_UXN, > .set = "UXN", > .clear = " ", > + }, { > + .mask = PTE_GP, > + .val = PTE_GP, > + .set = "GP", > + .clear = " ", > }, { > .mask = PTE_ATTRINDX_MASK, > .val = PTE_ATTRINDX(MT_DEVICE_nGnRnE), > -- > 2.20.1 >
On Wed, Feb 26, 2020 at 03:57:11PM +0000, Mark Brown wrote: > From: Dave Martin <Dave.Martin@arm.com> > > Hoist the IT state handling code earlier in traps.c, to avoid > accumulating forward declarations. > > No functional change. > > Signed-off-by: Dave Martin <Dave.Martin@arm.com> Reviewed-by: Kees Cook <keescook@chromium.org> -Kees > Signed-off-by: Mark Brown <broonie@kernel.org> > --- > arch/arm64/kernel/traps.c | 103 ++++++++++++++++++-------------------- > 1 file changed, 50 insertions(+), 53 deletions(-) > > diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c > index bc9f4292bfc3..3c07a7074145 100644 > --- a/arch/arm64/kernel/traps.c > +++ b/arch/arm64/kernel/traps.c > @@ -272,7 +272,55 @@ void arm64_notify_die(const char *str, struct pt_regs *regs, > } > } > > -static void advance_itstate(struct pt_regs *regs); > +#ifdef CONFIG_COMPAT > +#define PSTATE_IT_1_0_SHIFT 25 > +#define PSTATE_IT_1_0_MASK (0x3 << PSTATE_IT_1_0_SHIFT) > +#define PSTATE_IT_7_2_SHIFT 10 > +#define PSTATE_IT_7_2_MASK (0x3f << PSTATE_IT_7_2_SHIFT) > + > +static u32 compat_get_it_state(struct pt_regs *regs) > +{ > + u32 it, pstate = regs->pstate; > + > + it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT; > + it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2; > + > + return it; > +} > + > +static void compat_set_it_state(struct pt_regs *regs, u32 it) > +{ > + u32 pstate_it; > + > + pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK; > + pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK; > + > + regs->pstate &= ~PSR_AA32_IT_MASK; > + regs->pstate |= pstate_it; > +} > + > +static void advance_itstate(struct pt_regs *regs) > +{ > + u32 it; > + > + /* ARM mode */ > + if (!(regs->pstate & PSR_AA32_T_BIT) || > + !(regs->pstate & PSR_AA32_IT_MASK)) > + return; > + > + it = compat_get_it_state(regs); > + > + /* > + * If this is the last instruction of the block, wipe the IT > + * state. Otherwise advance it. 
> + */ > + if (!(it & 7)) > + it = 0; > + else > + it = (it & 0xe0) | ((it << 1) & 0x1f); > + > + compat_set_it_state(regs, it); > +} > > void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size) > { > @@ -285,7 +333,7 @@ void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size) > if (user_mode(regs)) > user_fastforward_single_step(current); > > - if (regs->pstate & PSR_MODE32_BIT) > + if (compat_user_mode(regs)) > advance_itstate(regs); > } > > @@ -578,34 +626,6 @@ static const struct sys64_hook sys64_hooks[] = { > {}, > }; > > - > -#ifdef CONFIG_COMPAT > -#define PSTATE_IT_1_0_SHIFT 25 > -#define PSTATE_IT_1_0_MASK (0x3 << PSTATE_IT_1_0_SHIFT) > -#define PSTATE_IT_7_2_SHIFT 10 > -#define PSTATE_IT_7_2_MASK (0x3f << PSTATE_IT_7_2_SHIFT) > - > -static u32 compat_get_it_state(struct pt_regs *regs) > -{ > - u32 it, pstate = regs->pstate; > - > - it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT; > - it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2; > - > - return it; > -} > - > -static void compat_set_it_state(struct pt_regs *regs, u32 it) > -{ > - u32 pstate_it; > - > - pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK; > - pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK; > - > - regs->pstate &= ~PSR_AA32_IT_MASK; > - regs->pstate |= pstate_it; > -} > - > static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs) > { > int cond; > @@ -626,29 +646,6 @@ static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs) > return aarch32_opcode_cond_checks[cond](regs->pstate); > } > > -static void advance_itstate(struct pt_regs *regs) > -{ > - u32 it; > - > - /* ARM mode */ > - if (!(regs->pstate & PSR_AA32_T_BIT) || > - !(regs->pstate & PSR_AA32_IT_MASK)) > - return; > - > - it = compat_get_it_state(regs); > - > - /* > - * If this is the last instruction of the block, wipe the IT > - * state. Otherwise advance it. 
> - */ > - if (!(it & 7)) > - it = 0; > - else > - it = (it & 0xe0) | ((it << 1) & 0x1f); > - > - compat_set_it_state(regs, it); > -} > - > static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) > { > int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT; > -- > 2.20.1 >
On Wed, Feb 26, 2020 at 03:57:10PM +0000, Mark Brown wrote: > From: Dave Martin <Dave.Martin@arm.com> > > Skipping of an instruction on AArch32 works a bit differently from > AArch64, mainly due to the different CPSR/PSTATE semantics. > > Currently arm64_skip_faulting_instruction() is only suitable for > AArch64, and arm64_compat_skip_faulting_instruction() handles the IT > state machine but is local to traps.c. > > Since manual instruction skipping implies a trap, it's a relatively > slow path. > > So, make arm64_skip_faulting_instruction() handle both compat and > native, and get rid of the arm64_compat_skip_faulting_instruction() > special case. > > Signed-off-by: Dave Martin <Dave.Martin@arm.com> Reviewed-by: Kees Cook <keescook@chromium.org> -Kees > Reviewed-by: Mark Rutland <mark.rutland@arm.com> > Signed-off-by: Mark Brown <broonie@kernel.org> > --- > arch/arm64/kernel/traps.c | 18 ++++++++---------- > 1 file changed, 8 insertions(+), 10 deletions(-) > > diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c > index b8c714dda851..bc9f4292bfc3 100644 > --- a/arch/arm64/kernel/traps.c > +++ b/arch/arm64/kernel/traps.c > @@ -272,6 +272,8 @@ void arm64_notify_die(const char *str, struct pt_regs *regs, > } > } > > +static void advance_itstate(struct pt_regs *regs); > + > void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size) > { > regs->pc += size; > @@ -282,6 +284,9 @@ void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size) > */ > if (user_mode(regs)) > user_fastforward_single_step(current); > + > + if (regs->pstate & PSR_MODE32_BIT) > + advance_itstate(regs); > } > > static LIST_HEAD(undef_hook); > @@ -644,19 +649,12 @@ static void advance_itstate(struct pt_regs *regs) > compat_set_it_state(regs, it); > } > > -static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs, > - unsigned int sz) > -{ > - advance_itstate(regs); > - arm64_skip_faulting_instruction(regs, sz); > -} > - > static void 
compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) > { > int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT; > > pt_regs_write_reg(regs, reg, arch_timer_get_rate()); > - arm64_compat_skip_faulting_instruction(regs, 4); > + arm64_skip_faulting_instruction(regs, 4); > } > > static const struct sys64_hook cp15_32_hooks[] = { > @@ -676,7 +674,7 @@ static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs) > > pt_regs_write_reg(regs, rt, lower_32_bits(val)); > pt_regs_write_reg(regs, rt2, upper_32_bits(val)); > - arm64_compat_skip_faulting_instruction(regs, 4); > + arm64_skip_faulting_instruction(regs, 4); > } > > static const struct sys64_hook cp15_64_hooks[] = { > @@ -697,7 +695,7 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs) > * There is no T16 variant of a CP access, so we > * always advance PC by 4 bytes. > */ > - arm64_compat_skip_faulting_instruction(regs, 4); > + arm64_skip_faulting_instruction(regs, 4); > return; > } > > -- > 2.20.1 >
On Wed, Feb 26, 2020 at 03:57:09PM +0000, Mark Brown wrote: > From: Dave Martin <Dave.Martin@arm.com> > > The current code to print PSTATE symbolically when generating > backtraces etc., does not include the BTYPE field used by Branch > Target Identification. > > So, decode BTYPE and print it too. > > In the interests of human-readability, print the classes of BTI > matched. The symbolic notation, BTYPE (PSTATE[11:10]) and > permitted classes of subsequent instruction are: > > -- (BTYPE=0b00): any insn > jc (BTYPE=0b01): BTI jc, BTI j, BTI c, PACIxSP > -c (BTYPE=0b10): BTI jc, BTI c, PACIxSP > j- (BTYPE=0b11): BTI jc, BTI j > > Signed-off-by: Dave Martin <Dave.Martin@arm.com> Reviewed-by: Kees Cook <keescook@chromium.org> -Kees > Signed-off-by: Mark Brown <broonie@kernel.org> > --- > arch/arm64/kernel/process.c | 17 +++++++++++++++-- > 1 file changed, 15 insertions(+), 2 deletions(-) > > diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c > index b8e3faa8d406..24af13d7bde6 100644 > --- a/arch/arm64/kernel/process.c > +++ b/arch/arm64/kernel/process.c > @@ -211,6 +211,15 @@ void machine_restart(char *cmd) > while (1); > } > > +#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str > +static const char *const btypes[] = { > + bstr(NONE, "--"), > + bstr( JC, "jc"), > + bstr( C, "-c"), > + bstr( J , "j-") > +}; > +#undef bstr > + > static void print_pstate(struct pt_regs *regs) > { > u64 pstate = regs->pstate; > @@ -229,7 +238,10 @@ static void print_pstate(struct pt_regs *regs) > pstate & PSR_AA32_I_BIT ? 'I' : 'i', > pstate & PSR_AA32_F_BIT ? 'F' : 'f'); > } else { > - printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n", > + const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >> > + PSR_BTYPE_SHIFT]; > + > + printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO BTYPE=%s)\n", > pstate, > pstate & PSR_N_BIT ? 'N' : 'n', > pstate & PSR_Z_BIT ?
'Z' : 'z', > @@ -240,7 +252,8 @@ static void print_pstate(struct pt_regs *regs) > pstate & PSR_I_BIT ? 'I' : 'i', > pstate & PSR_F_BIT ? 'F' : 'f', > pstate & PSR_PAN_BIT ? '+' : '-', > - pstate & PSR_UAO_BIT ? '+' : '-'); > + pstate & PSR_UAO_BIT ? '+' : '-', > + btype_str); > } > } > > -- > 2.20.1 >
On Wed, Feb 26, 2020 at 03:57:08PM +0000, Mark Brown wrote: > From: Dave Martin <Dave.Martin@arm.com> > > For BTI protection to be as comprehensive as possible, it is > desirable to have BTI enabled from process startup. If this is not > done, the process must use mprotect() to enable BTI for each of its > executable mappings, but this is painful to do in the libc startup > code. It's simpler and more sound to have the kernel do it > instead. > > To this end, detect BTI support in the executable (or ELF > interpreter, as appropriate), via the > NT_GNU_PROGRAM_PROPERTY_TYPE_0 note, and tweak the initial prot > flags for the process' executable pages to include PROT_BTI as > appropriate. > > Signed-off-by: Dave Martin <Dave.Martin@arm.com> Reviewed-by: Kees Cook <keescook@chromium.org> -Kees > Signed-off-by: Mark Brown <broonie@kernel.org> > --- > arch/arm64/Kconfig | 3 +++ > arch/arm64/include/asm/elf.h | 51 ++++++++++++++++++++++++++++++++++++ > arch/arm64/kernel/process.c | 19 ++++++++++++++ > include/uapi/linux/elf.h | 6 +++++ > 4 files changed, 79 insertions(+) > > diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig > index e37f4f07b990..d65d226a77ec 100644 > --- a/arch/arm64/Kconfig > +++ b/arch/arm64/Kconfig > @@ -9,6 +9,7 @@ config ARM64 > select ACPI_MCFG if (ACPI && PCI) > select ACPI_SPCR_TABLE if ACPI > select ACPI_PPTT if ACPI > + select ARCH_BINFMT_ELF_STATE > select ARCH_CLOCKSOURCE_DATA > select ARCH_HAS_DEBUG_VIRTUAL > select ARCH_HAS_DEVMEM_IS_ALLOWED > @@ -33,6 +34,7 @@ config ARM64 > select ARCH_HAS_SYSCALL_WRAPPER > select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT > select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST > + select ARCH_HAVE_ELF_PROT > select ARCH_HAVE_NMI_SAFE_CMPXCHG > select ARCH_INLINE_READ_LOCK if !PREEMPTION > select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION > @@ -62,6 +64,7 @@ config ARM64 > select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION > select ARCH_KEEP_MEMBLOCK > select ARCH_USE_CMPXCHG_LOCKREF > + 
select ARCH_USE_GNU_PROPERTY if BINFMT_ELF > select ARCH_USE_QUEUED_RWLOCKS > select ARCH_USE_QUEUED_SPINLOCKS > select ARCH_SUPPORTS_MEMORY_FAILURE > diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h > index b618017205a3..c72e381fa86d 100644 > --- a/arch/arm64/include/asm/elf.h > +++ b/arch/arm64/include/asm/elf.h > @@ -114,7 +114,11 @@ > > #ifndef __ASSEMBLY__ > > +#include <uapi/linux/elf.h> > #include <linux/bug.h> > +#include <linux/errno.h> > +#include <linux/fs.h> > +#include <linux/types.h> > #include <asm/processor.h> /* for signal_minsigstksz, used by ARCH_DLINFO */ > > typedef unsigned long elf_greg_t; > @@ -224,6 +228,53 @@ extern int aarch32_setup_additional_pages(struct linux_binprm *bprm, > > #endif /* CONFIG_COMPAT */ > > +struct arch_elf_state { > + int flags; > +}; > + > +#define ARM64_ELF_BTI (1 << 0) > + > +#define INIT_ARCH_ELF_STATE { \ > + .flags = 0, \ > +} > + > +static inline int arch_parse_elf_property(u32 type, const void *data, > + size_t datasz, bool compat, > + struct arch_elf_state *arch) > +{ > + /* No known properties for AArch32 yet */ > + if (IS_ENABLED(CONFIG_COMPAT) && compat) > + return 0; > + > + if (type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { > + const u32 *p = data; > + > + if (datasz != sizeof(*p)) > + return -ENOEXEC; > + > + if (IS_ENABLED(CONFIG_ARM64_BTI) && > + system_supports_bti() && > + (*p & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)) > + arch->flags |= ARM64_ELF_BTI; > + } > + > + return 0; > +} > + > +static inline int arch_elf_pt_proc(void *ehdr, void *phdr, > + struct file *f, bool is_interp, > + struct arch_elf_state *state) > +{ > + return 0; > +} > + > +static inline int arch_check_elf(void *ehdr, bool has_interp, > + void *interp_ehdr, > + struct arch_elf_state *state) > +{ > + return 0; > +} > + > #endif /* !__ASSEMBLY__ */ > > #endif > diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c > index 00626057a384..b8e3faa8d406 100644 > --- a/arch/arm64/kernel/process.c > 
+++ b/arch/arm64/kernel/process.c > @@ -11,6 +11,7 @@ > > #include <linux/compat.h> > #include <linux/efi.h> > +#include <linux/elf.h> > #include <linux/export.h> > #include <linux/sched.h> > #include <linux/sched/debug.h> > @@ -18,6 +19,7 @@ > #include <linux/sched/task_stack.h> > #include <linux/kernel.h> > #include <linux/lockdep.h> > +#include <linux/mman.h> > #include <linux/mm.h> > #include <linux/stddef.h> > #include <linux/sysctl.h> > @@ -654,3 +656,20 @@ asmlinkage void __sched arm64_preempt_schedule_irq(void) > if (system_capabilities_finalized()) > preempt_schedule_irq(); > } > + > +#ifdef CONFIG_BINFMT_ELF > +int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state, > + bool has_interp, bool is_interp) > +{ > + if (is_interp != has_interp) > + return prot; > + > + if (!(state->flags & ARM64_ELF_BTI)) > + return prot; > + > + if (prot & PROT_EXEC) > + prot |= PROT_BTI; > + > + return prot; > +} > +#endif > diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h > index 20900f4496b7..c6dd0215482e 100644 > --- a/include/uapi/linux/elf.h > +++ b/include/uapi/linux/elf.h > @@ -448,4 +448,10 @@ typedef struct elf64_note { > Elf64_Word n_type; /* Content type */ > } Elf64_Nhdr; > > +/* .note.gnu.property types for EM_AARCH64: */ > +#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000 > + > +/* Bits for GNU_PROPERTY_AARCH64_FEATURE_1_BTI */ > +#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0) > + > #endif /* _UAPI_LINUX_ELF_H */ > -- > 2.20.1 >
On Wed, Feb 26, 2020 at 03:57:03PM +0000, Mark Brown wrote: > This patch series implements support for ARMv8.5-A Branch Target > Identification (BTI), which is a control flow integrity protection > feature introduced as part of the ARMv8.5-A extensions. > > Changes: > > v7: > - Rebase onto v5.6-rc3. > - Move comment about keeping NT_GNU_PROPERTY_TYPE_0 internal into first > patch. > - Add an explicit check for system_supports_bti() when parsing BTI ELF > property for improved robustness. Looks good. I sent a few more Reviewed-bys where I could. Who is expected to pick this up? Catalin? Will? I'm excited to have both the ELF parser and BTI landed. :) -Kees
Hi Mark, On 2/26/20 9:27 PM, Mark Brown wrote: > From: Dave Martin <Dave.Martin@arm.com> > > For BTI protection to be as comprehensive as possible, it is > desirable to have BTI enabled from process startup. If this is not > done, the process must use mprotect() to enable BTI for each of its > executable mappings, but this is painful to do in the libc startup > code. It's simpler and more sound to have the kernel do it > instead. > > To this end, detect BTI support in the executable (or ELF > interpreter, as appropriate), via the > NT_GNU_PROGRAM_PROPERTY_TYPE_0 note, and tweak the initial prot > flags for the process' executable pages to include PROT_BTI as > appropriate. > > Signed-off-by: Dave Martin <Dave.Martin@arm.com> > Signed-off-by: Mark Brown <broonie@kernel.org> > --- > arch/arm64/Kconfig | 3 +++ > arch/arm64/include/asm/elf.h | 51 ++++++++++++++++++++++++++++++++++++ > arch/arm64/kernel/process.c | 19 ++++++++++++++ > include/uapi/linux/elf.h | 6 +++++ > 4 files changed, 79 insertions(+) > > diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig > index e37f4f07b990..d65d226a77ec 100644 > --- a/arch/arm64/Kconfig > +++ b/arch/arm64/Kconfig > @@ -9,6 +9,7 @@ config ARM64 > select ACPI_MCFG if (ACPI && PCI) > select ACPI_SPCR_TABLE if ACPI > select ACPI_PPTT if ACPI > + select ARCH_BINFMT_ELF_STATE > select ARCH_CLOCKSOURCE_DATA > select ARCH_HAS_DEBUG_VIRTUAL > select ARCH_HAS_DEVMEM_IS_ALLOWED > @@ -33,6 +34,7 @@ config ARM64 > select ARCH_HAS_SYSCALL_WRAPPER > select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT > select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST > + select ARCH_HAVE_ELF_PROT > select ARCH_HAVE_NMI_SAFE_CMPXCHG > select ARCH_INLINE_READ_LOCK if !PREEMPTION > select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION > @@ -62,6 +64,7 @@ config ARM64 > select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION > select ARCH_KEEP_MEMBLOCK > select ARCH_USE_CMPXCHG_LOCKREF > + select ARCH_USE_GNU_PROPERTY if BINFMT_ELF > select 
ARCH_USE_QUEUED_RWLOCKS > select ARCH_USE_QUEUED_SPINLOCKS > select ARCH_SUPPORTS_MEMORY_FAILURE > diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h > index b618017205a3..c72e381fa86d 100644 > --- a/arch/arm64/include/asm/elf.h > +++ b/arch/arm64/include/asm/elf.h > @@ -114,7 +114,11 @@ > > #ifndef __ASSEMBLY__ > > +#include <uapi/linux/elf.h> > #include <linux/bug.h> > +#include <linux/errno.h> > +#include <linux/fs.h> > +#include <linux/types.h> > #include <asm/processor.h> /* for signal_minsigstksz, used by ARCH_DLINFO */ > > typedef unsigned long elf_greg_t; > @@ -224,6 +228,53 @@ extern int aarch32_setup_additional_pages(struct linux_binprm *bprm, > > #endif /* CONFIG_COMPAT */ > > +struct arch_elf_state { > + int flags; > +}; > + > +#define ARM64_ELF_BTI (1 << 0) > + > +#define INIT_ARCH_ELF_STATE { \ > + .flags = 0, \ > +} > + > +static inline int arch_parse_elf_property(u32 type, const void *data, > + size_t datasz, bool compat, > + struct arch_elf_state *arch) > +{ > + /* No known properties for AArch32 yet */ > + if (IS_ENABLED(CONFIG_COMPAT) && compat) > + return 0; > + > + if (type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { > + const u32 *p = data; > + > + if (datasz != sizeof(*p)) > + return -ENOEXEC; > + > + if (IS_ENABLED(CONFIG_ARM64_BTI) && > + system_supports_bti() && system_supports_bti() has inbuilt CONFIG_ARM64_BTI config check. For all the patch in the series. 
Reviewed-by: Amit Daniel Kachhap <amit.kachhap@arm.com> Cheers, Amit > + (*p & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)) > + arch->flags |= ARM64_ELF_BTI; > + } > + > + return 0; > +} > + > +static inline int arch_elf_pt_proc(void *ehdr, void *phdr, > + struct file *f, bool is_interp, > + struct arch_elf_state *state) > +{ > + return 0; > +} > + > +static inline int arch_check_elf(void *ehdr, bool has_interp, > + void *interp_ehdr, > + struct arch_elf_state *state) > +{ > + return 0; > +} > + > #endif /* !__ASSEMBLY__ */ > > #endif > diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c > index 00626057a384..b8e3faa8d406 100644 > --- a/arch/arm64/kernel/process.c > +++ b/arch/arm64/kernel/process.c > @@ -11,6 +11,7 @@ > > #include <linux/compat.h> > #include <linux/efi.h> > +#include <linux/elf.h> > #include <linux/export.h> > #include <linux/sched.h> > #include <linux/sched/debug.h> > @@ -18,6 +19,7 @@ > #include <linux/sched/task_stack.h> > #include <linux/kernel.h> > #include <linux/lockdep.h> > +#include <linux/mman.h> > #include <linux/mm.h> > #include <linux/stddef.h> > #include <linux/sysctl.h> > @@ -654,3 +656,20 @@ asmlinkage void __sched arm64_preempt_schedule_irq(void) > if (system_capabilities_finalized()) > preempt_schedule_irq(); > } > + > +#ifdef CONFIG_BINFMT_ELF > +int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state, > + bool has_interp, bool is_interp) > +{ > + if (is_interp != has_interp) > + return prot; > + > + if (!(state->flags & ARM64_ELF_BTI)) > + return prot; > + > + if (prot & PROT_EXEC) > + prot |= PROT_BTI; > + > + return prot; > +} > +#endif > diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h > index 20900f4496b7..c6dd0215482e 100644 > --- a/include/uapi/linux/elf.h > +++ b/include/uapi/linux/elf.h > @@ -448,4 +448,10 @@ typedef struct elf64_note { > Elf64_Word n_type; /* Content type */ > } Elf64_Nhdr; > > +/* .note.gnu.property types for EM_AARCH64: */ > +#define 
GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000 > + > +/* Bits for GNU_PROPERTY_AARCH64_FEATURE_1_BTI */ > +#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0) > + > #endif /* _UAPI_LINUX_ELF_H */ >
On Wed, Feb 26, 2020 at 01:44:59PM -0800, Kees Cook wrote: > Looks good. I sent a few more Reviewed-bys where I could. Who is > expected to pick this up? Catalin? Will? Thanks, I'm expecting it'll go through the arm64 tree.