@@ -202,6 +202,7 @@ config PPC
select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_PCI_IOMAP if PCI
select GENERIC_PTDUMP
+ select GENERIC_ENTRY
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select GENERIC_VDSO_TIME_NS
@@ -216,6 +216,11 @@ static inline bool arch_irqs_disabled(void)
return arch_irqs_disabled_flags(arch_local_save_flags());
}
+/* Required by the generic entry code */
+static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
+{
+ return arch_irqs_disabled();
+}
static inline void set_pmi_irq_pending(void)
{
/*
@@ -383,6 +383,12 @@ int validate_sp(unsigned long sp, struct task_struct *p);
int validate_sp_size(unsigned long sp, struct task_struct *p,
unsigned long nbytes);
+/* Required by the generic entry code */
+static __always_inline bool on_thread_stack(void)
+{
+ return validate_sp(current_stack_pointer, current);
+}
+
/*
* Prefetch macros.
*/
@@ -119,4 +119,9 @@ static inline int syscall_get_arch(struct task_struct *task)
else
return AUDIT_ARCH_PPC64;
}
+
+static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
+{
+ return false;
+}
#endif /* _ASM_SYSCALL_H */
@@ -58,6 +58,7 @@ struct thread_info {
unsigned int cpu;
#endif
unsigned long local_flags; /* private flags for thread */
+ unsigned long syscall_work; /* SYSCALL_WORK flags */
#ifdef CONFIG_LIVEPATCH_64
unsigned long *livepatch_sp;
#endif
@@ -3,6 +3,7 @@
#include <linux/compat.h>
#include <linux/context_tracking.h>
#include <linux/randomize_kstack.h>
+#include <linux/entry-common.h>
#include <asm/interrupt.h>
#include <asm/kup.h>
@@ -131,7 +132,7 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
* and the test against NR_syscalls will fail and the return
* value to be used is in regs->gpr[3].
*/
- r0 = do_syscall_trace_enter(regs);
+ r0 = syscall_enter_from_user_mode(regs, r0);
if (unlikely(r0 >= NR_syscalls))
return regs->gpr[3];
@@ -184,6 +185,8 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
* So the resulting 6 or 7 bits of entropy is seen in SP[9:4] or SP[9:3].
*/
choose_random_kstack_offset(mftb());
+ /* Common entry: handle syscall exit work */
+ syscall_exit_to_user_mode(regs);
return ret;
}
@@ -23,6 +23,7 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
+#include <linux/entry-common.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
@@ -577,7 +578,9 @@ static __always_inline void __do_page_fault(struct pt_regs *regs)
DEFINE_INTERRUPT_HANDLER(do_page_fault)
{
+ irqentry_state_t state = irqentry_enter(regs);
__do_page_fault(regs);
+ irqentry_exit(regs, state);
}
#ifdef CONFIG_PPC_BOOK3S_64
Convert the powerpc entry code for syscalls and page faults to use
syscall_work and irqentry_state, as well as the common calls
implemented in the generic entry infrastructure.

Signed-off-by: Luming Yu <luming.yu@shingroup.cn>
---
v1->v2: fix ppc syscall entry seccomp_bpf test errors seen with the
        common entry conversion.
---
 arch/powerpc/Kconfig                   | 1 +
 arch/powerpc/include/asm/hw_irq.h      | 5 +++++
 arch/powerpc/include/asm/processor.h   | 6 ++++++
 arch/powerpc/include/asm/syscall.h     | 5 +++++
 arch/powerpc/include/asm/thread_info.h | 1 +
 arch/powerpc/kernel/syscall.c          | 5 ++++-
 arch/powerpc/mm/fault.c                | 3 +++
 7 files changed, 25 insertions(+), 1 deletion(-)
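
For reviewers unfamiliar with the generic entry API: the conversion
follows the usual pairing of syscall_enter_from_user_mode() /
syscall_exit_to_user_mode() around syscall dispatch, and
irqentry_enter() / irqentry_exit() around interrupt and fault
handlers. The sketch below shows that shape only; arch_syscall_handler(),
arch_dispatch_syscall(), arch_fault_handler() and arch_fault_body() are
hypothetical placeholders, not functions from this patch or from powerpc.

/*
 * Illustrative sketch of the generic-entry call pattern this patch
 * adopts (requires CONFIG_GENERIC_ENTRY).  The arch_* helpers are
 * hypothetical placeholders.
 */
#include <linux/entry-common.h>

static void arch_syscall_handler(struct pt_regs *regs, long nr)
{
	/* Runs ptrace/seccomp/audit/tracepoint entry hooks; may change nr. */
	nr = syscall_enter_from_user_mode(regs, nr);

	if (nr >= 0)
		arch_dispatch_syscall(regs, nr);	/* hypothetical dispatcher */

	/* Handles exit work: signal delivery, rescheduling, tracing. */
	syscall_exit_to_user_mode(regs);
}

static void arch_fault_handler(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	arch_fault_body(regs);			/* hypothetical fault body */

	irqentry_exit(regs, state);
}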