
[2/3] powerpc: support dynamic preemption

Message ID 20241125042212.1522315-3-sshegde@linux.ibm.com (mailing list archive)
State Superseded
Series powerpc: Enable dynamic preemption

Commit Message

Shrikanth Hegde Nov. 25, 2024, 4:22 a.m. UTC
Once lazy preemption is supported, it is desirable to be able to change
the preemption model at runtime. So this change adds support for dynamic
preemption using DYNAMIC_KEY.

In the irq-exit-to-kernel path, use preempt_model_preemptible() for the
decision. The other way would be a static-key-based decision. Keeping it
simpler, since the key-based variant didn't show a performance improvement.

Tested lightly on a Power10 LPAR. Performance numbers indicate that
preempt=none (non-dynamic) and preempt=none (dynamic) are similar.
Only hackbench pipe shows a regression: there is a slight overhead from
the code that checks whether the kernel is preemptible, and hackbench
pipe is prone to such patterns [1].

cat /sys/kernel/debug/sched/preempt
(none) voluntary full lazy
perf stat -e probe:__cond_resched -a sleep 1
 Performance counter stats for 'system wide':
             1,253      probe:__cond_resched

echo full > /sys/kernel/debug/sched/preempt
cat /sys/kernel/debug/sched/preempt
none voluntary (full) lazy
perf stat -e probe:__cond_resched -a sleep 1
 Performance counter stats for 'system wide':
                 0      probe:__cond_resched

echo lazy > /sys/kernel/debug/sched/preempt
cat /sys/kernel/debug/sched/preempt
none voluntary full (lazy)
perf stat -e probe:__cond_resched -a sleep 1
 Performance counter stats for 'system wide':
                 0      probe:__cond_resched
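
(Aside: probe:__cond_resched above is a dynamic probe event, not a
built-in one. A minimal setup sketch, assuming kprobes and perf are
available on the test kernel:

  perf probe --add __cond_resched
  perf stat -e probe:__cond_resched -a sleep 1
)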

[1]: https://lore.kernel.org/all/1a973dda-c79e-4d95-935b-e4b93eb077b8@linux.ibm.com/

Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
 arch/powerpc/Kconfig               | 1 +
 arch/powerpc/include/asm/preempt.h | 1 +
 arch/powerpc/kernel/interrupt.c    | 6 +++++-
 arch/powerpc/lib/vmx-helper.c      | 2 +-
 4 files changed, 8 insertions(+), 2 deletions(-)
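
For context on the "slight overhead" above: under CONFIG_PREEMPT_DYNAMIC,
preempt_model_preemptible() is not a static branch but an out-of-line call
that reads the currently selected mode. A sketch of the generic code
involved (my reading of include/linux/preempt.h and kernel/sched/core.c
around this series; treat the exact shape as an assumption):

/* kernel/sched/core.c: one out-of-line accessor per preemption model. */
#define PREEMPT_MODEL_ACCESSOR(mode) \
	bool preempt_model_##mode(void)						 \
	{									 \
		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
	}									 \
	EXPORT_SYMBOL_GPL(preempt_model_##mode)

/* include/linux/preempt.h: what the irq-exit path in this patch calls. */
static inline bool preempt_model_preemptible(void)
{
	return preempt_model_full() || preempt_model_lazy() || preempt_model_rt();
}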

Comments

Christophe Leroy Nov. 26, 2024, 10:48 a.m. UTC | #1
Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
> Once lazy preemption is supported, it is desirable to be able to change
> the preemption model at runtime. So this change adds support for dynamic
> preemption using DYNAMIC_KEY.
>
> In the irq-exit-to-kernel path, use preempt_model_preemptible() for the
> decision. The other way would be a static-key-based decision. Keeping it
> simpler, since the key-based variant didn't show a performance improvement.
>
> Tested lightly on a Power10 LPAR. Performance numbers indicate that
> preempt=none (non-dynamic) and preempt=none (dynamic) are similar.
> Only hackbench pipe shows a regression: there is a slight overhead from
> the code that checks whether the kernel is preemptible, and hackbench
> pipe is prone to such patterns [1].
> 
> cat /sys/kernel/debug/sched/preempt
> (none) voluntary full lazy
> perf stat -e probe:__cond_resched -a sleep 1
>   Performance counter stats for 'system wide':
>               1,253      probe:__cond_resched
> 
> echo full > /sys/kernel/debug/sched/preempt
> cat /sys/kernel/debug/sched/preempt
> none voluntary (full) lazy
> perf stat -e probe:__cond_resched -a sleep 1
>   Performance counter stats for 'system wide':
>                   0      probe:__cond_resched
> 
> echo lazy > /sys/kernel/debug/sched/preempt
> cat /sys/kernel/debug/sched/preempt
> none voluntary full (lazy)
> perf stat -e probe:__cond_resched -a sleep 1
>   Performance counter stats for 'system wide':
>                   0      probe:__cond_resched
> 
> [1]: https://lore.kernel.org/all/1a973dda-c79e-4d95-935b-e4b93eb077b8@linux.ibm.com/
> 
> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
> ---
>   arch/powerpc/Kconfig               | 1 +
>   arch/powerpc/include/asm/preempt.h | 1 +
>   arch/powerpc/kernel/interrupt.c    | 6 +++++-
>   arch/powerpc/lib/vmx-helper.c      | 2 +-
>   4 files changed, 8 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 6d6bbd93abab..01c58f5258c9 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -270,6 +270,7 @@ config PPC
>   	select HAVE_PERF_EVENTS_NMI		if PPC64
>   	select HAVE_PERF_REGS
>   	select HAVE_PERF_USER_STACK_DUMP
> +	select HAVE_PREEMPT_DYNAMIC_KEY
>   	select HAVE_RETHOOK			if KPROBES
>   	select HAVE_REGS_AND_STACK_ACCESS_API
>   	select HAVE_RELIABLE_STACKTRACE
> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
> index 51f8f3881523..c0a19ff3f78c 100644
> --- a/arch/powerpc/include/asm/preempt.h
> +++ b/arch/powerpc/include/asm/preempt.h
> @@ -84,6 +84,7 @@ extern asmlinkage void preempt_schedule_notrace(void);
>   
>   #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>   
> +DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>   void dynamic_preempt_schedule(void);
>   void dynamic_preempt_schedule_notrace(void);
>   #define __preempt_schedule()		dynamic_preempt_schedule()
> diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
> index 8f4acc55407b..0fb01019d7e0 100644
> --- a/arch/powerpc/kernel/interrupt.c
> +++ b/arch/powerpc/kernel/interrupt.c
> @@ -38,6 +38,10 @@ static inline bool exit_must_hard_disable(void)
>   }
>   #endif
>   
> +#ifdef CONFIG_PREEMPT_DYNAMIC
> +DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
> +#endif

Why is that needed at all? It isn't used.

> +
>   /*
>    * local irqs must be disabled. Returns false if the caller must re-enable
>    * them, check for new work, and try again.
> @@ -396,7 +400,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
>   		/* Returning to a kernel context with local irqs enabled. */
>   		WARN_ON_ONCE(!(regs->msr & MSR_EE));
>   again:
> -		if (IS_ENABLED(CONFIG_PREEMPTION)) {
> +		if (preempt_model_preemptible()) {
>   			/* Return to preemptible kernel context */
>   			if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
>   				if (preempt_count() == 0)
> diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
> index 58ed6bd613a6..7b069c832ce2 100644
> --- a/arch/powerpc/lib/vmx-helper.c
> +++ b/arch/powerpc/lib/vmx-helper.c
> @@ -45,7 +45,7 @@ int exit_vmx_usercopy(void)
>   	 * set and we are preemptible. The hack here is to schedule a
>   	 * decrementer to fire here and reschedule for us if necessary.
>   	 */
> -	if (IS_ENABLED(CONFIG_PREEMPTION) && need_resched())
> +	if (preempt_model_preemptible() && need_resched())
>   		set_dec(1);
>   	return 0;
>   }
Shrikanth Hegde Nov. 26, 2024, 11:15 a.m. UTC | #2
On 11/26/24 16:18, Christophe Leroy wrote:
> 
> 

Hi Christophe, Thanks for taking a look at this.

> Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
>> Once lazy preemption is supported, it is desirable to be able to change
>> the preemption model at runtime. So this change adds support for dynamic
>> preemption using DYNAMIC_KEY.
>>
>> In the irq-exit-to-kernel path, use preempt_model_preemptible() for the
>> decision. The other way would be a static-key-based decision. Keeping it
>> simpler, since the key-based variant didn't show a performance improvement.
>>
>> Tested lightly on a Power10 LPAR. Performance numbers indicate that
>> preempt=none (non-dynamic) and preempt=none (dynamic) are similar.
>> Only hackbench pipe shows a regression: there is a slight overhead from
>> the code that checks whether the kernel is preemptible, and hackbench
>> pipe is prone to such patterns [1].
>>
>> cat /sys/kernel/debug/sched/preempt
>> (none) voluntary full lazy
>> perf stat -e probe:__cond_resched -a sleep 1
>>   Performance counter stats for 'system wide':
>>               1,253      probe:__cond_resched
>>
>> echo full > /sys/kernel/debug/sched/preempt
>> cat /sys/kernel/debug/sched/preempt
>> none voluntary (full) lazy
>> perf stat -e probe:__cond_resched -a sleep 1
>>   Performance counter stats for 'system wide':
>>                   0      probe:__cond_resched
>>
>> echo lazy > /sys/kernel/debug/sched/preempt
>> cat /sys/kernel/debug/sched/preempt
>> none voluntary full (lazy)
>> perf stat -e probe:__cond_resched -a sleep 1
>>   Performance counter stats for 'system wide':
>>                   0      probe:__cond_resched
>>
>> [1]: https://lore.kernel.org/all/1a973dda-c79e-4d95-935b-e4b93eb077b8@linux.ibm.com/
>>
>> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
>> ---
>>   arch/powerpc/Kconfig               | 1 +
>>   arch/powerpc/include/asm/preempt.h | 1 +
>>   arch/powerpc/kernel/interrupt.c    | 6 +++++-
>>   arch/powerpc/lib/vmx-helper.c      | 2 +-
>>   4 files changed, 8 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>> index 6d6bbd93abab..01c58f5258c9 100644
>> --- a/arch/powerpc/Kconfig
>> +++ b/arch/powerpc/Kconfig
>> @@ -270,6 +270,7 @@ config PPC
>>       select HAVE_PERF_EVENTS_NMI        if PPC64
>>       select HAVE_PERF_REGS
>>       select HAVE_PERF_USER_STACK_DUMP
>> +    select HAVE_PREEMPT_DYNAMIC_KEY
>>       select HAVE_RETHOOK            if KPROBES
>>       select HAVE_REGS_AND_STACK_ACCESS_API
>>       select HAVE_RELIABLE_STACKTRACE
>> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
>> index 51f8f3881523..c0a19ff3f78c 100644
>> --- a/arch/powerpc/include/asm/preempt.h
>> +++ b/arch/powerpc/include/asm/preempt.h
>> @@ -84,6 +84,7 @@ extern asmlinkage void preempt_schedule_notrace(void);
>>   #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>> +DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>>   void dynamic_preempt_schedule(void);
>>   void dynamic_preempt_schedule_notrace(void);
>>   #define __preempt_schedule()        dynamic_preempt_schedule()
>> diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
>> index 8f4acc55407b..0fb01019d7e0 100644
>> --- a/arch/powerpc/kernel/interrupt.c
>> +++ b/arch/powerpc/kernel/interrupt.c
>> @@ -38,6 +38,10 @@ static inline bool exit_must_hard_disable(void)
>>   }
>>   #endif
>> +#ifdef CONFIG_PREEMPT_DYNAMIC
>> +DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>> +#endif
> 
> Why is that needed at all? It isn't used.

This is needed, else compilation fails.

The key has to be defined by the arch if it doesn't use the generic
entry/exit infrastructure. So if an arch enables
CONFIG_HAVE_PREEMPT_DYNAMIC_KEY, it has to define this key as well.
The generic sched/core code enables this key.

This was one of the points I was requesting an answer for: either use
preempt_model_preemptible(), or define macros based on this key. Other
archs are doing the latter, and hence the generic code enables this key.

It can be done either way. If we do the latter, then this variable
will be used as well.

I hope this answers the question on patch 1 as well. Otherwise I have
to declare it in one of the other arch asm headers.
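
For anyone following along, the coupling described above looks roughly
like this in the generic code (a sketch of kernel/entry/common.c and
kernel/sched/core.c, not part of this patch):

/* kernel/entry/common.c: archs on the generic entry code define the key
 * there and gate the irq-exit resched on it. */
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);

void dynamic_irqentry_exit_cond_resched(void)
{
	if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
		return;
	raw_irqentry_exit_cond_resched();
}

/* kernel/sched/core.c: with HAVE_PREEMPT_DYNAMIC_KEY, switching the model
 * flips the sk_dynamic_* keys by name, so any arch selecting the option
 * must provide the symbol even if its own entry code never branches on it. */
#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)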

> 
>> +
>>   /*
>>    * local irqs must be disabled. Returns false if the caller must re-enable
>>    * them, check for new work, and try again.
>> @@ -396,7 +400,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
>>           /* Returning to a kernel context with local irqs enabled. */
>>           WARN_ON_ONCE(!(regs->msr & MSR_EE));
>>   again:
>> -        if (IS_ENABLED(CONFIG_PREEMPTION)) {
>> +        if (preempt_model_preemptible()) {
>>               /* Return to preemptible kernel context */
>>               if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
>>                   if (preempt_count() == 0)
>> diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
>> index 58ed6bd613a6..7b069c832ce2 100644
>> --- a/arch/powerpc/lib/vmx-helper.c
>> +++ b/arch/powerpc/lib/vmx-helper.c
>> @@ -45,7 +45,7 @@ int exit_vmx_usercopy(void)
>>        * set and we are preemptible. The hack here is to schedule a
>>        * decrementer to fire here and reschedule for us if necessary.
>>        */
>> -    if (IS_ENABLED(CONFIG_PREEMPTION) && need_resched())
>> +    if (preempt_model_preemptible() && need_resched())
>>           set_dec(1);
>>       return 0;
>>   }
Christophe Leroy Nov. 27, 2024, 6:28 a.m. UTC | #3
Le 26/11/2024 à 12:15, Shrikanth Hegde a écrit :
> 
> 
> On 11/26/24 16:18, Christophe Leroy wrote:
>>
>>
> 
> Hi Christophe, Thanks for taking a look at this.
> 
>> Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
>>> Once lazy preemption is supported, it is desirable to be able to change
>>> the preemption model at runtime. So this change adds support for dynamic
>>> preemption using DYNAMIC_KEY.
>>>
>>> In the irq-exit-to-kernel path, use preempt_model_preemptible() for the
>>> decision. The other way would be a static-key-based decision. Keeping it
>>> simpler, since the key-based variant didn't show a performance improvement.
>>>
>>> Tested lightly on a Power10 LPAR. Performance numbers indicate that
>>> preempt=none (non-dynamic) and preempt=none (dynamic) are similar.
>>> Only hackbench pipe shows a regression: there is a slight overhead from
>>> the code that checks whether the kernel is preemptible, and hackbench
>>> pipe is prone to such patterns [1].
>>>
>>> cat /sys/kernel/debug/sched/preempt
>>> (none) voluntary full lazy
>>> perf stat -e probe:__cond_resched -a sleep 1
>>>   Performance counter stats for 'system wide':
>>>               1,253      probe:__cond_resched
>>>
>>> echo full > /sys/kernel/debug/sched/preempt
>>> cat /sys/kernel/debug/sched/preempt
>>> none voluntary (full) lazy
>>> perf stat -e probe:__cond_resched -a sleep 1
>>>   Performance counter stats for 'system wide':
>>>                   0      probe:__cond_resched
>>>
>>> echo lazy > /sys/kernel/debug/sched/preempt
>>> cat /sys/kernel/debug/sched/preempt
>>> none voluntary full (lazy)
>>> perf stat -e probe:__cond_resched -a sleep 1
>>>   Performance counter stats for 'system wide':
>>>                   0      probe:__cond_resched
>>>
>>> [1]: https://lore.kernel.org/all/1a973dda-c79e-4d95-935b-e4b93eb077b8@linux.ibm.com/
>>>
>>> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
>>> ---
>>>   arch/powerpc/Kconfig               | 1 +
>>>   arch/powerpc/include/asm/preempt.h | 1 +
>>>   arch/powerpc/kernel/interrupt.c    | 6 +++++-
>>>   arch/powerpc/lib/vmx-helper.c      | 2 +-
>>>   4 files changed, 8 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>>> index 6d6bbd93abab..01c58f5258c9 100644
>>> --- a/arch/powerpc/Kconfig
>>> +++ b/arch/powerpc/Kconfig
>>> @@ -270,6 +270,7 @@ config PPC
>>>       select HAVE_PERF_EVENTS_NMI        if PPC64
>>>       select HAVE_PERF_REGS
>>>       select HAVE_PERF_USER_STACK_DUMP
>>> +    select HAVE_PREEMPT_DYNAMIC_KEY
>>>       select HAVE_RETHOOK            if KPROBES
>>>       select HAVE_REGS_AND_STACK_ACCESS_API
>>>       select HAVE_RELIABLE_STACKTRACE
>>> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
>>> index 51f8f3881523..c0a19ff3f78c 100644
>>> --- a/arch/powerpc/include/asm/preempt.h
>>> +++ b/arch/powerpc/include/asm/preempt.h
>>> @@ -84,6 +84,7 @@ extern asmlinkage void preempt_schedule_notrace(void);
>>>   #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>>> +DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>>>   void dynamic_preempt_schedule(void);
>>>   void dynamic_preempt_schedule_notrace(void);
>>>   #define __preempt_schedule()        dynamic_preempt_schedule()
>>> diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
>>> index 8f4acc55407b..0fb01019d7e0 100644
>>> --- a/arch/powerpc/kernel/interrupt.c
>>> +++ b/arch/powerpc/kernel/interrupt.c
>>> @@ -38,6 +38,10 @@ static inline bool exit_must_hard_disable(void)
>>>   }
>>>   #endif
>>> +#ifdef CONFIG_PREEMPT_DYNAMIC
>>> +DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>>> +#endif
>>
>> Why is that needed at all? It isn't used.
> 
> This is needed, else compilation fails.
>
> The key has to be defined by the arch if it doesn't use the generic
> entry/exit infrastructure. So if an arch enables
> CONFIG_HAVE_PREEMPT_DYNAMIC_KEY, it has to define this key as well.
> The generic sched/core code enables this key.
>
> This was one of the points I was requesting an answer for: either use
> preempt_model_preemptible(), or define macros based on this key. Other
> archs are doing the latter, and hence the generic code enables this key.
>
> It can be done either way. If we do the latter, then this variable
> will be used as well.
> 

Ah right, I did a grep on sk_dynamic_irqentry_exit_cond_resched, but
indeed it is used via static_key_enable(&sk_dynamic_##f.key).

Christophe
Christophe Leroy Nov. 27, 2024, 6:44 a.m. UTC | #4
Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
> Once the lazy preemption is supported, it would be desirable to change
> the preemption models at runtime. So this change adds support for dynamic
> preemption using DYNAMIC_KEY.
> 
> In irq-exit to kernel path, use preempt_model_preemptible for decision.
> Other way would be using static key based decision. Keeping it
> simpler since key based change didn't show performance improvement.

What about static_call, wouldn't it improve performance?

> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 6d6bbd93abab..01c58f5258c9 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -270,6 +270,7 @@ config PPC
>   	select HAVE_PERF_EVENTS_NMI		if PPC64
>   	select HAVE_PERF_REGS
>   	select HAVE_PERF_USER_STACK_DUMP
> +	select HAVE_PREEMPT_DYNAMIC_KEY

Can you use HAVE_PREEMPT_DYNAMIC_CALL instead? That should be more
performant.

I know static calls are not in for PPC64 yet; you can restart from
http://patchwork.ozlabs.org/project/linuxppc-dev/cover/20221010002957.128276-1-bgray@linux.ibm.com/
and https://github.com/linuxppc/issues/issues/416
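
For comparison, the static-call flavour in the generic code looks roughly
like this (a sketch of the kernel/entry/common.c and kernel/sched/core.c
shape; powerpc would first need a working static_call implementation):

/* kernel/entry/common.c: the resched hook is a static call whose target
 * is patched when the model changes, so the disabled case becomes a
 * patched-out call rather than a runtime test. */
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);

/* kernel/sched/core.c: switching models retargets the static calls. */
#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)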

Christophe
Shrikanth Hegde Dec. 1, 2024, 7:45 p.m. UTC | #5
On 11/27/24 12:14, Christophe Leroy wrote:
> 
> 
> Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
>> Once lazy preemption is supported, it is desirable to be able to change
>> the preemption model at runtime. So this change adds support for dynamic
>> preemption using DYNAMIC_KEY.
>>
>> In the irq-exit-to-kernel path, use preempt_model_preemptible() for the
>> decision. The other way would be a static-key-based decision. Keeping it
>> simpler, since the key-based variant didn't show a performance improvement.
> 
> What about static_call, wouldn't it improve performance?
> 
>>
>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>> index 6d6bbd93abab..01c58f5258c9 100644
>> --- a/arch/powerpc/Kconfig
>> +++ b/arch/powerpc/Kconfig
>> @@ -270,6 +270,7 @@ config PPC
>>       select HAVE_PERF_EVENTS_NMI        if PPC64
>>       select HAVE_PERF_REGS
>>       select HAVE_PERF_USER_STACK_DUMP
>> +    select HAVE_PREEMPT_DYNAMIC_KEY
> 
> Can you use HAVE_PREEMPT_DYNAMIC_CALL instead? That should be more
> performant.
> 
> I know static calls are not in for PPC64 yet; you can restart from
> http://patchwork.ozlabs.org/project/linuxppc-dev/cover/20221010002957.128276-1-bgray@linux.ibm.com/
> and https://github.com/linuxppc/issues/issues/416
> 

Thanks Christophe, I will take a look and try to understand it.

Maybe a stupid question: are the concerns of arm still valid for ppc64/ppc32 out-of-line static calls?
https://lore.kernel.org/all/20220214165216.2231574-6-mark.rutland@arm.com/

As I understood, that is the reason they went ahead with DYNAMIC_KEY.

> Christophe
Christophe Leroy Dec. 3, 2024, 7:53 p.m. UTC | #6
Le 01/12/2024 à 20:45, Shrikanth Hegde a écrit :
> 
> 
> On 11/27/24 12:14, Christophe Leroy wrote:
>>
>>
>> Le 25/11/2024 à 05:22, Shrikanth Hegde a écrit :
>>> Once lazy preemption is supported, it is desirable to be able to change
>>> the preemption model at runtime. So this change adds support for dynamic
>>> preemption using DYNAMIC_KEY.
>>>
>>> In the irq-exit-to-kernel path, use preempt_model_preemptible() for the
>>> decision. The other way would be a static-key-based decision. Keeping it
>>> simpler, since the key-based variant didn't show a performance improvement.
>>
>> What about static_call, wouldn't it improve performance?
>>
>>>
>>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>>> index 6d6bbd93abab..01c58f5258c9 100644
>>> --- a/arch/powerpc/Kconfig
>>> +++ b/arch/powerpc/Kconfig
>>> @@ -270,6 +270,7 @@ config PPC
>>>       select HAVE_PERF_EVENTS_NMI        if PPC64
>>>       select HAVE_PERF_REGS
>>>       select HAVE_PERF_USER_STACK_DUMP
>>> +    select HAVE_PREEMPT_DYNAMIC_KEY
>>
>> Can you use HAVE_PREEMPT_DYNAMIC_CALL instead? That should be more
>> performant.
>>
>> I know static calls are not in for PPC64 yet; you can restart from
>> http://patchwork.ozlabs.org/project/linuxppc-dev/cover/20221010002957.128276-1-bgray@linux.ibm.com/
>> and https://github.com/linuxppc/issues/issues/416
>>
> 
> Thanks Christophe, I will take a look and understand.
> 
> Maybe a stupid question: are the concerns of arm still valid for
> ppc64/ppc32 out-of-line static calls?

Not sure. I don't know what they call a landing pad.

Limited branch range is a limitation for sure, but whatever the method,
an indirect call is required when the called function is too far away.
And on powerpc the max direct-branch distance is 32 MB, which is quite
big for a standard kernel. Only modules should be concerned, but do we
have scheduling code in modules?

CFI I don't know.

Anyway, I resurrected the series I sent to implement inline static calls
on PPC32. I'm sure we can then extend it to PPC64.


> https://lore.kernel.org/all/20220214165216.2231574-6-mark.rutland@arm.com/
> 
> As I understood, that is the reason they went ahead with DYNAMIC_KEY.

In their commit they have comparisons of assembly code. Can you do the 
same for powerpc to get a better picture of what we are talking about?
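
One way to produce such a comparison, as a sketch (the file names here
are hypothetical, one vmlinux built per variant):

  objdump -d vmlinux.key  | awk '/<interrupt_exit_kernel_prepare>:/,/^$/' > key.asm
  objdump -d vmlinux.call | awk '/<interrupt_exit_kernel_prepare>:/,/^$/' > call.asm
  diff -u key.asm call.asm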


Christophe

Patch

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6d6bbd93abab..01c58f5258c9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -270,6 +270,7 @@  config PPC
 	select HAVE_PERF_EVENTS_NMI		if PPC64
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_DYNAMIC_KEY
 	select HAVE_RETHOOK			if KPROBES
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE
diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
index 51f8f3881523..c0a19ff3f78c 100644
--- a/arch/powerpc/include/asm/preempt.h
+++ b/arch/powerpc/include/asm/preempt.h
@@ -84,6 +84,7 @@  extern asmlinkage void preempt_schedule_notrace(void);
 
 #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
 
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
 void dynamic_preempt_schedule(void);
 void dynamic_preempt_schedule_notrace(void);
 #define __preempt_schedule()		dynamic_preempt_schedule()
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 8f4acc55407b..0fb01019d7e0 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -38,6 +38,10 @@  static inline bool exit_must_hard_disable(void)
 }
 #endif
 
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#endif
+
 /*
  * local irqs must be disabled. Returns false if the caller must re-enable
  * them, check for new work, and try again.
@@ -396,7 +400,7 @@  notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
 		/* Returning to a kernel context with local irqs enabled. */
 		WARN_ON_ONCE(!(regs->msr & MSR_EE));
 again:
-		if (IS_ENABLED(CONFIG_PREEMPTION)) {
+		if (preempt_model_preemptible()) {
 			/* Return to preemptible kernel context */
 			if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
 				if (preempt_count() == 0)
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index 58ed6bd613a6..7b069c832ce2 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -45,7 +45,7 @@  int exit_vmx_usercopy(void)
 	 * set and we are preemptible. The hack here is to schedule a
 	 * decrementer to fire here and reschedule for us if necessary.
 	 */
-	if (IS_ENABLED(CONFIG_PREEMPTION) && need_resched())
+	if (preempt_model_preemptible() && need_resched())
 		set_dec(1);
 	return 0;
 }