
[1/3] powerpc: copy preempt.h into arch/include/asm

Message ID 20241125042212.1522315-2-sshegde@linux.ibm.com (mailing list archive)
State Superseded
Series powerpc: Enable dynamic preemption

Commit Message

Shrikanth Hegde Nov. 25, 2024, 4:22 a.m. UTC
PowerPC uses asm-generic preempt definitions as of now.
Copy that into arch/asm so that arch specific changes can be done.
This would help the next patch for enabling dynamic preemption.

No functional changes intended.

Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
 arch/powerpc/include/asm/preempt.h | 100 +++++++++++++++++++++++++++++
 1 file changed, 100 insertions(+)
 create mode 100644 arch/powerpc/include/asm/preempt.h

Comments

Christophe Leroy Nov. 26, 2024, 10:49 a.m. UTC | #1
On 25/11/2024 at 05:22, Shrikanth Hegde wrote:
> PowerPC uses asm-generic preempt definitions as of now.
> Copy that into arch/asm so that arch specific changes can be done.
> This would help the next patch for enabling dynamic preemption.

I can't see any valid use of it in the following patches. The only
modification you make to that file is in patch 2, and it is unused.

> 
> No functional changes intended.
> 
> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
> ---
>   arch/powerpc/include/asm/preempt.h | 100 +++++++++++++++++++++++++++++
>   1 file changed, 100 insertions(+)
>   create mode 100644 arch/powerpc/include/asm/preempt.h
> 
> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
> new file mode 100644
> index 000000000000..51f8f3881523
> --- /dev/null
> +++ b/arch/powerpc/include/asm/preempt.h
> @@ -0,0 +1,100 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef __ASM_PREEMPT_H
> +#define __ASM_PREEMPT_H
> +
> +#include <linux/thread_info.h>
> +
> +#define PREEMPT_ENABLED	(0)
> +
> +static __always_inline int preempt_count(void)
> +{
> +	return READ_ONCE(current_thread_info()->preempt_count);
> +}
> +
> +static __always_inline volatile int *preempt_count_ptr(void)
> +{
> +	return &current_thread_info()->preempt_count;
> +}
> +
> +static __always_inline void preempt_count_set(int pc)
> +{
> +	*preempt_count_ptr() = pc;
> +}
> +
> +/*
> + * must be macros to avoid header recursion hell
> + */
> +#define init_task_preempt_count(p) do { \
> +	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
> +} while (0)
> +
> +#define init_idle_preempt_count(p, cpu) do { \
> +	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
> +} while (0)
> +
> +static __always_inline void set_preempt_need_resched(void)
> +{
> +}
> +
> +static __always_inline void clear_preempt_need_resched(void)
> +{
> +}
> +
> +static __always_inline bool test_preempt_need_resched(void)
> +{
> +	return false;
> +}
> +
> +/*
> + * The various preempt_count add/sub methods
> + */
> +
> +static __always_inline void __preempt_count_add(int val)
> +{
> +	*preempt_count_ptr() += val;
> +}
> +
> +static __always_inline void __preempt_count_sub(int val)
> +{
> +	*preempt_count_ptr() -= val;
> +}
> +
> +static __always_inline bool __preempt_count_dec_and_test(void)
> +{
> +	/*
> +	 * Because of load-store architectures cannot do per-cpu atomic
> +	 * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
> +	 * lost.
> +	 */
> +	return !--*preempt_count_ptr() && tif_need_resched();
> +}
> +
> +/*
> + * Returns true when we need to resched and can (barring IRQ state).
> + */
> +static __always_inline bool should_resched(int preempt_offset)
> +{
> +	return unlikely(preempt_count() == preempt_offset &&
> +			tif_need_resched());
> +}
> +
> +#ifdef CONFIG_PREEMPTION
> +extern asmlinkage void preempt_schedule(void);
> +extern asmlinkage void preempt_schedule_notrace(void);
> +
> +#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
> +
> +void dynamic_preempt_schedule(void);
> +void dynamic_preempt_schedule_notrace(void);
> +#define __preempt_schedule()		dynamic_preempt_schedule()
> +#define __preempt_schedule_notrace()	dynamic_preempt_schedule_notrace()
> +
> +#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
> +
> +#define __preempt_schedule() preempt_schedule()
> +#define __preempt_schedule_notrace() preempt_schedule_notrace()
> +
> +#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
> +#endif /* CONFIG_PREEMPTION */
> +
> +#endif /* __ASM_PREEMPT_H */
Christophe Leroy Nov. 27, 2024, 6:37 a.m. UTC | #2
On 25/11/2024 at 05:22, Shrikanth Hegde wrote:
> PowerPC uses asm-generic preempt definitions as of now.
> Copy that into arch/asm so that arch specific changes can be done.
> This would help the next patch for enabling dynamic preemption.

Instead of copying all the content of the asm-generic version, can you
just create a receptacle for your new macros that includes
asm-generic/preempt.h?

Look at arch/powerpc/include/asm/percpu.h for example.

> 
> No functional changes intended.
> 
> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
> ---
>   arch/powerpc/include/asm/preempt.h | 100 +++++++++++++++++++++++++++++
>   1 file changed, 100 insertions(+)
>   create mode 100644 arch/powerpc/include/asm/preempt.h
> 
> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
> new file mode 100644
> index 000000000000..51f8f3881523
> --- /dev/null
> +++ b/arch/powerpc/include/asm/preempt.h
> @@ -0,0 +1,100 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef __ASM_PREEMPT_H
> +#define __ASM_PREEMPT_H

Should be __ASM_POWERPC_PREEMPT_H

> +
> +#include <linux/thread_info.h>
> +
> +#define PREEMPT_ENABLED	(0)
> +
> +static __always_inline int preempt_count(void)
> +{
> +	return READ_ONCE(current_thread_info()->preempt_count);
> +}
> +
> +static __always_inline volatile int *preempt_count_ptr(void)
> +{
> +	return &current_thread_info()->preempt_count;
> +}
> +
> +static __always_inline void preempt_count_set(int pc)
> +{
> +	*preempt_count_ptr() = pc;
> +}
> +
> +/*
> + * must be macros to avoid header recursion hell
> + */
> +#define init_task_preempt_count(p) do { \
> +	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
> +} while (0)
> +
> +#define init_idle_preempt_count(p, cpu) do { \
> +	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
> +} while (0)
> +
> +static __always_inline void set_preempt_need_resched(void)
> +{
> +}
> +
> +static __always_inline void clear_preempt_need_resched(void)
> +{
> +}
> +
> +static __always_inline bool test_preempt_need_resched(void)
> +{
> +	return false;
> +}
> +
> +/*
> + * The various preempt_count add/sub methods
> + */
> +
> +static __always_inline void __preempt_count_add(int val)
> +{
> +	*preempt_count_ptr() += val;
> +}
> +
> +static __always_inline void __preempt_count_sub(int val)
> +{
> +	*preempt_count_ptr() -= val;
> +}
> +
> +static __always_inline bool __preempt_count_dec_and_test(void)
> +{
> +	/*
> +	 * Because of load-store architectures cannot do per-cpu atomic
> +	 * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
> +	 * lost.
> +	 */
> +	return !--*preempt_count_ptr() && tif_need_resched();
> +}
> +
> +/*
> + * Returns true when we need to resched and can (barring IRQ state).
> + */
> +static __always_inline bool should_resched(int preempt_offset)
> +{
> +	return unlikely(preempt_count() == preempt_offset &&
> +			tif_need_resched());
> +}
> +
> +#ifdef CONFIG_PREEMPTION
> +extern asmlinkage void preempt_schedule(void);
> +extern asmlinkage void preempt_schedule_notrace(void);
> +
> +#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
> +
> +void dynamic_preempt_schedule(void);
> +void dynamic_preempt_schedule_notrace(void);
> +#define __preempt_schedule()		dynamic_preempt_schedule()
> +#define __preempt_schedule_notrace()	dynamic_preempt_schedule_notrace()
> +
> +#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
> +
> +#define __preempt_schedule() preempt_schedule()
> +#define __preempt_schedule_notrace() preempt_schedule_notrace()
> +
> +#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
> +#endif /* CONFIG_PREEMPTION */
> +
> +#endif /* __ASM_PREEMPT_H */
Shrikanth Hegde Dec. 2, 2024, 2:05 p.m. UTC | #3
On 11/27/24 12:07, Christophe Leroy wrote:
> 
> 
> On 25/11/2024 at 05:22, Shrikanth Hegde wrote:
>> PowerPC uses asm-generic preempt definitions as of now.
>> Copy that into arch/asm so that arch specific changes can be done.
>> This would help the next patch for enabling dynamic preemption.
> 

The reason I wanted the content instead was to allow future patches
where I thought of making the preempt count per-paca for ppc64 at least.
Generic code assumes it is per thread. If this change is to be done at
that point, that is fair too. I am okay with it.
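
(Purely as an illustration of that idea, here is a rough sketch of what a
per-paca accessor could look like, assuming a new preempt_count field in
struct paca_struct; nothing like this exists in the current series:)

static __always_inline int preempt_count(void)
{
	/* per-CPU count kept in the paca instead of per-task thread_info */
	return READ_ONCE(local_paca->preempt_count);
}

static __always_inline void __preempt_count_add(int val)
{
	local_paca->preempt_count += val;
}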


> Instead of copying all the content of the asm-generic version, can you
> just create a receptacle for your new macros that includes
> asm-generic/preempt.h?
> 
> Look at arch/powerpc/include/asm/percpu.h for example.
>

You mean something like below, right?


#ifndef __ASM_POWERPC_PREEMPT_H
#define __ASM_POWERPC_PREEMPT_H

#include <asm-generic/preempt.h>

#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#endif

#endif /* __ASM_POWERPC_PREEMPT_H */
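
(And for completeness, a hedged sketch of how such a key is typically
consumed at interrupt exit, modelled on the arm64 pattern; the helper
name and the call site below are assumptions, not part of this series:)

#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
#define need_irq_preemption() \
	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()	IS_ENABLED(CONFIG_PREEMPTION)
#endif

	/* roughly, on the interrupt-exit-to-kernel path: */
	if (need_irq_preemption() && !preempt_count() && need_resched())
		preempt_schedule_irq();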



>>
>> No functional changes intended.
>>
>> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
>> ---
>>   arch/powerpc/include/asm/preempt.h | 100 +++++++++++++++++++++++++++++
>>   1 file changed, 100 insertions(+)
>>   create mode 100644 arch/powerpc/include/asm/preempt.h
>>
>> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/ 
>> include/asm/preempt.h
>> new file mode 100644
>> index 000000000000..51f8f3881523
>> --- /dev/null
>> +++ b/arch/powerpc/include/asm/preempt.h
>> @@ -0,0 +1,100 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +#ifndef __ASM_PREEMPT_H
>> +#define __ASM_PREEMPT_H
> 
> Should be __ASM_POWERPC_PREEMPT_H

thanks for catching this.

> 
>> +
>> +#include <linux/thread_info.h>
>> +
>> +#define PREEMPT_ENABLED    (0)
>> +
>> +static __always_inline int preempt_count(void)
>> +{
>> +    return READ_ONCE(current_thread_info()->preempt_count);
>> +}
>> +
>> +static __always_inline volatile int *preempt_count_ptr(void)
>> +{
>> +    return &current_thread_info()->preempt_count;
>> +}
>> +
>> +static __always_inline void preempt_count_set(int pc)
>> +{
>> +    *preempt_count_ptr() = pc;
>> +}
>> +
>> +/*
>> + * must be macros to avoid header recursion hell
>> + */
>> +#define init_task_preempt_count(p) do { \
>> +    task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
>> +} while (0)
>> +
>> +#define init_idle_preempt_count(p, cpu) do { \
>> +    task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
>> +} while (0)
>> +
>> +static __always_inline void set_preempt_need_resched(void)
>> +{
>> +}
>> +
>> +static __always_inline void clear_preempt_need_resched(void)
>> +{
>> +}
>> +
>> +static __always_inline bool test_preempt_need_resched(void)
>> +{
>> +    return false;
>> +}
>> +
>> +/*
>> + * The various preempt_count add/sub methods
>> + */
>> +
>> +static __always_inline void __preempt_count_add(int val)
>> +{
>> +    *preempt_count_ptr() += val;
>> +}
>> +
>> +static __always_inline void __preempt_count_sub(int val)
>> +{
>> +    *preempt_count_ptr() -= val;
>> +}
>> +
>> +static __always_inline bool __preempt_count_dec_and_test(void)
>> +{
>> +    /*
>> +     * Because of load-store architectures cannot do per-cpu atomic
>> +     * operations; we cannot use PREEMPT_NEED_RESCHED because it 
>> might get
>> +     * lost.
>> +     */
>> +    return !--*preempt_count_ptr() && tif_need_resched();
>> +}
>> +
>> +/*
>> + * Returns true when we need to resched and can (barring IRQ state).
>> + */
>> +static __always_inline bool should_resched(int preempt_offset)
>> +{
>> +    return unlikely(preempt_count() == preempt_offset &&
>> +            tif_need_resched());
>> +}
>> +
>> +#ifdef CONFIG_PREEMPTION
>> +extern asmlinkage void preempt_schedule(void);
>> +extern asmlinkage void preempt_schedule_notrace(void);
>> +
>> +#if defined(CONFIG_PREEMPT_DYNAMIC) && 
>> defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>> +
>> +void dynamic_preempt_schedule(void);
>> +void dynamic_preempt_schedule_notrace(void);
>> +#define __preempt_schedule()        dynamic_preempt_schedule()
>> +#define __preempt_schedule_notrace()    
>> dynamic_preempt_schedule_notrace()
>> +
>> +#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
>> +
>> +#define __preempt_schedule() preempt_schedule()
>> +#define __preempt_schedule_notrace() preempt_schedule_notrace()
>> +
>> +#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
>> +#endif /* CONFIG_PREEMPTION */
>> +
>> +#endif /* __ASM_PREEMPT_H */
Christophe Leroy Dec. 2, 2024, 6:17 p.m. UTC | #4
On 02/12/2024 at 15:05, Shrikanth Hegde wrote:
> 
> 
> On 11/27/24 12:07, Christophe Leroy wrote:
>>
>>
>> On 25/11/2024 at 05:22, Shrikanth Hegde wrote:
>>> PowerPC uses asm-generic preempt definitions as of now.
>>> Copy that into arch/asm so that arch specific changes can be done.
>>> This would help the next patch for enabling dynamic preemption.
>>
> 
> The reason I wanted the content instead was to allow future patches
> where I thought of making the preempt count per-paca for ppc64 at
> least. Generic code assumes it is per thread. If this change is to be
> done at that point, that is fair too. I am okay with it.

I think it is better to keep the series minimal and consistent. If you
have a future plan, no problem, keep it for the future and do everything
at once unless it is heavy and better done in two steps.

As we say in French, a lot of water will have flowed under the bridge by 
then.

I'm sure there will be a lot of discussion when you do that and maybe at 
the end you will end up with something completely different than what 
you have in mind at the moment.

> 
> 
>> Instead of copying all the content of the asm-generic version, can
>> you just create a receptacle for your new macros that includes
>> asm-generic/preempt.h?
>>
>> Look at arch/powerpc/include/asm/percpu.h for example.
>>
> 
> You mean something like below, right?
> 
> 
> #ifndef __ASM_POWERPC_PREEMPT_H
> #define __ASM_POWERPC_PREEMPT_H
> 
> #include <asm-generic/preempt.h>
> 
> #if defined(CONFIG_PREEMPT_DYNAMIC) && 
> defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
> DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
> #endif
> 
> #endif /* __ASM_POWERPC_PREEMPT_H */

Yes exactly.
Shrikanth Hegde Dec. 3, 2024, 2 p.m. UTC | #5
On 12/2/24 23:47, Christophe Leroy wrote:
> 
> 
> On 02/12/2024 at 15:05, Shrikanth Hegde wrote:
>>
>>
>> On 11/27/24 12:07, Christophe Leroy wrote:
>>>
>>>
>>> On 25/11/2024 at 05:22, Shrikanth Hegde wrote:
>>>> PowerPC uses asm-generic preempt definitions as of now.
>>>> Copy that into arch/asm so that arch specific changes can be done.
>>>> This would help the next patch for enabling dynamic preemption.
>>>
>>
>> The reason I wanted the content instead was to allow future patches
>> where I thought of making the preempt count per-paca for ppc64 at
>> least. Generic code assumes it is per thread. If this change is to be
>> done at that point, that is fair too. I am okay with it.
> 
> I think it is better to keep the series minimal and consistent. If you
> have a future plan, no problem, keep it for the future and do
> everything at once unless it is heavy and better done in two steps.
> 
> As we say in French, a lot of water will have flowed under the bridge by 
> then.
> 
> I'm sure there will be a lot of discussion when you do that and maybe at 
> the end you will end up with something completely different than what 
> you have in mind at the moment.
> 

ok.

>>
>>
>>> Instead of copying all the content of the asm-generic version, can
>>> you just create a receptacle for your new macros that includes
>>> asm-generic/preempt.h?
>>>
>>> Look at arch/powerpc/include/asm/percpu.h for example.
>>>
>>
>> You mean something like below, right?
>>
>>
>> #ifndef __ASM_POWERPC_PREEMPT_H
>> #define __ASM_POWERPC_PREEMPT_H
>>
>> #include <asm-generic/preempt.h>
>>
>> #if defined(CONFIG_PREEMPT_DYNAMIC) && 
>> defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>> DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>> #endif
>>
>> #endif /* __ASM_POWERPC_PREEMPT_H */
> 
> Yes exactly.
> 
> 

Should I send a v2 with this, using DYNAMIC_KEY?
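
(For reference, "using DYNAMIC_KEY" would roughly come down to the arch
selecting the key-based flavour, something like the hypothetical Kconfig
sketch below; exact placement and conditions are assumptions:)

config PPC
	...
	select HAVE_PREEMPT_DYNAMIC_KEY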
Christophe Leroy Dec. 3, 2024, 7:47 p.m. UTC | #6
On 03/12/2024 at 15:00, Shrikanth Hegde wrote:
> 
> 
> On 12/2/24 23:47, Christophe Leroy wrote:
>>
>>
>> On 02/12/2024 at 15:05, Shrikanth Hegde wrote:
>>>
>>>
>>> On 11/27/24 12:07, Christophe Leroy wrote:
>>>>
>>>>
>>>> On 25/11/2024 at 05:22, Shrikanth Hegde wrote:
>>>>> PowerPC uses asm-generic preempt definitions as of now.
>>>>> Copy that into arch/asm so that arch specific changes can be done.
>>>>> This would help the next patch for enabling dynamic preemption.
>>>>
>>>
>>> The reason I wanted the content instead was to allow future patches
>>> where I thought of making the preempt count per-paca for ppc64 at
>>> least. Generic code assumes it is per thread. If this change is to
>>> be done at that point, that is fair too. I am okay with it.
>>
>> I think it is better to keep the series minimal and consistent. If
>> you have a future plan, no problem, keep it for the future and do
>> everything at once unless it is heavy and better done in two steps.
>>
>> As we say in French, a lot of water will have flowed under the bridge 
>> by then.
>>
>> I'm sure there will be a lot of discussion when you do that and maybe 
>> at the end you will end up with something completely different than 
>> what you have in mind at the moment.
>>
> 
> ok.
> 
>>>
>>>
>>>> Instead of copying all the content of the asm-generic version, can
>>>> you just create a receptacle for your new macros that includes
>>>> asm-generic/preempt.h?
>>>>
>>>> Look at arch/powerpc/include/asm/percpu.h for example.
>>>>
>>>
>>> You mean something like below, right?
>>>
>>>
>>> #ifndef __ASM_POWERPC_PREEMPT_H
>>> #define __ASM_POWERPC_PREEMPT_H
>>>
>>> #include <asm-generic/preempt.h>
>>>
>>> #if defined(CONFIG_PREEMPT_DYNAMIC) && 
>>> defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>>> DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>>> #endif
>>>
>>> #endif /* __ASM_POWERPC_PREEMPT_H */
>>
>> Yes exactly.
>>
>>
> 
> Should I send a v2 with this, using DYNAMIC_KEY?


Yes, you can do that, but I guess it is not urgent, as it requires the
lazy patches to be merged first and to spend some time in linux-next?

Christophe

Patch

diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
new file mode 100644
index 000000000000..51f8f3881523
--- /dev/null
+++ b/arch/powerpc/include/asm/preempt.h
@@ -0,0 +1,100 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <linux/thread_info.h>
+
+#define PREEMPT_ENABLED	(0)
+
+static __always_inline int preempt_count(void)
+{
+	return READ_ONCE(current_thread_info()->preempt_count);
+}
+
+static __always_inline volatile int *preempt_count_ptr(void)
+{
+	return &current_thread_info()->preempt_count;
+}
+
+static __always_inline void preempt_count_set(int pc)
+{
+	*preempt_count_ptr() = pc;
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define init_task_preempt_count(p) do { \
+	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
+} while (0)
+
+static __always_inline void set_preempt_need_resched(void)
+{
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+	return false;
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+	*preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+	*preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+	/*
+	 * Because of load-store architectures cannot do per-cpu atomic
+	 * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
+	 * lost.
+	 */
+	return !--*preempt_count_ptr() && tif_need_resched();
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(int preempt_offset)
+{
+	return unlikely(preempt_count() == preempt_offset &&
+			tif_need_resched());
+}
+
+#ifdef CONFIG_PREEMPTION
+extern asmlinkage void preempt_schedule(void);
+extern asmlinkage void preempt_schedule_notrace(void);
+
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+void dynamic_preempt_schedule(void);
+void dynamic_preempt_schedule_notrace(void);
+#define __preempt_schedule()		dynamic_preempt_schedule()
+#define __preempt_schedule_notrace()	dynamic_preempt_schedule_notrace()
+
+#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
+
+#define __preempt_schedule() preempt_schedule()
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
+#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
+#endif /* CONFIG_PREEMPTION */
+
+#endif /* __ASM_PREEMPT_H */