@@ -115,7 +115,7 @@ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned lo
 		}
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		if (!need_resched())
+		if (!need_resched_or_ipi())
			__mwait(eax, ecx);
 	}
 	current_clr_polling();
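
In the hunk above, the CPU arms a hardware monitor on the idle task's thread_info::flags and MWAITs until that cache line is written, so a remote CPU can wake it simply by setting a bit in the flags word instead of sending an IPI; the recheck between __monitor() and __mwait() closes the window where a wake bit lands just before the CPU sleeps. A rough userspace model of this wake-by-write scheme, with C11 atomics standing in for MONITOR/MWAIT (the bit names and helpers here are illustrative only, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define TIF_NEED_RESCHED_BIT	(1u << 0)
#define TIF_NOTIFY_IPI_BIT	(1u << 1)

static atomic_uint thread_flags;

static void *idle_side(void *arg)
{
	(void)arg;
	/* Stand-in for __monitor()/__mwait(): poll the monitored word
	 * until a remote CPU writes a wake bit into it. */
	while (!(atomic_load_explicit(&thread_flags, memory_order_acquire) &
		 (TIF_NEED_RESCHED_BIT | TIF_NOTIFY_IPI_BIT)))
		;	/* cpu_relax() */
	printf("woken by flag write, no interrupt taken\n");
	return NULL;
}

static void *sender_side(void *arg)
{
	(void)arg;
	/* Stand-in for the IPI sender: set the notify bit instead of
	 * raising an interrupt on the target CPU. */
	atomic_fetch_or_explicit(&thread_flags, TIF_NOTIFY_IPI_BIT,
				 memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t idler, sender;

	pthread_create(&idler, NULL, idle_side, NULL);
	pthread_create(&sender, NULL, sender_side, NULL);
	pthread_join(idler, NULL);
	pthread_join(sender, NULL);
	return 0;
}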
@@ -925,7 +925,7 @@ static __cpuidle void mwait_idle(void)
 		}
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		if (!need_resched()) {
+		if (!need_resched_or_ipi()) {
 			__sti_mwait(0, 0);
 			raw_local_irq_disable();
 		}
@@ -79,7 +79,7 @@ static int snooze_loop(struct cpuidle_device *dev,
 	dev->poll_time_limit = false;
 	ppc64_runlatch_off();
 	HMT_very_low();
-	while (!need_resched()) {
+	while (!need_resched_or_ipi()) {
 		if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
 			/*
 			 * Task has not woken up but we are exiting the polling
@@ -46,7 +46,7 @@ int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	snooze_exit_time = get_tb() + snooze_timeout;
 	dev->poll_time_limit = false;
 
-	while (!need_resched()) {
+	while (!need_resched_or_ipi()) {
 		HMT_low();
 		HMT_very_low();
 		if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
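
Both PowerPC snooze loops spin at reduced SMT priority (HMT_low()/HMT_very_low()) until either a wake flag is set or get_tb() passes snooze_exit_time, at which point the governor can promote the CPU to a deeper state; the change lets a remote CPU end the snooze via the notify bit as well. A self-contained model of that bounded poll (the clock source and bit names are stand-ins, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

#define TIF_NEED_RESCHED_BIT	(1u << 0)
#define TIF_NOTIFY_IPI_BIT	(1u << 1)

static atomic_uint thread_flags;

/* Stand-in for get_tb(): a monotonic timestamp in nanoseconds. */
static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Returns true if the snooze hit its time limit (promote to a deeper
 * state) rather than being woken by a resched or IPI notification. */
static bool snooze(unsigned long long timeout_ns)
{
	unsigned long long exit_time = now_ns() + timeout_ns;

	while (!(atomic_load_explicit(&thread_flags, memory_order_acquire) &
		 (TIF_NEED_RESCHED_BIT | TIF_NOTIFY_IPI_BIT))) {
		if (now_ns() > exit_time)
			return true;
	}
	return false;
}

int main(void)
{
	/* No waker here, so the snooze should report a timeout. */
	return snooze(1000000ull) ? 0 : 1;
}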
@@ -26,7 +26,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
 		limit = cpuidle_poll_time(drv, dev);
 
-		while (!need_resched()) {
+		while (!need_resched_or_ipi()) {
 			cpu_relax();
 			if (loop_count++ < POLL_IDLE_RELAX_COUNT)
 				continue;
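
poll_idle() checks the wake flags on every spin but reads the clock only once per POLL_IDLE_RELAX_COUNT iterations, since the timestamp read dominates the cost of the loop body. A sketch of the same batching in plain C (the clock source and bit names are stand-ins):

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

#define TIF_NEED_RESCHED_BIT	(1u << 0)
#define TIF_NOTIFY_IPI_BIT	(1u << 1)
#define POLL_IDLE_RELAX_COUNT	200	/* default in poll_state.c */

static atomic_uint thread_flags;

static unsigned long long now_ns(void)	/* stand-in for local_clock() */
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Returns true if the poll timed out rather than being woken. */
static bool poll_idle_model(unsigned long long limit_ns)
{
	unsigned long long start = now_ns();
	unsigned int loop_count = 0;

	while (!(atomic_load_explicit(&thread_flags, memory_order_acquire) &
		 (TIF_NEED_RESCHED_BIT | TIF_NOTIFY_IPI_BIT))) {
		/* cpu_relax() would go here */
		if (loop_count++ < POLL_IDLE_RELAX_COUNT)
			continue;
		loop_count = 0;
		if (now_ns() - start > limit_ns)
			return true;
	}
	return false;
}

int main(void)
{
	/* No waker: expect the time limit to fire. */
	return poll_idle_model(1000000ull) ? 0 : 1;
}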
@@ -2258,6 +2258,11 @@ static __always_inline bool need_resched(void)
 	return unlikely(tif_need_resched());
 }
 
+static __always_inline bool need_resched_or_ipi(void)
+{
+	return unlikely(tif_need_resched() || tif_notify_ipi());
+}
+
 /*
  * Wrappers for p->thread_info->cpu access. No-op on UP.
  */
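
need_resched_or_ipi() folds the new notification test into the existing need_resched() check, so the idle loops converted above break out for either reason. tif_notify_ipi() and the current_clr_notify_ipi() used later in do_idle() are not part of these hunks; by analogy with tif_need_resched() and the other TIF helpers, plausible definitions would look like the following (an assumption, not confirmed by this diff):

/* Assumed helpers, mirroring tif_need_resched(); the real flag name,
 * bit number, and home header come from elsewhere in the series. */
static __always_inline bool tif_notify_ipi(void)
{
	return test_thread_flag(TIF_NOTIFY_IPI);
}

static __always_inline void current_clr_notify_ipi(void)
{
	clear_thread_flag(TIF_NOTIFY_IPI);
}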
@@ -63,7 +63,7 @@ static __always_inline bool __must_check current_set_polling_and_test(void)
 	 */
 	smp_mb__after_atomic();
 
-	return unlikely(tif_need_resched());
+	return unlikely(need_resched_or_ipi());
 }
 
 static __always_inline bool __must_check current_clr_polling_and_test(void)
@@ -76,7 +76,7 @@ static __always_inline bool __must_check current_clr_polling_and_test(void)
 	 */
 	smp_mb__after_atomic();
 
-	return unlikely(tif_need_resched());
+	return unlikely(need_resched_or_ipi());
 }
 
 #else
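
With current_set_polling_and_test() and current_clr_polling_and_test() also reporting a pending notification, the sender side can set the notify bit on an idle, polling CPU and skip the IPI altogether, in the style of set_nr_if_polling() in kernel/sched/core.c. A hedged sketch of that sender-side helper (its name and the _TIF_NOTIFY_IPI mask are assumptions, not shown in this diff):

/* Assumed sender-side sketch modeled on set_nr_if_polling(): set the
 * notify bit only while the target is polling, so the plain store is
 * enough to wake it out of MWAIT/poll and no IPI needs to be sent. */
static bool set_notify_ipi_if_polling(struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);
	typeof(ti->flags) val = READ_ONCE(ti->flags);

	do {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;	/* not polling: fall back to an IPI */
		if (val & _TIF_NOTIFY_IPI)
			return true;	/* already notified */
	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NOTIFY_IPI));

	return true;
}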
@@ -57,7 +57,7 @@ static noinline int __cpuidle cpu_idle_poll(void)
 	ct_cpuidle_enter();
 
 	raw_local_irq_enable();
-	while (!tif_need_resched() &&
+	while (!need_resched_or_ipi() &&
 	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
 		cpu_relax();
 	raw_local_irq_disable();
@@ -153,7 +153,7 @@ static void cpuidle_idle_call(void)
 	 * Check if the idle task must be rescheduled. If it is the
 	 * case, exit the function after re-enabling the local irq.
 	 */
-	if (need_resched()) {
+	if (need_resched_or_ipi()) {
 		local_irq_enable();
 		return;
 	}
@@ -255,7 +255,7 @@ static void do_idle(void)
 	__current_set_polling();
 	tick_nohz_idle_enter();
 
-	while (!need_resched()) {
+	while (!need_resched_or_ipi()) {
 		rmb();
 
 		/*
@@ -336,6 +336,7 @@ static void do_idle(void)
 	 * RCU relies on this call to be done outside of an RCU read-side
 	 * critical section.
 	 */
+	current_clr_notify_ipi();
 	flush_smp_call_function_queue();
 
 	schedule_idle();
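
The placement of current_clr_notify_ipi() looks deliberate: senders that suppressed the IPI still queued their work on the call-function queue, so the flag is cleared on idle exit right before flush_smp_call_function_queue() drains and runs that pending work.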