cpuidle, cpu_pm: Remove RCU fiddling from cpu_pm_{enter,exit}()
All callers should still have RCU enabled.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Tony Lindgren <tony@atomide.com>
Tested-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20230112195540.190860672@infradead.org
parent a01353cf18
commit 924aed1646
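The hunks below drop the ct_irq_*() calls (the copied RCU_NONIDLE-style dance) from the two notifier helpers in kernel/cpu_pm.c. For context, the callers look roughly like this; a simplified sketch, not part of this patch, and the exact code may differ between kernel versions:

/* Callers of the notify helpers touched below (simplified sketch). With
 * cpuidle keeping RCU watching across the idle path, these run with RCU
 * enabled, so the helpers no longer need ct_irq_enter_irqson()/exit. */
int cpu_pm_enter(void)
{
	return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED);
}

int cpu_pm_exit(void)
{
	return cpu_pm_notify(CPU_PM_EXIT);
}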
@@ -30,16 +30,9 @@ static int cpu_pm_notify(enum cpu_pm_event event)
 {
 	int ret;
 
-	/*
-	 * This introduces a RCU read critical section, which could be
-	 * disfunctional in cpu idle. Copy RCU_NONIDLE code to let RCU know
-	 * this.
-	 */
-	ct_irq_enter_irqson();
 	rcu_read_lock();
 	ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
 	rcu_read_unlock();
-	ct_irq_exit_irqson();
 
 	return notifier_to_errno(ret);
 }
@@ -49,11 +42,9 @@ static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event ev
 	unsigned long flags;
 	int ret;
 
-	ct_irq_enter_irqson();
 	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
 	ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
 	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
-	ct_irq_exit_irqson();
 
 	return notifier_to_errno(ret);
 }
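For context on what these chains deliver, a minimal sketch of a consumer of the cpu_pm notifier chain follows. The function and variable names are illustrative only; the events, notifier_block and cpu_pm_register_notifier() are the existing kernel API:

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

/* Hypothetical driver-side callback invoked through the chain above. */
static int example_cpu_pm_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* save per-CPU context before the CPU powers down */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* restore per-CPU context on abort or wakeup */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_notify,
};

/* Registered e.g. from a driver's probe path:
 *	cpu_pm_register_notifier(&example_cpu_pm_nb);
 */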