timers: Implement the hierarchical pull model
Placing timers at enqueue time on a target CPU based on dubious heuristics
does not make any sense:

 1) Most timer wheel timers are canceled or rearmed before they expire.

 2) The heuristics to predict which CPU will be busy when the timer expires
    are wrong by definition.

So placing the timers at enqueue wastes precious cycles.

The proper solution to this problem is to always queue the timers on the
local CPU and allow the non pinned timers to be pulled onto a busy CPU at
expiry time.

Therefore split the timer storage into local pinned and global timers:
Local pinned timers are always expired on the CPU on which they have been
queued. Global timers can be expired on any CPU.

As long as a CPU is busy it expires both local and global timers. When a
CPU goes idle it arms for the first expiring local timer. If the first
expiring pinned (local) timer is before the first expiring movable timer,
then no action is required because the CPU will wake up before the first
movable timer expires. If the first expiring movable timer is before the
first expiring pinned (local) timer, then this timer is queued into an
idle timerqueue and eventually expired by another active CPU.

To avoid global locking the timerqueues are implemented as a hierarchy.
The lowest level of the hierarchy holds the CPUs. The CPUs are associated
to groups of 8, which are separated per node. If more than one CPU group
exists, then a second level in the hierarchy collects the groups. Depending
on the size of the system more than 2 levels are required. Each group has
a "migrator" which checks the timerqueue during the tick for remote
expirable timers.

If the last CPU in a group goes idle it reports the first expiring event
in the group up to the next group(s) in the hierarchy. If the last CPU
goes idle it arms its timer for the first system wide expiring timer to
ensure that no timer event is missed.

Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20240222103710.32582-1-anna-maria@linutronix.de
parent 57e95a5c41
commit 7ee9887703
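The changelog above describes a lowest level that groups CPUs into blocks of 8, with further levels stacked on top as the machine grows. Purely as an illustration (this sketch is not part of the patch and ignores the per NUMA node split), the number of hierarchy levels needed for a given CPU count can be computed like this:

/* Illustrative sketch only: levels needed when each timer migration group
 * holds at most 8 children, as TMIGR_CHILDREN_PER_GROUP does in this patch.
 * The per NUMA node separation mentioned in the changelog is ignored here.
 */
#include <stdio.h>

#define CHILDREN_PER_GROUP 8

static unsigned int tmigr_levels(unsigned int cpus)
{
        unsigned int levels = 0, entities = cpus;

        /* Each level reduces the entity count by a factor of 8 (rounded up)
         * until a single top level group remains.
         */
        do {
                entities = (entities + CHILDREN_PER_GROUP - 1) / CHILDREN_PER_GROUP;
                levels++;
        } while (entities > 1);

        return levels;
}

int main(void)
{
        /* 8 CPUs -> 1 level, 64 CPUs -> 2 levels, 256 CPUs -> 3 levels */
        printf("%u %u %u\n", tmigr_levels(8), tmigr_levels(64), tmigr_levels(256));
        return 0;
}

Two levels cover up to 64 CPUs; anything larger needs a third level, which matches the changelog's note that more than 2 levels are required on bigger systems.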
@@ -231,6 +231,7 @@ enum cpuhp_state {
        CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
        CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
        CPUHP_AP_PERF_CSKY_ONLINE,
+       CPUHP_AP_TMIGR_ONLINE,
        CPUHP_AP_WATCHDOG_ONLINE,
        CPUHP_AP_WORKQUEUE_ONLINE,
        CPUHP_AP_RANDOM_ONLINE,
@@ -17,6 +17,9 @@ endif
 obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o
 obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o tick-sched.o
 obj-$(CONFIG_LEGACY_TIMER_TICK) += tick-legacy.o
+ifeq ($(CONFIG_SMP),y)
+ obj-$(CONFIG_NO_HZ_COMMON) += timer_migration.o
+endif
 obj-$(CONFIG_HAVE_GENERIC_VDSO) += vsyscall.o
 obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY) += test_udelay.o
@@ -166,6 +166,7 @@ extern void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem,
 extern void timer_lock_remote_bases(unsigned int cpu);
 extern void timer_unlock_remote_bases(unsigned int cpu);
 extern bool timer_base_is_idle(void);
+extern void timer_expire_remote(unsigned int cpu);
 # endif
 #else /* CONFIG_NO_HZ_COMMON */
 static inline void timers_update_nohz(void) { }
@@ -53,6 +53,7 @@
 #include <asm/io.h>

 #include "tick-internal.h"
+#include "timer_migration.h"

 #define CREATE_TRACE_POINTS
 #include <trace/events/timer.h>
@@ -2169,6 +2170,64 @@ bool timer_base_is_idle(void)
 {
        return __this_cpu_read(timer_bases[BASE_LOCAL].is_idle);
 }

+static void __run_timer_base(struct timer_base *base);
+
+/**
+ * timer_expire_remote() - expire global timers of cpu
+ * @cpu:       Remote CPU
+ *
+ * Expire timers of global base of remote CPU.
+ */
+void timer_expire_remote(unsigned int cpu)
+{
+       struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
+
+       __run_timer_base(base);
+}
+
+static void timer_use_tmigr(unsigned long basej, u64 basem,
+                           unsigned long *nextevt, bool *tick_stop_path,
+                           bool timer_base_idle, struct timer_events *tevt)
+{
+       u64 next_tmigr;
+
+       if (timer_base_idle)
+               next_tmigr = tmigr_cpu_new_timer(tevt->global);
+       else if (tick_stop_path)
+               next_tmigr = tmigr_cpu_deactivate(tevt->global);
+       else
+               next_tmigr = tmigr_quick_check(tevt->global);
+
+       /*
+        * If the CPU is the last going idle in timer migration hierarchy, make
+        * sure the CPU will wake up in time to handle remote timers.
+        * next_tmigr == KTIME_MAX if other CPUs are still active.
+        */
+       if (next_tmigr < tevt->local) {
+               u64 tmp;
+
+               /* If we missed a tick already, force 0 delta */
+               if (next_tmigr < basem)
+                       next_tmigr = basem;
+
+               tmp = div_u64(next_tmigr - basem, TICK_NSEC);
+
+               *nextevt = basej + (unsigned long)tmp;
+               tevt->local = next_tmigr;
+       }
+}
+# else
+static void timer_use_tmigr(unsigned long basej, u64 basem,
+                           unsigned long *nextevt, bool *tick_stop_path,
+                           bool timer_base_idle, struct timer_events *tevt)
+{
+       /*
+        * Make sure first event is written into tevt->local to not miss a
+        * timer on !SMP systems.
+        */
+       tevt->local = min_t(u64, tevt->local, tevt->global);
+}
 # endif /* CONFIG_SMP */

 static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
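For reference, the *nextevt update in timer_use_tmigr() above converts the absolute CLOCK_MONOTONIC expiry returned by the hierarchy into a jiffies value relative to basej/basem. A minimal worked sketch of that arithmetic (the HZ=1000 tick length and the sample values are assumptions made up for illustration):

/* Illustration of the *nextevt computation in timer_use_tmigr() above;
 * not kernel code. Assumes HZ = 1000, i.e. TICK_NSEC = 1000000 ns.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint64_t TICK_NSEC = 1000000;     /* assumed 1 ms tick */
        uint64_t basem = 5000000000ULL;         /* "now" in ns */
        uint64_t next_tmigr = 5003500000ULL;    /* first remote timer in ns */
        unsigned long basej = 1000;             /* jiffies matching basem */
        unsigned long nextevt;

        if (next_tmigr < basem)                 /* missed tick: force 0 delta */
                next_tmigr = basem;

        /* 3.5 ms ahead -> delta of 3 jiffies -> nextevt = 1003 */
        nextevt = basej + (unsigned long)((next_tmigr - basem) / TICK_NSEC);
        printf("nextevt = %lu\n", nextevt);
        return 0;
}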
@@ -2177,7 +2236,7 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
        struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
        struct timer_base *base_local, *base_global;
        unsigned long nextevt;
-       u64 expires;
+       bool idle_is_possible;

        /*
         * Pretend that there is no timer pending if the cpu is offline.
@@ -2198,6 +2257,22 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
        nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
                                             base_global, &tevt);

+       /*
+        * If the next event is only one jiffie ahead there is no need to call
+        * timer migration hierarchy related functions. The value for the next
+        * global timer in @tevt struct equals then KTIME_MAX. This is also
+        * true, when the timer base is idle.
+        *
+        * The proper timer migration hierarchy function depends on the callsite
+        * and whether timer base is idle or not. @nextevt will be updated when
+        * this CPU needs to handle the first timer migration hierarchy
+        * event. See timer_use_tmigr() for detailed information.
+        */
+       idle_is_possible = time_after(nextevt, basej + 1);
+       if (idle_is_possible)
+               timer_use_tmigr(basej, basem, &nextevt, idle,
+                               base_local->is_idle, &tevt);
+
        /*
         * We have a fresh next event. Check whether we can forward the
         * base.
@@ -2210,7 +2285,10 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
         */
        if (idle) {
                /*
-                * Bases are idle if the next event is more than a tick away.
+                * Bases are idle if the next event is more than a tick
+                * away. Caution: @nextevt could have changed by enqueueing a
+                * global timer into timer migration hierarchy. Therefore a new
+                * check is required here.
                 *
                 * If the base is marked idle then any timer add operation must
                 * forward the base clk itself to keep granularity small. This
@@ -2223,14 +2301,23 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
                        trace_timer_base_idle(true, base_local->cpu);
                }
                *idle = base_local->is_idle;
+
+               /*
+                * When timer base is not set idle, undo the effect of
+                * tmigr_cpu_deactivate() to prevent inconsitent states - active
+                * timer base but inactive timer migration hierarchy.
+                *
+                * When timer base was already marked idle, nothing will be
+                * changed here.
+                */
+               if (!base_local->is_idle && idle_is_possible)
+                       tmigr_cpu_activate();
        }

        raw_spin_unlock(&base_global->lock);
        raw_spin_unlock(&base_local->lock);

-       expires = min_t(u64, tevt.local, tevt.global);
-
-       return cmp_next_hrtimer_event(basem, expires);
+       return cmp_next_hrtimer_event(basem, tevt.local);
 }

 /**
@@ -2238,8 +2325,11 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
  * @basej:     base time jiffies
  * @basem:     base time clock monotonic
  *
- * Returns the tick aligned clock monotonic time of the next pending
- * timer or KTIME_MAX if no timer is pending.
+ * Returns the tick aligned clock monotonic time of the next pending timer or
+ * KTIME_MAX if no timer is pending. If timer of global base was queued into
+ * timer migration hierarchy, first global timer is not taken into account. If
+ * it was the last CPU of timer migration hierarchy going idle, first global
+ * event is taken into account.
  */
 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
|
@ -2281,6 +2371,9 @@ void timer_clear_idle(void)
|
||||||
__this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
|
__this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
|
||||||
__this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
|
__this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
|
||||||
trace_timer_base_idle(false, smp_processor_id());
|
trace_timer_base_idle(false, smp_processor_id());
|
||||||
|
|
||||||
|
/* Activate without holding the timer_base->lock */
|
||||||
|
tmigr_cpu_activate();
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@@ -2350,6 +2443,9 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
        if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {
                run_timer_base(BASE_GLOBAL);
                run_timer_base(BASE_DEF);
+
+               if (is_timers_nohz_active())
+                       tmigr_handle_remote();
        }
 }

@@ -2364,7 +2460,8 @@ static void run_local_timers(void)

        for (int i = 0; i < NR_BASES; i++, base++) {
                /* Raise the softirq only if required. */
-               if (time_after_eq(jiffies, base->next_expiry)) {
+               if (time_after_eq(jiffies, base->next_expiry) ||
+                   (i == BASE_DEF && tmigr_requires_handle_remote())) {
                        raise_softirq(TIMER_SOFTIRQ);
                        return;
                }
File diff suppressed because it is too large
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _KERNEL_TIME_MIGRATION_H
+#define _KERNEL_TIME_MIGRATION_H
+
+/* Per group capacity. Must be a power of 2! */
+#define TMIGR_CHILDREN_PER_GROUP 8
+
+/**
+ * struct tmigr_event - a timer event associated to a CPU
+ * @nextevt:           The node to enqueue an event in the parent group queue
+ * @cpu:               The CPU to which this event belongs
+ * @ignore:            Hint whether the event could be ignored; it is set when
+ *                     CPU or group is active;
+ */
+struct tmigr_event {
+       struct timerqueue_node  nextevt;
+       unsigned int            cpu;
+       bool                    ignore;
+};
+
+/**
+ * struct tmigr_group - timer migration hierarchy group
+ * @lock:              Lock protecting the event information and group hierarchy
+ *                     information during setup
+ * @parent:            Pointer to the parent group
+ * @groupevt:          Next event of the group which is only used when the
+ *                     group is !active. The group event is then queued into
+ *                     the parent timer queue.
+ *                     Ignore bit of @groupevt is set when the group is active.
+ * @next_expiry:       Base monotonic expiry time of the next event of the
+ *                     group; It is used for the racy lockless check whether a
+ *                     remote expiry is required; it is always reliable
+ * @events:            Timer queue for child events queued in the group
+ * @migr_state:        State of the group (see union tmigr_state)
+ * @level:             Hierarchy level of the group; Required during setup
+ * @numa_node:         Required for setup only to make sure CPU and low level
+ *                     group information is NUMA local. It is set to NUMA node
+ *                     as long as the group level is per NUMA node (level <
+ *                     tmigr_crossnode_level); otherwise it is set to
+ *                     NUMA_NO_NODE
+ * @num_children:      Counter of group children to make sure the group is only
+ *                     filled with TMIGR_CHILDREN_PER_GROUP; Required for setup
+ *                     only
+ * @childmask:         childmask of the group in the parent group; is set
+ *                     during setup and will never change; can be read
+ *                     lockless
+ * @list:              List head that is added to the per level
+ *                     tmigr_level_list; is required during setup when a
+ *                     new group needs to be connected to the existing
+ *                     hierarchy groups
+ */
+struct tmigr_group {
+       raw_spinlock_t          lock;
+       struct tmigr_group      *parent;
+       struct tmigr_event      groupevt;
+       u64                     next_expiry;
+       struct timerqueue_head  events;
+       atomic_t                migr_state;
+       unsigned int            level;
+       int                     numa_node;
+       unsigned int            num_children;
+       u8                      childmask;
+       struct list_head        list;
+};
+
+/**
+ * struct tmigr_cpu - timer migration per CPU group
+ * @lock:              Lock protecting the tmigr_cpu group information
+ * @online:            Indicates whether the CPU is online; In deactivate path
+ *                     it is required to know whether the migrator in the top
+ *                     level group is to be set offline, while a timer is
+ *                     pending. Then another online CPU needs to be notified to
+ *                     take over the migrator role. Furthermore the information
+ *                     is required in CPU hotplug path as the CPU is able to go
+ *                     idle before the timer migration hierarchy hotplug AP is
+ *                     reached. During this phase, the CPU has to handle the
+ *                     global timers on its own and must not act as a migrator.
+ * @idle:              Indicates whether the CPU is idle in the timer migration
+ *                     hierarchy
+ * @remote:            Is set when timers of the CPU are expired remotely
+ * @tmgroup:           Pointer to the parent group
+ * @childmask:         childmask of tmigr_cpu in the parent group
+ * @wakeup:            Stores the first timer when the timer migration
+ *                     hierarchy is completely idle and remote expiry was done;
+ *                     is returned to timer code in the idle path and is only
+ *                     used in idle path.
+ * @cpuevt:            CPU event which could be enqueued into the parent group
+ */
+struct tmigr_cpu {
+       raw_spinlock_t          lock;
+       bool                    online;
+       bool                    idle;
+       bool                    remote;
+       struct tmigr_group      *tmgroup;
+       u8                      childmask;
+       u64                     wakeup;
+       struct tmigr_event      cpuevt;
+};
+
+/**
+ * union tmigr_state - state of tmigr_group
+ * @state:     Combined version of the state - only used for atomic
+ *             read/cmpxchg function
+ * @struct:    Split version of the state - only use the struct members to
+ *             update information to stay independent of endianness
+ */
+union tmigr_state {
+       u32 state;
+       /**
+        * struct - split state of tmigr_group
+        * @active:     Contains each childmask bit of the active children
+        * @migrator:   Contains childmask of the child which is migrator
+        * @seq:        Sequence counter needs to be increased when an update
+        *              to the tmigr_state is done. It prevents a race when
+        *              updates in the child groups are propagated in changed
+        *              order. Detailed information about the scenario is
+        *              given in the documentation at the begin of
+        *              timer_migration.c.
+        */
+       struct {
+               u8      active;
+               u8      migrator;
+               u16     seq;
+       } __packed;
+};
+
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern void tmigr_handle_remote(void);
+extern bool tmigr_requires_handle_remote(void);
+extern void tmigr_cpu_activate(void);
+extern u64 tmigr_cpu_deactivate(u64 nextevt);
+extern u64 tmigr_cpu_new_timer(u64 nextevt);
+extern u64 tmigr_quick_check(u64 nextevt);
+#else
+static inline void tmigr_handle_remote(void) { }
+static inline bool tmigr_requires_handle_remote(void) { return false; }
+static inline void tmigr_cpu_activate(void) { }
+#endif
+
+#endif
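The union tmigr_state declared above packs the active-children bitmask, the migrator childmask and a sequence counter into one 32 bit word, so a group's state can be read and updated with a single atomic operation while individual fields are still accessed by name. A hedged sketch of that combined/split-view pattern, written with C11 atomics rather than the kernel's atomic_t/cmpxchg helpers (the function and variable names here are invented for illustration):

/* Illustrative sketch of the combined/split state idea behind union
 * tmigr_state: fields are modified through the struct view, the whole
 * word is exchanged atomically through the u32 view. Plain C11, not
 * the kernel implementation.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

union state {
        uint32_t word;
        struct {
                uint8_t  active;        /* bitmask of active children */
                uint8_t  migrator;      /* childmask of the migrator child */
                uint16_t seq;           /* bumped on every update */
        };
};

/* Mark a child active; make it the migrator if the group had none. */
static void child_activate(_Atomic uint32_t *group_state, uint8_t childmask)
{
        union state old = { .word = atomic_load(group_state) }, upd;

        do {
                upd = old;
                upd.active |= childmask;
                if (upd.migrator == 0)
                        upd.migrator = childmask;
                upd.seq++;
        } while (!atomic_compare_exchange_weak(group_state, &old.word, upd.word));
}

int main(void)
{
        _Atomic uint32_t group_state = 0;

        child_activate(&group_state, 0x01);
        child_activate(&group_state, 0x04);

        union state s = { .word = atomic_load(&group_state) };
        printf("active=%#x migrator=%#x seq=%u\n",
               (unsigned)s.active, (unsigned)s.migrator, (unsigned)s.seq);
        return 0;
}

The sequence counter is what allows out-of-order propagation of child group updates to be detected, as the @seq comment in the header explains.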