stackleak: allow to specify arch specific stackleak poison function

Factor out the code that fills the stack with the stackleak poison value
in order to allow architectures to provide a faster implementation.

Acked-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20230405130841.1350565-2-hca@linux.ibm.com
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
1 file changed, 13 insertions(+), 4 deletions(-)

@@ -70,6 +70,18 @@ late_initcall(stackleak_sysctls_init);
 #define skip_erasing()	false
 #endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
 
+#ifndef __stackleak_poison
+static __always_inline void __stackleak_poison(unsigned long erase_low,
+					       unsigned long erase_high,
+					       unsigned long poison)
+{
+	while (erase_low < erase_high) {
+		*(unsigned long *)erase_low = poison;
+		erase_low += sizeof(unsigned long);
+	}
+}
+#endif
+
 static __always_inline void __stackleak_erase(bool on_task_stack)
 {
 	const unsigned long task_stack_low = stackleak_task_low_bound(current);
@@ -101,10 +113,7 @@ static __always_inline void __stackleak_erase(bool on_task_stack)
 	else
 		erase_high = task_stack_high;
 
-	while (erase_low < erase_high) {
-		*(unsigned long *)erase_low = STACKLEAK_POISON;
-		erase_low += sizeof(unsigned long);
-	}
+	__stackleak_poison(erase_low, erase_high, STACKLEAK_POISON);
 
 	/* Reset the 'lowest_stack' value for the next syscall */
 	current->lowest_stack = task_stack_high;
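
Because the generic fill loop is wrapped in #ifndef __stackleak_poison, an architecture can supply its own poison routine in a header that is included before kernel/stackleak.c and define the macro so the fallback above is skipped. The sketch below is only an illustration of that hook, not code from this series: the header location is hypothetical and the memset64()-based body merely stands in for whatever fill primitive is actually fastest on a given architecture (it also assumes a 64-bit unsigned long).

/*
 * Hypothetical arch-side override, placed in an arch header that
 * kernel/stackleak.c picks up (needs <linux/string.h> for memset64()
 * and <linux/types.h> for u64). Defining the macro makes the generic
 * #ifndef __stackleak_poison fallback drop out at compile time.
 */
#define __stackleak_poison __stackleak_poison
static __always_inline void __stackleak_poison(unsigned long erase_low,
					       unsigned long erase_high,
					       unsigned long poison)
{
	/* Fill [erase_low, erase_high) with the poison pattern. */
	memset64((u64 *)erase_low, poison,
		 (erase_high - erase_low) / sizeof(u64));
}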