// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>
#include <asm/unwind.h>
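
/*
 * arch_stack_walk() is the arch hook behind stack_trace_save() and
 * friends: it feeds each return address it can unwind to @consume_entry
 * until the consumer asks to stop. If no pt_regs are supplied, a dummy
 * set is seeded from the current frame (or the task's saved context) so
 * the unwinder has a starting point.
 *
 * Typical use via the generic API (sketch, not part of this file):
 *
 *	unsigned long entries[16];
 *	unsigned int n = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 */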
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	unsigned long addr;
	struct pt_regs dummyregs;
	struct unwind_state state;

	if (!regs) {
		regs = &dummyregs;

		if (task == current) {
			regs->regs[3] = (unsigned long)__builtin_frame_address(0);
			regs->csr_era = (unsigned long)__builtin_return_address(0);
		} else {
			regs->regs[3] = thread_saved_fp(task);
			regs->csr_era = thread_saved_ra(task);
		}
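		/*
		 * dummyregs is uninitialized stack memory: clear ra ($r1)
		 * and fp ($r22) so the unwinder never chases stale values.
		 */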
		regs->regs[1] = 0;
		regs->regs[22] = 0;
	}

	for (unwind_start(&state, task, regs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}
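
/*
 * Stricter variant backing stack_trace_save_tsk_reliable(), as needed
 * by e.g. live patching: any frame the unwinder cannot positively
 * verify fails the whole walk with -EINVAL rather than being silently
 * dropped.
 */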
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	unsigned long addr;
	struct pt_regs dummyregs;
	struct pt_regs *regs = &dummyregs;
	struct unwind_state state;

	if (task == current) {
		regs->regs[3] = (unsigned long)__builtin_frame_address(0);
		regs->csr_era = (unsigned long)__builtin_return_address(0);
	} else {
		regs->regs[3] = thread_saved_fp(task);
		regs->csr_era = thread_saved_ra(task);
	}
	regs->regs[1] = 0;
	regs->regs[22] = 0;

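	/*
	 * Unlike arch_stack_walk(), bail out of the walk at the first
	 * unwind error so the caller gets -EINVAL instead of a silently
	 * truncated trace.
	 */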
	for (unwind_start(&state, task, regs);
	     !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know about.
		 */
		if (!addr)
			return -EINVAL;

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	return 0;
}
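
/*
 * Read one {fp, ra} frame record from just below @fp on the user stack.
 * Page faults are disabled so the inatomic copy is safe from atomic
 * context, and the walk is rejected unless the saved fp points above
 * the current record, so a corrupted or hostile user stack cannot trap
 * us in a loop.
 */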
static int
copy_stack_frame(unsigned long fp, struct stack_frame *frame)
{
	int ret = 1;
	unsigned long err;
	unsigned long __user *user_frame_tail;

	user_frame_tail = (unsigned long __user *)(fp - sizeof(struct stack_frame));
	if (!access_ok(user_frame_tail, sizeof(*frame)))
		return 0;

	pagefault_disable();
	err = __copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame));
	if (err || (unsigned long)user_frame_tail >= frame->fp)
		ret = 0;
	pagefault_enable();

	return ret;
}
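
/*
 * Walk a user-space frame-pointer chain rooted at $r22 (fp). The psABI
 * keeps the stack 16-byte aligned, so an unaligned fp means the chain
 * is no longer trustworthy and the walk stops.
 */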
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	unsigned long fp = regs->regs[22];

	while (fp && !((unsigned long)fp & 0xf)) {
		struct stack_frame frame;

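		/* Defensive zero-init: never consume stale stack contents as a frame. */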
		frame.fp = 0;
		frame.ra = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if (!frame.ra)
			break;
		if (!consume_entry(cookie, frame.ra))
			break;
		fp = frame.fp;
	}
}