Merge branch 'asm-generic-mmiotrace' into asm-generic

A patch series from Sai Prakash Ranjan, who explains:

Generic MMIO read/write accessors, i.e. __raw_{read,write}{b,w,l,q},
are typically used to read from and write to memory-mapped registers
and can cause hangs or undefined behaviour in the following cases:

* If the access to the register space is unclocked, for example: an
  access to multimedia (MM) block registers without the MM clocks
  enabled.

* If the register space is protected and not set to be accessible from
  the non-secure world, for example: only EL3 (EL: exception level)
  access is allowed and any EL2/EL1 access is forbidden.

* If an xPU (memory/register protection unit) is controlling access to
  certain memory/register spaces for specific clients.

and more...

Such cases usually result in instant reboots, SErrors, or NoC/interconnect
hangs, and tracing these register accesses can be very helpful for
debugging such issues, both in early development and in later stages.

So use ftrace trace events to log such MMIO register accesses; these
provide a rich feature set, such as early enablement of trace events,
filtering capability, dumping ftrace logs on the console, and more.
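
For example, the events can be enabled at run time through tracefs, or
early via the trace_event= kernel parameter. A minimal sketch, assuming
tracefs is mounted at /sys/kernel/tracing (the filter field names follow
the rwmmio events added by this series):

$ echo 1 > /sys/kernel/tracing/events/rwmmio/enable
$ echo 'width == 32' > /sys/kernel/tracing/events/rwmmio/rwmmio_write/filter
$ cat /sys/kernel/tracing/trace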

Sample output (caller, access width in bits, value, and target virtual address):

rwmmio_write: __qcom_geni_serial_console_write+0x160/0x1e0 width=32 val=0xa0d5d addr=0xfffffbfffdbff700
rwmmio_post_write: __qcom_geni_serial_console_write+0x160/0x1e0 width=32 val=0xa0d5d addr=0xfffffbfffdbff700
rwmmio_read: qcom_geni_serial_poll_bit+0x94/0x138 width=32 addr=0xfffffbfffdbff610
rwmmio_post_read: qcom_geni_serial_poll_bit+0x94/0x138 width=32 val=0x0 addr=0xfffffbfffdbff610

This series is a follow-up to series [1]; a recent series [2] makes use
of both.

[1] https://lore.kernel.org/lkml/cover.1536430404.git.saiprakash.ranjan@codeaurora.org/
[2] https://lore.kernel.org/lkml/1604631386-178312-1-git-send-email-psodagud@codeaurora.org/

Note: for v4, Arnd suggested benchmarking and comparing kernel size with a
callback-based implementation; see [3] for more details, with a brief
comparison below (by total size, the callback version is about 219 kB, or
~0.6%, larger).

**Inline version with CONFIG_FTRACE=y and CONFIG_TRACE_MMIO_ACCESS=y**
$ size vmlinux
      text       data     bss       dec      hex  filename
  23884219   14284468  532568  38701255  24e88c7  vmlinux

**Callback version with CONFIG_FTRACE=y and CONFIG_TRACE_MMIO_ACCESS=y**
$ size vmlinux
      text       data     bss       dec      hex  filename
  24108179   14279596  532568  38920343  251e097  vmlinux

$ ./scripts/bloat-o-meter inline-vmlinux callback-vmlinux
add/remove: 8/3 grow/shrink: 4889/89 up/down: 242244/-11564 (230680)
Total: Before=25812612, After=26043292, chg +0.89%

[3] https://lore.kernel.org/lkml/466449a1-36da-aaa9-7e4f-477f36b52c9e@quicinc.com/

Link: https://lore.kernel.org/lkml/cover.1652891705.git.quic_saipraka@quicinc.com/

* asm-generic-mmiotrace:
  soc: qcom: geni: Disable MMIO tracing for GENI SE
  serial: qcom_geni_serial: Disable MMIO tracing for geni serial
  asm-generic/io: Add logging support for MMIO accessors
  KVM: arm64: Add a flag to disable MMIO trace for nVHE KVM
  lib: Add register read/write tracing support
  drm/meson: Fix overflow implicit truncation warnings
  irqchip/tegra: Fix overflow implicit truncation warnings
  coresight: etm4x: Use asm-generic IO memory barriers
  arm64: io: Use asm-generic high level MMIO accessors
  arch/*: Disable softirq stacks on PREEMPT_RT.
commit 93929fb6c4
Arnd Bergmann, 2022-06-27 09:07:07 +02:00
22 changed files with 303 additions and 65 deletions


@@ -1396,6 +1396,9 @@ config ARCH_HAS_ELFCORE_COMPAT
config ARCH_HAS_PARANOID_L1D_FLUSH
bool
config ARCH_HAVE_TRACE_MMIO_ACCESS
bool
config DYNAMIC_SIGFRAME
bool


@@ -70,6 +70,7 @@ static void __init init_irq_stacks(void)
}
}
#ifndef CONFIG_PREEMPT_RT
static void ____do_softirq(void *arg)
{
__do_softirq();
@@ -80,7 +81,7 @@ void do_softirq_own_stack(void)
call_with_stack(____do_softirq, NULL,
__this_cpu_read(irq_stack_ptr));
}
#endif
#endif
int arch_show_interrupts(struct seq_file *p, int prec)


@@ -49,6 +49,7 @@ config ARM64
select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_ELF_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_HAVE_TRACE_MMIO_ACCESS
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION


@@ -91,7 +91,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
}
/* IO barriers */
#define __iormb(v) \
#define __io_ar(v) \
({ \
unsigned long tmp; \
\
@@ -108,40 +108,15 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
: "memory"); \
})
#define __io_par(v) __iormb(v)
#define __iowmb() dma_wmb()
#define __io_bw() dma_wmb()
#define __io_br(v)
#define __io_aw(v)
/* arm64-specific, don't use in portable drivers */
#define __iormb(v) __io_ar(v)
#define __iowmb() __io_bw()
#define __iomb() dma_mb()
/*
* Relaxed I/O memory access primitives. These follow the Device memory
* ordering rules but do not guarantee any ordering relative to Normal memory
* accesses.
*/
#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c)))
#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
/*
* I/O memory access primitives. Reads are ordered relative to any
* following Normal memory access. Writes are ordered relative to any prior
* Normal memory access.
*/
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(__v); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); })
#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); })
/*
* I/O port access primitives.
*/


@@ -4,7 +4,12 @@
#
asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
# Tracepoint and MMIO logging symbols should not be visible at nVHE KVM as
# there is no way to execute them and any such MMIO access from nVHE KVM
# will explode instantly (Words of Marc Zyngier). So introduce a generic flag
# __DISABLE_TRACE_MMIO__ to disable MMIO tracing for nVHE KVM.
ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__
hostprogs := gen-hyprel
HOST_EXTRACFLAGS += -I$(objtree)/include


@@ -480,10 +480,12 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
*irq_stack_in_use = 1;
}
#ifndef CONFIG_PREEMPT_RT
void do_softirq_own_stack(void)
{
execute_on_irq_stack(__do_softirq, 0);
}
#endif
#endif /* CONFIG_IRQSTACKS */
/* ONLY called from entry.S:intr_extint() */


@@ -611,6 +611,7 @@ static inline void check_stack_overflow(void)
}
}
#ifndef CONFIG_PREEMPT_RT
static __always_inline void call_do_softirq(const void *sp)
{
/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
@@ -629,6 +630,7 @@ static __always_inline void call_do_softirq(const void *sp)
"r11", "r12"
);
}
#endif
static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
{
@@ -747,10 +749,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_mostly;
void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;
#ifndef CONFIG_PREEMPT_RT
void do_softirq_own_stack(void)
{
call_do_softirq(softirq_ctx[smp_processor_id()]);
}
#endif
irq_hw_number_t virq_to_hw(unsigned int virq)
{


@@ -5,9 +5,10 @@
#include <asm/lowcore.h>
#include <asm/stacktrace.h>
#ifndef CONFIG_PREEMPT_RT
static inline void do_softirq_own_stack(void)
{
call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
}
#endif
#endif /* __ASM_S390_SOFTIRQ_STACK_H */


@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu)
hardirq_ctx[cpu] = NULL;
}
#ifndef CONFIG_PREEMPT_RT
void do_softirq_own_stack(void)
{
struct thread_info *curctx;
@@ -176,6 +177,7 @@ void do_softirq_own_stack(void)
"r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
);
}
#endif
#else
static inline void handle_one_irq(unsigned int irq)
{


@@ -855,6 +855,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
set_irq_regs(old_regs);
}
#ifndef CONFIG_PREEMPT_RT
void do_softirq_own_stack(void)
{
void *orig_sp, *sp = softirq_stack[smp_processor_id()];
@@ -869,6 +870,7 @@ void do_softirq_own_stack(void)
__asm__ __volatile__("mov %0, %%sp"
: : "r" (orig_sp));
}
#endif
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)


@@ -469,17 +469,17 @@ void meson_viu_init(struct meson_drm *priv)
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) |
VIU_OSD_BLEND_REORDER(1, 0) |
VIU_OSD_BLEND_REORDER(2, 0) |
VIU_OSD_BLEND_REORDER(3, 0) |
VIU_OSD_BLEND_DIN_EN(1) |
VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
VIU_OSD_BLEND_HOLD_LINES(4),
priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
u32 val = (u32)VIU_OSD_BLEND_REORDER(0, 1) |
(u32)VIU_OSD_BLEND_REORDER(1, 0) |
(u32)VIU_OSD_BLEND_REORDER(2, 0) |
(u32)VIU_OSD_BLEND_REORDER(3, 0) |
(u32)VIU_OSD_BLEND_DIN_EN(1) |
(u32)VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
(u32)VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
(u32)VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
(u32)VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
(u32)VIU_OSD_BLEND_HOLD_LINES(4);
writel_relaxed(val, priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));


@@ -98,7 +98,7 @@ u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
}
if (!_relaxed)
__iormb(res); /* Imitate the !relaxed I/O helpers */
__io_ar(res); /* Imitate the !relaxed I/O helpers */
return res;
}
@@ -106,7 +106,7 @@ u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
if (!_relaxed)
__iowmb(); /* Imitate the !relaxed I/O helpers */
__io_bw(); /* Imitate the !relaxed I/O helpers */
if (!_64bit)
val &= GENMASK(31, 0);
@@ -130,7 +130,7 @@ static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
}
if (!_relaxed)
__iormb(res); /* Imitate the !relaxed I/O helpers */
__io_ar(res); /* Imitate the !relaxed I/O helpers */
return res;
}
@@ -138,7 +138,7 @@ static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
if (!_relaxed)
__iowmb(); /* Imitate the !relaxed I/O helpers */
__io_bw(); /* Imitate the !relaxed I/O helpers */
if (!_64bit)
val &= GENMASK(31, 0);


@@ -546,14 +546,14 @@
#define etm4x_read32(csa, offset) \
({ \
u32 __val = etm4x_relaxed_read32((csa), (offset)); \
__iormb(__val); \
__io_ar(__val); \
__val; \
})
#define etm4x_read64(csa, offset) \
({ \
u64 __val = etm4x_relaxed_read64((csa), (offset)); \
__iormb(__val); \
__io_ar(__val); \
__val; \
})
@@ -577,13 +577,13 @@
#define etm4x_write32(csa, val, offset) \
do { \
__iowmb(); \
__io_bw(); \
etm4x_relaxed_write32((csa), (val), (offset)); \
} while (0)
#define etm4x_write64(csa, val, offset) \
do { \
__iowmb(); \
__io_bw(); \
etm4x_relaxed_write64((csa), (val), (offset)); \
} while (0)


@@ -148,10 +148,10 @@ static int tegra_ictlr_suspend(void)
lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);
/* Disable COP interrupts */
writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
/* Disable CPU interrupts */
writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
/* Enable the wakeup sources of ictlr */
writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
@@ -172,12 +172,12 @@ static void tegra_ictlr_resume(void)
writel_relaxed(lic->cpu_iep[i],
ictlr + ICTLR_CPU_IEP_CLASS);
writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
writel_relaxed(lic->cpu_ier[i],
ictlr + ICTLR_CPU_IER_SET);
writel_relaxed(lic->cop_iep[i],
ictlr + ICTLR_COP_IEP_CLASS);
writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
writel_relaxed(lic->cop_ier[i],
ictlr + ICTLR_COP_IER_SET);
}
@@ -312,7 +312,7 @@ static int __init tegra_ictlr_init(struct device_node *node,
lic->base[i] = base;
/* Disable all interrupts */
writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR);
writel_relaxed(GENMASK(31, 0), base + ICTLR_CPU_IER_CLR);
/* All interrupts target IRQ */
writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS);
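
The truncation warnings fixed here and in the drm/meson change above stem
from the switch to the asm-generic accessors: writel_relaxed() becomes a
real inline function taking a u32, so passing ~0ul (all 64 bits set on
arm64) implicitly truncates the constant. A minimal sketch of the pattern
outside the kernel (names are illustrative, not from the series):

#include <stdint.h>

/* Stand-in for the asm-generic writel_relaxed(): value parameter is 32-bit. */
static inline void writel_relaxed_sketch(uint32_t value) { (void)value; }

void demo(void)
{
	writel_relaxed_sketch(~0ul);         /* GCC: -Woverflow, constant truncated
	                                        from 0xffffffffffffffff to u32 */
	writel_relaxed_sketch(0xffffffffUL); /* what GENMASK(31, 0) evaluates to:
	                                        fits in u32, no warning */
}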


@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
#define __DISABLE_TRACE_MMIO__
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/slab.h>


@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
#define __DISABLE_TRACE_MMIO__
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>


@@ -10,6 +10,7 @@
#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>
#include <linux/instruction_pointer.h>
#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
@@ -61,6 +62,44 @@
#define __io_par(v) __io_ar(v)
#endif
/*
* "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for
* specific kernel drivers in case of excessive/unwanted logging.
*
* Usage: Add a #define flag at the beginning of the driver file.
* Ex: #define __DISABLE_TRACE_MMIO__
* #include <...>
* ...
*/
#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
#include <linux/tracepoint-defs.h>
DECLARE_TRACEPOINT(rwmmio_write);
DECLARE_TRACEPOINT(rwmmio_post_write);
DECLARE_TRACEPOINT(rwmmio_read);
DECLARE_TRACEPOINT(rwmmio_post_read);
void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
unsigned long caller_addr);
void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
unsigned long caller_addr);
void log_read_mmio(u8 width, const volatile void __iomem *addr,
unsigned long caller_addr);
void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
unsigned long caller_addr);
#else
static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
unsigned long caller_addr) {}
static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
unsigned long caller_addr) {}
static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
unsigned long caller_addr) {}
static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
unsigned long caller_addr) {}
#endif /* CONFIG_TRACE_MMIO_ACCESS */
/*
* __raw_{read,write}{b,w,l,q}() access memory in native endianness.
@@ -149,9 +188,11 @@ static inline u8 readb(const volatile void __iomem *addr)
{
u8 val;
log_read_mmio(8, addr, _THIS_IP_);
__io_br();
val = __raw_readb(addr);
__io_ar(val);
log_post_read_mmio(val, 8, addr, _THIS_IP_);
return val;
}
#endif
@@ -162,9 +203,11 @@ static inline u16 readw(const volatile void __iomem *addr)
{
u16 val;
log_read_mmio(16, addr, _THIS_IP_);
__io_br();
val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
__io_ar(val);
log_post_read_mmio(val, 16, addr, _THIS_IP_);
return val;
}
#endif
@@ -175,9 +218,11 @@ static inline u32 readl(const volatile void __iomem *addr)
{
u32 val;
log_read_mmio(32, addr, _THIS_IP_);
__io_br();
val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
__io_ar(val);
log_post_read_mmio(val, 32, addr, _THIS_IP_);
return val;
}
#endif
@@ -189,9 +234,11 @@ static inline u64 readq(const volatile void __iomem *addr)
{
u64 val;
log_read_mmio(64, addr, _THIS_IP_);
__io_br();
val = __le64_to_cpu(__raw_readq(addr));
__io_ar(val);
log_post_read_mmio(val, 64, addr, _THIS_IP_);
return val;
}
#endif
@@ -201,9 +248,11 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
log_write_mmio(value, 8, addr, _THIS_IP_);
__io_bw();
__raw_writeb(value, addr);
__io_aw();
log_post_write_mmio(value, 8, addr, _THIS_IP_);
}
#endif
@@ -211,9 +260,11 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
log_write_mmio(value, 16, addr, _THIS_IP_);
__io_bw();
__raw_writew((u16 __force)cpu_to_le16(value), addr);
__io_aw();
log_post_write_mmio(value, 16, addr, _THIS_IP_);
}
#endif
@@ -221,9 +272,11 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
log_write_mmio(value, 32, addr, _THIS_IP_);
__io_bw();
__raw_writel((u32 __force)__cpu_to_le32(value), addr);
__io_aw();
log_post_write_mmio(value, 32, addr, _THIS_IP_);
}
#endif
@@ -232,9 +285,11 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
log_write_mmio(value, 64, addr, _THIS_IP_);
__io_bw();
__raw_writeq(__cpu_to_le64(value), addr);
__io_aw();
log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
#endif
#endif /* CONFIG_64BIT */
@@ -248,7 +303,12 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
return __raw_readb(addr);
u8 val;
log_read_mmio(8, addr, _THIS_IP_);
val = __raw_readb(addr);
log_post_read_mmio(val, 8, addr, _THIS_IP_);
return val;
}
#endif
@@ -256,7 +316,12 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
return __le16_to_cpu(__raw_readw(addr));
u16 val;
log_read_mmio(16, addr, _THIS_IP_);
val = __le16_to_cpu(__raw_readw(addr));
log_post_read_mmio(val, 16, addr, _THIS_IP_);
return val;
}
#endif
@@ -264,7 +329,12 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
return __le32_to_cpu(__raw_readl(addr));
u32 val;
log_read_mmio(32, addr, _THIS_IP_);
val = __le32_to_cpu(__raw_readl(addr));
log_post_read_mmio(val, 32, addr, _THIS_IP_);
return val;
}
#endif
@@ -272,7 +342,12 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
return __le64_to_cpu(__raw_readq(addr));
u64 val;
log_read_mmio(64, addr, _THIS_IP_);
val = __le64_to_cpu(__raw_readq(addr));
log_post_read_mmio(val, 64, addr, _THIS_IP_);
return val;
}
#endif
@@ -280,7 +355,9 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
log_write_mmio(value, 8, addr, _THIS_IP_);
__raw_writeb(value, addr);
log_post_write_mmio(value, 8, addr, _THIS_IP_);
}
#endif
@@ -288,7 +365,9 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
log_write_mmio(value, 16, addr, _THIS_IP_);
__raw_writew(cpu_to_le16(value), addr);
log_post_write_mmio(value, 16, addr, _THIS_IP_);
}
#endif
@@ -296,7 +375,9 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
log_write_mmio(value, 32, addr, _THIS_IP_);
__raw_writel(__cpu_to_le32(value), addr);
log_post_write_mmio(value, 32, addr, _THIS_IP_);
}
#endif
@@ -304,7 +385,9 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
log_write_mmio(value, 64, addr, _THIS_IP_);
__raw_writeq(__cpu_to_le64(value), addr);
log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
#endif


@@ -2,7 +2,7 @@
#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
#define __ASM_GENERIC_SOFTIRQ_STACK_H
#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)


@@ -0,0 +1,97 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rwmmio
#if !defined(_TRACE_RWMMIO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RWMMIO_H
#include <linux/tracepoint.h>
DECLARE_EVENT_CLASS(rwmmio_rw_template,
TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr),
TP_ARGS(caller, val, width, addr),
TP_STRUCT__entry(
__field(unsigned long, caller)
__field(unsigned long, addr)
__field(u64, val)
__field(u8, width)
),
TP_fast_assign(
__entry->caller = caller;
__entry->val = val;
__entry->addr = (unsigned long)addr;
__entry->width = width;
),
TP_printk("%pS width=%d val=%#llx addr=%#lx",
(void *)__entry->caller, __entry->width,
__entry->val, __entry->addr)
);
DEFINE_EVENT(rwmmio_rw_template, rwmmio_write,
TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr),
TP_ARGS(caller, val, width, addr)
);
DEFINE_EVENT(rwmmio_rw_template, rwmmio_post_write,
TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr),
TP_ARGS(caller, val, width, addr)
);
TRACE_EVENT(rwmmio_read,
TP_PROTO(unsigned long caller, u8 width, const volatile void __iomem *addr),
TP_ARGS(caller, width, addr),
TP_STRUCT__entry(
__field(unsigned long, caller)
__field(unsigned long, addr)
__field(u8, width)
),
TP_fast_assign(
__entry->caller = caller;
__entry->addr = (unsigned long)addr;
__entry->width = width;
),
TP_printk("%pS width=%d addr=%#lx",
(void *)__entry->caller, __entry->width, __entry->addr)
);
TRACE_EVENT(rwmmio_post_read,
TP_PROTO(unsigned long caller, u64 val, u8 width, const volatile void __iomem *addr),
TP_ARGS(caller, val, width, addr),
TP_STRUCT__entry(
__field(unsigned long, caller)
__field(unsigned long, addr)
__field(u64, val)
__field(u8, width)
),
TP_fast_assign(
__entry->caller = caller;
__entry->val = val;
__entry->addr = (unsigned long)addr;
__entry->width = width;
),
TP_printk("%pS width=%d val=%#llx addr=%#lx",
(void *)__entry->caller, __entry->width,
__entry->val, __entry->addr)
);
#endif /* _TRACE_RWMMIO_H */
#include <trace/define_trace.h>


@@ -118,6 +118,13 @@ config INDIRECT_IOMEM_FALLBACK
mmio accesses when the IO memory address is not a registered
emulated region.
config TRACE_MMIO_ACCESS
bool "Register read/write tracing"
depends on TRACING && ARCH_HAVE_TRACE_MMIO_ACCESS
help
Create tracepoints for MMIO read/write operations. These trace events
can be used for logging all MMIO read/write operations.
source "lib/crypto/Kconfig"
config CRC_CCITT


@@ -151,6 +151,8 @@ lib-y += logic_pio.o
lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o
obj-$(CONFIG_TRACE_MMIO_ACCESS) += trace_readwrite.o
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o

lib/trace_readwrite.c (new file, 47 lines)

@@ -0,0 +1,47 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Register read and write tracepoints
*
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ftrace.h>
#include <linux/module.h>
#include <asm-generic/io.h>
#define CREATE_TRACE_POINTS
#include <trace/events/rwmmio.h>
#ifdef CONFIG_TRACE_MMIO_ACCESS
void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
unsigned long caller_addr)
{
trace_rwmmio_write(caller_addr, val, width, addr);
}
EXPORT_SYMBOL_GPL(log_write_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_write);
void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
unsigned long caller_addr)
{
trace_rwmmio_post_write(caller_addr, val, width, addr);
}
EXPORT_SYMBOL_GPL(log_post_write_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_write);
void log_read_mmio(u8 width, const volatile void __iomem *addr,
unsigned long caller_addr)
{
trace_rwmmio_read(caller_addr, width, addr);
}
EXPORT_SYMBOL_GPL(log_read_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_read);
void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
unsigned long caller_addr)
{
trace_rwmmio_post_read(caller_addr, val, width, addr);
}
EXPORT_SYMBOL_GPL(log_post_read_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_read);
#endif /* CONFIG_TRACE_MMIO_ACCESS */
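
Because the tracepoints are exported with EXPORT_TRACEPOINT_SYMBOL_GPL(), a
module can also attach its own probes instead of relying on the ftrace
events. A minimal sketch, assuming CONFIG_TRACE_MMIO_ACCESS=y (the module
and probe names are illustrative, not part of the series):

// SPDX-License-Identifier: GPL-2.0-only
/* rwmmio_probe.c - illustrative consumer of the exported rwmmio tracepoints */
#include <linux/module.h>
#include <linux/printk.h>
#include <trace/events/rwmmio.h>

/* A probe receives the registration cookie followed by the TP_PROTO args. */
static void rwmmio_write_probe(void *data, unsigned long caller, u64 val,
			       u8 width, volatile void __iomem *addr)
{
	pr_debug("mmio write from %pS: width=%d val=%#llx\n",
		 (void *)caller, width, val);
}

static int __init rwmmio_probe_init(void)
{
	return register_trace_rwmmio_write(rwmmio_write_probe, NULL);
}

static void __exit rwmmio_probe_exit(void)
{
	unregister_trace_rwmmio_write(rwmmio_write_probe, NULL);
	/* Wait for in-flight probe calls before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(rwmmio_probe_init);
module_exit(rwmmio_probe_exit);
MODULE_DESCRIPTION("Example rwmmio tracepoint probe");
MODULE_LICENSE("GPL");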