Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Adjacent changes:

drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
  23c93c3b62 ("bnxt_en: do not map packet buffers twice")
  6d1add9553 ("bnxt_en: Modify TX ring indexing logic.")

tools/testing/selftests/net/Makefile
  2258b66648 ("selftests: add vlan hw filter tests")
  a0bc96c0cd ("selftests: net: verify fq per-band packet limit")

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
.mailmap
@@ -191,6 +191,10 @@ Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
Geliang Tang <geliang.tang@linux.dev> <geliang.tang@suse.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@xiaomi.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@gmail.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@163.com>
Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
@@ -42,6 +42,8 @@ properties:
- lg,acx467akm-7
# LG Corporation 7" WXGA TFT LCD panel
- lg,ld070wx3-sl01
# LG Corporation 5" HD TFT LCD panel
- lg,lh500wx1-sd03
# One Stop Displays OSD101T2587-53TS 10.1" 1920x1200 panel
- osddisplays,osd101t2587-53ts
# Panasonic 10" WUXGA TFT LCD panel

@@ -208,8 +208,6 @@ properties:
- lemaker,bl035-rgb-002
# LG 7" (800x480 pixels) TFT LCD panel
- lg,lb070wv8
# LG Corporation 5" HD TFT LCD panel
- lg,lh500wx1-sd03
# LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
- lg,lp079qx1-sp0v
# LG 9.7" (2048x1536 pixels) TFT LCD panel
@@ -6050,10 +6050,8 @@ M: Mikulas Patocka <mpatocka@redhat.com>
M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
W: http://sources.redhat.com/dm
Q: http://patchwork.kernel.org/project/dm-devel/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git
T: quilt http://people.redhat.com/agk/patches/linux/editing/
F: Documentation/admin-guide/device-mapper/
F: drivers/md/Kconfig
F: drivers/md/Makefile

@@ -9534,6 +9532,7 @@ F: drivers/bus/hisi_lpc.c
HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
M: Yisen Zhuang <yisen.zhuang@huawei.com>
M: Salil Mehta <salil.mehta@huawei.com>
M: Jijie Shao <shaojijie@huawei.com>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.hisilicon.com

@@ -12201,6 +12200,8 @@ LINUX FOR POWERPC (32-BIT AND 64-BIT)
M: Michael Ellerman <mpe@ellerman.id.au>
R: Nicholas Piggin <npiggin@gmail.com>
R: Christophe Leroy <christophe.leroy@csgroup.eu>
R: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
R: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
W: https://github.com/linuxppc/wiki/wiki
Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*
@@ -49,7 +49,6 @@ config ARC
select OF
select OF_EARLY_FLATTREE
select PCI_SYSCALL if PCI
select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32
select TRACE_IRQFLAGS_SUPPORT

@@ -232,10 +231,6 @@ config ARC_CACHE_PAGES
Note that Global I/D ENABLE + Per Page DISABLE works but corollary
Global DISABLE + Per Page ENABLE won't work

config ARC_CACHE_VIPT_ALIASING
bool "Support VIPT Aliasing D$"
depends on ARC_HAS_DCACHE && ISA_ARCOMPACT

endif #ARC_CACHE

config ARC_HAS_ICCM
@@ -44,31 +44,10 @@ void dma_cache_wback(phys_addr_t start, unsigned long sz);

#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */

#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING

#define flush_cache_mm(mm) /* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */

#else /* VIPT aliasing dcache */

/* To clear out stale userspace mappings */
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start,unsigned long end);
void flush_cache_page(struct vm_area_struct *vma,
unsigned long user_addr, unsigned long page);

/*
* To make sure that userspace mapping is flushed to memory before
* get_user_pages() uses a kernel mapping to access the page
*/
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long u_vaddr);

#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */

/*
* A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
* This works around some PIO based drivers which don't call flush_dcache_page

@@ -76,28 +55,6 @@ void flush_anon_page(struct vm_area_struct *vma,
*/
#define PG_dc_clean PG_arch_1

#define CACHE_COLORS_NUM 4
#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)

/*
* Simple wrapper over config option
* Bootup code ensures that hardware matches kernel configuration
*/
static inline int cache_is_vipt_aliasing(void)
{
return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
}

/*
* checks if two addresses (after page aligning) index into same cache set
*/
#define addr_not_cache_congruent(addr1, addr2) \
({ \
cache_is_vipt_aliasing() ? \
(CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
})

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
@@ -291,4 +291,36 @@
/* M = 8-1 N = 8 */
.endm

.macro SAVE_ABI_CALLEE_REGS
push r13
push r14
push r15
push r16
push r17
push r18
push r19
push r20
push r21
push r22
push r23
push r24
push r25
.endm

.macro RESTORE_ABI_CALLEE_REGS
pop r25
pop r24
pop r23
pop r22
pop r21
pop r20
pop r19
pop r18
pop r17
pop r16
pop r15
pop r14
pop r13
.endm

#endif
@@ -33,6 +33,91 @@
#include <asm/irqflags-compact.h>
#include <asm/thread_info.h> /* For THREAD_SIZE */

/* Note on the LD/ST addr modes with addr reg wback
*
* LD.a same as LD.aw
*
* LD.a reg1, [reg2, x] => Pre Incr
* Eff Addr for load = [reg2 + x]
*
* LD.ab reg1, [reg2, x] => Post Incr
* Eff Addr for load = [reg2]
*/

.macro PUSHAX aux
lr r9, [\aux]
push r9
.endm

.macro POPAX aux
pop r9
sr r9, [\aux]
.endm

.macro SAVE_R0_TO_R12
push r0
push r1
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
.endm

.macro RESTORE_R12_TO_R0
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
pop r1
pop r0
.endm

.macro SAVE_ABI_CALLEE_REGS
push r13
push r14
push r15
push r16
push r17
push r18
push r19
push r20
push r21
push r22
push r23
push r24
push r25
.endm

.macro RESTORE_ABI_CALLEE_REGS
pop r25
pop r24
pop r23
pop r22
pop r21
pop r20
pop r19
pop r18
pop r17
pop r16
pop r15
pop r14
pop r13
.endm

/*--------------------------------------------------------------
* Switch to Kernel Mode stack if SP points to User Mode stack
*

@@ -235,7 +320,7 @@
SWITCH_TO_KERNEL_STK

PUSH 0x003\LVL\()abcd /* Dummy ECR */
st.a 0x003\LVL\()abcd, [sp, -4] /* Dummy ECR */
sub sp, sp, 8 /* skip orig_r0 (not needed)
skip pt_regs->sp, already saved above */
@@ -21,114 +21,12 @@
#include <asm/entry-arcv2.h>
#endif

/* Note on the LD/ST addr modes with addr reg wback
*
* LD.a same as LD.aw
*
* LD.a reg1, [reg2, x] => Pre Incr
* Eff Addr for load = [reg2 + x]
*
* LD.ab reg1, [reg2, x] => Post Incr
* Eff Addr for load = [reg2]
*/

.macro PUSH reg
st.a \reg, [sp, -4]
.endm

.macro PUSHAX aux
lr r9, [\aux]
PUSH r9
.endm

.macro POP reg
ld.ab \reg, [sp, 4]
.endm

.macro POPAX aux
POP r9
sr r9, [\aux]
.endm

/*--------------------------------------------------------------
* Helpers to save/restore Scratch Regs:
* used by Interrupt/Exception Prologue/Epilogue
*-------------------------------------------------------------*/
.macro SAVE_R0_TO_R12
PUSH r0
PUSH r1
PUSH r2
PUSH r3
PUSH r4
PUSH r5
PUSH r6
PUSH r7
PUSH r8
PUSH r9
PUSH r10
PUSH r11
PUSH r12
.endm

.macro RESTORE_R12_TO_R0
POP r12
POP r11
POP r10
POP r9
POP r8
POP r7
POP r6
POP r5
POP r4
POP r3
POP r2
POP r1
POP r0

.endm

/*--------------------------------------------------------------
* Helpers to save/restore callee-saved regs:
* used by several macros below
*-------------------------------------------------------------*/
.macro SAVE_R13_TO_R25
PUSH r13
PUSH r14
PUSH r15
PUSH r16
PUSH r17
PUSH r18
PUSH r19
PUSH r20
PUSH r21
PUSH r22
PUSH r23
PUSH r24
PUSH r25
.endm

.macro RESTORE_R25_TO_R13
POP r25
POP r24
POP r23
POP r22
POP r21
POP r20
POP r19
POP r18
POP r17
POP r16
POP r15
POP r14
POP r13
.endm

/*
* save user mode callee regs as struct callee_regs
* - needed by fork/do_signal/unaligned-access-emulation.
*/
.macro SAVE_CALLEE_SAVED_USER
SAVE_R13_TO_R25
SAVE_ABI_CALLEE_REGS
.endm

/*

@@ -136,18 +34,18 @@
* - could have been changed by ptrace tracer or unaligned-access fixup
*/
.macro RESTORE_CALLEE_SAVED_USER
RESTORE_R25_TO_R13
RESTORE_ABI_CALLEE_REGS
.endm

/*
* save/restore kernel mode callee regs at the time of context switch
*/
.macro SAVE_CALLEE_SAVED_KERNEL
SAVE_R13_TO_R25
SAVE_ABI_CALLEE_REGS
.endm

.macro RESTORE_CALLEE_SAVED_KERNEL
RESTORE_R25_TO_R13
RESTORE_ABI_CALLEE_REGS
.endm

/*--------------------------------------------------------------
@@ -10,6 +10,13 @@
#include <linux/types.h>
#include <asm-generic/pgtable-nopmd.h>

/*
* Hugetlb definitions.
*/
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte(pmd_val(pmd));
@@ -54,6 +54,10 @@ struct pt_regs {
ecr_reg ecr;
};

struct callee_regs {
unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};

#define MAX_REG_OFFSET offsetof(struct pt_regs, ecr)

#else

@@ -92,16 +96,14 @@ struct pt_regs {
unsigned long status32;
};

#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)

#endif

/* Callee saved registers - need to be saved only when you are scheduled out */

struct callee_regs {
unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};

#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)

#endif

#define instruction_pointer(regs) ((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)
@@ -153,7 +153,7 @@ static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
int n = 0;
#ifdef CONFIG_ISA_ARCV2
const char *release, *cpu_nm, *isa_nm = "ARCv2";
const char *release = "", *cpu_nm = "HS38", *isa_nm = "ARCv2";
int dual_issue = 0, dual_enb = 0, mpy_opt, present;
int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk;
char mpy_nm[16], lpb_nm[32];

@@ -172,8 +172,6 @@ static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
* releases only update it.
*/

cpu_nm = "HS38";

if (info->arcver > 0x50 && info->arcver <= 0x53) {
release = arc_hs_rel[info->arcver - 0x51].str;
} else {
|
@ -62,7 +62,7 @@ struct rt_sigframe {
|
|||
unsigned int sigret_magic;
|
||||
};
|
||||
|
||||
static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
|
||||
static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
|
||||
{
|
||||
int err = 0;
|
||||
#ifndef CONFIG_ISA_ARCOMPACT
|
||||
|
@ -75,12 +75,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
|
|||
#else
|
||||
v2abi.r58 = v2abi.r59 = 0;
|
||||
#endif
|
||||
err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
|
||||
err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
|
||||
#endif
|
||||
return err;
|
||||
}
|
||||
|
||||
static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
|
||||
static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
|
||||
{
|
||||
int err = 0;
|
||||
#ifndef CONFIG_ISA_ARCOMPACT
|
||||
|
|
|
@@ -145,10 +145,9 @@ dc_chk:
p_dc->sz_k = 1 << (dbcr.sz - 1);

n += scnprintf(buf + n, len - n,
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n",
p_dc->sz_k, assoc, p_dc->line_len,
vipt ? "VIPT" : "PIPT",
p_dc->colors > 1 ? " aliasing" : "",
IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));

slc_chk:

@@ -703,51 +702,10 @@ static inline void arc_slc_enable(void)
* Exported APIs
*/

/*
* Handle cache congruency of kernel and userspace mappings of page when kernel
* writes-to/reads-from
*
* The idea is to defer flushing of kernel mapping after a WRITE, possible if:
* -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
* -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
* -In SMP, if hardware caches are coherent
*
* There's a corollary case, where kernel READs from a userspace mapped page.
* If the U-mapping is not congruent to K-mapping, former needs flushing.
*/
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping;

if (!cache_is_vipt_aliasing()) {
clear_bit(PG_dc_clean, &folio->flags);
return;
}

/* don't handle anon pages here */
mapping = folio_flush_mapping(folio);
if (!mapping)
return;

/*
* pagecache page, file not yet mapped to userspace
* Make a note that K-mapping is dirty
*/
if (!mapping_mapped(mapping)) {
clear_bit(PG_dc_clean, &folio->flags);
} else if (folio_mapped(folio)) {
/* kernel reading from page with U-mapping */
phys_addr_t paddr = (unsigned long)folio_address(folio);
unsigned long vaddr = folio_pos(folio);

/*
* vaddr is not actually the virtual address, but is
* congruent to every user mapping.
*/
if (addr_not_cache_congruent(paddr, vaddr))
__flush_dcache_pages(paddr, vaddr,
folio_nr_pages(folio));
}
clear_bit(PG_dc_clean, &folio->flags);
return;
}
EXPORT_SYMBOL(flush_dcache_folio);

@@ -921,44 +879,6 @@ noinline void flush_cache_all(void)

}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
unsigned long pfn)
{
phys_addr_t paddr = pfn << PAGE_SHIFT;

u_vaddr &= PAGE_MASK;

__flush_dcache_pages(paddr, u_vaddr, 1);

if (vma->vm_flags & VM_EXEC)
__inv_icache_pages(paddr, u_vaddr, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
unsigned long u_vaddr)
{
/* TBD: do we really need to clear the kernel mapping */
__flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1);
__flush_dcache_pages((phys_addr_t)page_address(page),
(phys_addr_t)page_address(page), 1);

}

#endif

void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma)
{

@@ -966,46 +886,11 @@ void copy_user_highpage(struct page *to, struct page *from,
struct folio *dst = page_folio(to);
void *kfrom = kmap_atomic(from);
void *kto = kmap_atomic(to);
int clean_src_k_mappings = 0;

/*
* If SRC page was already mapped in userspace AND it's U-mapping is
* not congruent with K-mapping, sync former to physical page so that
* K-mapping in memcpy below, sees the right data
*
* Note that while @u_vaddr refers to DST page's userspace vaddr, it is
* equally valid for SRC page as well
*
* For !VIPT cache, all of this gets compiled out as
* addr_not_cache_congruent() is 0
*/
if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
__flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1);
clean_src_k_mappings = 1;
}

copy_page(kto, kfrom);

/*
* Mark DST page K-mapping as dirty for a later finalization by
* update_mmu_cache(). Although the finalization could have been done
* here as well (given that both vaddr/paddr are available).
* But update_mmu_cache() already has code to do that for other
* non copied user pages (e.g. read faults which wire in pagecache page
* directly).
*/
clear_bit(PG_dc_clean, &dst->flags);

/*
* if SRC was already usermapped and non-congruent to kernel mapping
* sync the kernel mapping back to physical page
*/
if (clean_src_k_mappings) {
__flush_dcache_pages((unsigned long)kfrom,
(unsigned long)kfrom, 1);
} else {
clear_bit(PG_dc_clean, &src->flags);
}
clear_bit(PG_dc_clean, &src->flags);

kunmap_atomic(kto);
kunmap_atomic(kfrom);
@@ -1140,17 +1025,8 @@ static noinline void __init arc_cache_init_master(void)
dc->line_len, L1_CACHE_BYTES);

/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
if (is_isa_arcompact()) {
int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

if (dc->colors > 1) {
if (!handled)
panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
if (CACHE_COLORS_NUM != dc->colors)
panic("CACHE_COLORS_NUM not optimized for config\n");
} else if (handled && dc->colors == 1) {
panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
}
if (is_isa_arcompact() && dc->colors > 1) {
panic("Aliasing VIPT cache not supported\n");
}
}
@@ -14,10 +14,6 @@

#include <asm/cacheflush.h>

#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
(((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

/*
* Ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches.

@@ -31,21 +27,13 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info;

/*
* We only need to do colour alignment if D cache aliases.
*/
if (aliasing)
do_align = filp || (flags & MAP_SHARED);

/*
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
if (aliasing && flags & MAP_SHARED &&
if (flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;

@@ -55,10 +43,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return -ENOMEM;

if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
addr = PAGE_ALIGN(addr);

vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&

@@ -70,7 +55,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
@@ -478,21 +478,15 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,

create_tlb(vma, vaddr, ptep);

if (page == ZERO_PAGE(0)) {
if (page == ZERO_PAGE(0))
return;
}

/*
* Exec page : Independent of aliasing/page-color considerations,
* since icache doesn't snoop dcache on ARC, any dirty
* K-mapping of a code page needs to be wback+inv so that
* icache fetch by userspace sees code correctly.
* !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
* so userspace sees the right data.
* (Avoids the flush for Non-exec + congruent mapping case)
* For executable pages, since icache doesn't snoop dcache, any
* dirty K-mapping of a code page needs to be wback+inv so that
* icache fetch by userspace sees code correctly.
*/
if ((vma->vm_flags & VM_EXEC) ||
addr_not_cache_congruent(paddr, vaddr)) {
if (vma->vm_flags & VM_EXEC) {
struct folio *folio = page_folio(page);
int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
if (dirty) {
@@ -359,6 +359,7 @@
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
ti,sysc-delay-us = <2>;
clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;

@@ -147,7 +147,7 @@

l3-noc@44000000 {
compatible = "ti,dra7-l3-noc";
reg = <0x44000000 0x1000>,
reg = <0x44000000 0x1000000>,
<0x45000000 0x1000>;
interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
@@ -793,11 +793,16 @@ void __init omap_soc_device_init(void)

soc_dev_attr->machine = soc_name;
soc_dev_attr->family = omap_get_family();
if (!soc_dev_attr->family) {
kfree(soc_dev_attr);
return;
}
soc_dev_attr->revision = soc_rev;
soc_dev_attr->custom_attr_group = omap_soc_groups[0];

soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr->family);
kfree(soc_dev_attr);
return;
}
@@ -68,10 +68,7 @@
&emac0 {
pinctrl-names = "default";
pinctrl-0 = <&ext_rgmii_pins>;
phy-mode = "rgmii";
phy-handle = <&ext_rgmii_phy>;
allwinner,rx-delay-ps = <3100>;
allwinner,tx-delay-ps = <700>;
status = "okay";
};

@@ -13,6 +13,9 @@
};

&emac0 {
allwinner,rx-delay-ps = <3100>;
allwinner,tx-delay-ps = <700>;
phy-mode = "rgmii";
phy-supply = <&reg_dcdce>;
};

@@ -13,6 +13,8 @@
};

&emac0 {
allwinner,tx-delay-ps = <700>;
phy-mode = "rgmii-rxid";
phy-supply = <&reg_dldo1>;
};

@@ -238,6 +238,7 @@
mt6360: pmic@34 {
compatible = "mediatek,mt6360";
reg = <0x34>;
interrupt-parent = <&pio>;
interrupts = <128 IRQ_TYPE_EDGE_FALLING>;
interrupt-names = "IRQB";
interrupt-controller;
@@ -834,6 +834,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
/*
* If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
* dirtiness again.
*/
if (pte_sw_dirty(pte))
pte = pte_mkdirty(pte);
return pte;
}
@@ -44,9 +44,6 @@
return sys_ni_syscall(); \
}

#define COMPAT_SYS_NI(name) \
SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers);

#endif /* CONFIG_COMPAT */

#define __SYSCALL_DEFINEx(x, name, ...) \

@@ -81,6 +78,5 @@
}

asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused);
#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);

#endif /* __ASM_SYSCALL_WRAPPER_H */
@@ -57,7 +57,7 @@ obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o

obj-$(CONFIG_RELOCATABLE) += relocate.o

obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o

obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
@@ -2,7 +2,7 @@
#ifndef _ASM_M68K_KEXEC_H
#define _ASM_M68K_KEXEC_H

#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE

/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)

@@ -25,6 +25,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,

#endif /* __ASSEMBLY__ */

#endif /* CONFIG_KEXEC */
#endif /* CONFIG_KEXEC_CORE */

#endif /* _ASM_M68K_KEXEC_H */

@@ -25,7 +25,7 @@ obj-$(CONFIG_PCI) += pcibios.o

obj-$(CONFIG_M68K_NONCOHERENT_DMA) += dma.o

obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_BOOTINFO_PROC) += bootinfo_proc.o
obj-$(CONFIG_UBOOT) += uboot.o
@@ -422,7 +422,7 @@ static const struct plat_smp_ops octeon_smp_ops = {
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};

@@ -502,7 +502,7 @@ static const struct plat_smp_ops octeon_78xx_smp_ops = {
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};

@@ -31,7 +31,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
prepare_frametrace(newregs);
}

#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
struct kimage;
extern unsigned long kexec_args[4];
extern int (*_machine_kexec_prepare)(struct kimage *);

@@ -35,7 +35,7 @@ struct plat_smp_ops {
void (*cpu_die)(unsigned int cpu);
void (*cleanup_dead_cpu)(unsigned cpu);
#endif
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
void (*kexec_nonboot_cpu)(void);
#endif
};

@@ -93,7 +93,7 @@ static inline void __cpu_die(unsigned int cpu)
extern void __noreturn play_dead(void);
#endif

#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
static inline void kexec_nonboot_cpu(void)
{
extern const struct plat_smp_ops *mp_ops; /* private */

@@ -90,7 +90,7 @@ obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o

obj-$(CONFIG_RELOCATABLE) += relocate.o

obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o

@@ -434,7 +434,7 @@ const struct plat_smp_ops bmips43xx_smp_ops = {
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};

@@ -451,7 +451,7 @@ const struct plat_smp_ops bmips5000_smp_ops = {
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};

@@ -392,7 +392,7 @@ static void cps_smp_finish(void)
local_irq_enable();
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)

enum cpu_death {
CPU_DEATH_HALT,

@@ -429,7 +429,7 @@ static void cps_shutdown_this_cpu(enum cpu_death death)
}
}

#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE

static void cps_kexec_nonboot_cpu(void)
{

@@ -439,9 +439,9 @@ static void cps_kexec_nonboot_cpu(void)
cps_shutdown_this_cpu(CPU_DEATH_POWER);
}

#endif /* CONFIG_KEXEC */
#endif /* CONFIG_KEXEC_CORE */

#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */

#ifdef CONFIG_HOTPLUG_CPU

@@ -610,7 +610,7 @@ static const struct plat_smp_ops cps_smp_ops = {
.cpu_die = cps_cpu_die,
.cleanup_dead_cpu = cps_cleanup_dead_cpu,
#endif
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = cps_kexec_nonboot_cpu,
#endif
};

@@ -53,7 +53,7 @@ static void loongson_halt(void)
}
}

#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE

/* 0X80000000~0X80200000 is safe */
#define MAX_ARGS 64

@@ -158,7 +158,7 @@ static int __init mips_reboot_setup(void)
_machine_halt = loongson_halt;
pm_power_off = loongson_poweroff;

#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
if (WARN_ON(!kexec_argv))
return -ENOMEM;

@@ -864,7 +864,7 @@ const struct plat_smp_ops loongson3_smp_ops = {
.cpu_disable = loongson3_cpu_disable,
.cpu_die = loongson3_cpu_die,
#endif
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
@@ -301,7 +301,6 @@ CONFIG_WQ_WATCHDOG=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_CREDENTIALS=y
# CONFIG_FTRACE is not set
CONFIG_XMON=y
# CONFIG_RUNTIME_TESTING_MENU is not set
@@ -385,11 +385,15 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
* same fault IRQ is not freed by the OS before.
*/
mutex_lock(&vas_pseries_mutex);
if (migration_in_progress)
if (migration_in_progress) {
rc = -EBUSY;
else
} else {
rc = allocate_setup_window(txwin, (u64 *)&domain[0],
cop_feat_caps->win_type);
if (!rc)
caps->nr_open_wins_progress++;
}

mutex_unlock(&vas_pseries_mutex);
if (rc)
goto out;

@@ -404,8 +408,17 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
goto out_free;

txwin->win_type = cop_feat_caps->win_type;
mutex_lock(&vas_pseries_mutex);

/*
* The migration SUSPEND thread sets migration_in_progress and
* closes all open windows from the list. But the window is
* added to the list after open and modify HCALLs. So possible
* that migration_in_progress is set before modify HCALL which
* may cause some windows are still open when the hypervisor
* initiates the migration.
* So checks the migration_in_progress flag again and close all
* open windows.
*
* Possible to lose the acquired credit with DLPAR core
* removal after the window is opened. So if there are any
* closed windows (means with lost credits), do not give new

@@ -413,9 +426,11 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
* after the existing windows are reopened when credits are
* available.
*/
if (!caps->nr_close_wins) {
mutex_lock(&vas_pseries_mutex);
if (!caps->nr_close_wins && !migration_in_progress) {
list_add(&txwin->win_list, &caps->list);
caps->nr_open_windows++;
caps->nr_open_wins_progress--;
mutex_unlock(&vas_pseries_mutex);
vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
return &txwin->vas_win;

@@ -433,6 +448,12 @@ out_free:
*/
free_irq_setup(txwin);
h_deallocate_vas_window(txwin->vas_win.winid);
/*
* Hold mutex and reduce nr_open_wins_progress counter.
*/
mutex_lock(&vas_pseries_mutex);
caps->nr_open_wins_progress--;
mutex_unlock(&vas_pseries_mutex);
out:
atomic_dec(&cop_feat_caps->nr_used_credits);
kfree(txwin);

@@ -937,14 +958,14 @@ int vas_migration_handler(int action)
struct vas_caps *vcaps;
int i, rc = 0;

pr_info("VAS migration event %d\n", action);

/*
* NX-GZIP is not enabled. Nothing to do for migration.
*/
if (!copypaste_feat)
return rc;

mutex_lock(&vas_pseries_mutex);

if (action == VAS_SUSPEND)
migration_in_progress = true;
else

@@ -990,12 +1011,27 @@ int vas_migration_handler(int action)

switch (action) {
case VAS_SUSPEND:
mutex_lock(&vas_pseries_mutex);
rc = reconfig_close_windows(vcaps, vcaps->nr_open_windows,
true);
/*
* Windows are included in the list after successful
* open. So wait for closing these in-progress open
* windows in vas_allocate_window() which will be
* done if the migration_in_progress is set.
*/
while (vcaps->nr_open_wins_progress) {
mutex_unlock(&vas_pseries_mutex);
msleep(10);
mutex_lock(&vas_pseries_mutex);
}
mutex_unlock(&vas_pseries_mutex);
break;
case VAS_RESUME:
mutex_lock(&vas_pseries_mutex);
atomic_set(&caps->nr_total_credits, new_nr_creds);
rc = reconfig_open_windows(vcaps, new_nr_creds, true);
mutex_unlock(&vas_pseries_mutex);
break;
default:
/* should not happen */

@@ -1011,8 +1047,9 @@ int vas_migration_handler(int action)
goto out;
}

pr_info("VAS migration event (%d) successful\n", action);

out:
mutex_unlock(&vas_pseries_mutex);
return rc;
}

@@ -91,6 +91,8 @@ struct vas_cop_feat_caps {
struct vas_caps {
struct vas_cop_feat_caps caps;
struct list_head list; /* List of open windows */
int nr_open_wins_progress; /* Number of open windows in */
/* progress. Used in migration */
int nr_close_wins; /* closed windows in the hypervisor for DLPAR */
int nr_open_windows; /* Number of successful open windows */
u8 feat; /* Feature type */
@@ -685,7 +685,7 @@ config RISCV_BOOT_SPINWAIT
If unsure what to do here, say N.

config ARCH_SUPPORTS_KEXEC
def_bool MMU
def_bool y

config ARCH_SELECTS_KEXEC
def_bool y

@@ -693,7 +693,7 @@ config ARCH_SELECTS_KEXEC
select HOTPLUG_CPU if SMP

config ARCH_SUPPORTS_KEXEC_FILE
def_bool 64BIT && MMU
def_bool 64BIT

config ARCH_SELECTS_KEXEC_FILE
def_bool y
@@ -899,7 +899,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
#define TASK_SIZE 0xffffffffUL
#define VMALLOC_START 0
#define VMALLOC_START _AC(0, UL)
#define VMALLOC_END TASK_SIZE

#endif /* !CONFIG_MMU */

@@ -46,9 +46,6 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
return sys_ni_syscall(); \
}

#define COMPAT_SYS_NI(name) \
SYSCALL_ALIAS(__riscv_compat_sys_##name, sys_ni_posix_timers);

#endif /* CONFIG_COMPAT */

#define __SYSCALL_DEFINEx(x, name, ...) \

@@ -82,6 +79,4 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
return sys_ni_syscall(); \
}

#define SYS_NI(name) SYSCALL_ALIAS(__riscv_sys_##name, sys_ni_posix_timers);

#endif /* __ASM_SYSCALL_WRAPPER_H */
@@ -5,17 +5,19 @@

void arch_crash_save_vmcoreinfo(void)
{
VMCOREINFO_NUMBER(VA_BITS);
VMCOREINFO_NUMBER(phys_ram_base);

vmcoreinfo_append_str("NUMBER(PAGE_OFFSET)=0x%lx\n", PAGE_OFFSET);
vmcoreinfo_append_str("NUMBER(VMALLOC_START)=0x%lx\n", VMALLOC_START);
vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END);
#ifdef CONFIG_MMU
VMCOREINFO_NUMBER(VA_BITS);
vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START);
vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END);
#ifdef CONFIG_64BIT
vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR);
vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
#endif
#endif
vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n",
@@ -44,8 +44,7 @@ CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y

@@ -76,7 +75,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y

@@ -93,6 +91,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLUB_STATS=y
# CONFIG_COMPAT_BRK is not set

@@ -619,6 +618,9 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=y
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y

@@ -691,7 +693,6 @@ CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y

@@ -834,7 +835,6 @@ CONFIG_DEBUG_IRQFLAGS=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300

@@ -42,8 +42,7 @@ CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y

@@ -71,7 +70,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y

@@ -88,6 +86,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
CONFIG_ZSMALLOC_STAT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y

@@ -605,6 +604,9 @@ CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y

@@ -677,7 +679,6 @@ CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_LOCKDOWN_LSM=y

@@ -9,8 +9,7 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_CRASH_DUMP=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_MARCH_Z13=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
CONFIG_HZ_100=y

@@ -79,7 +79,7 @@ static inline int test_fp_ctl(u32 fpc)
#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)

#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7)
#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW)

struct kernel_fpu;
@@ -63,10 +63,6 @@
cond_syscall(__s390x_sys_##name); \
cond_syscall(__s390_sys_##name)

#define SYS_NI(name) \
SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); \
SYSCALL_ALIAS(__s390_sys_##name, sys_ni_posix_timers)

#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
long __s390_compat_sys##name(struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \

@@ -85,15 +81,11 @@

/*
* As some compat syscalls may not be implemented, we need to expand
* COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
* kernel/time/posix-stubs.c to cover this case as well.
* COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
*/
#define COND_SYSCALL_COMPAT(name) \
cond_syscall(__s390_compat_sys_##name)

#define COMPAT_SYS_NI(name) \
SYSCALL_ALIAS(__s390_compat_sys_##name, sys_ni_posix_timers)

#define __S390_SYS_STUBx(x, name, ...) \
long __s390_sys##name(struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \

@@ -124,9 +116,6 @@
#define COND_SYSCALL(name) \
cond_syscall(__s390x_sys_##name)

#define SYS_NI(name) \
SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers)

#define __S390_SYS_STUBx(x, fullname, name, ...)

#endif /* CONFIG_COMPAT */
@@ -28,7 +28,7 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_SH

#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
/* arch/sh/kernel/machine_kexec.c */
void reserve_crashkernel(void);

@@ -67,6 +67,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
}
#else
static inline void reserve_crashkernel(void) { }
#endif /* CONFIG_KEXEC */
#endif /* CONFIG_KEXEC_CORE */

#endif /* __ASM_SH_KEXEC_H */

@@ -33,7 +33,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
@@ -63,7 +63,7 @@ struct machine_ops machine_ops = {
.shutdown = native_machine_shutdown,
.restart = native_machine_restart,
.halt = native_machine_halt,
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
.crash_shutdown = native_machine_crash_shutdown,
#endif
};

@@ -88,7 +88,7 @@ void machine_halt(void)
machine_ops.halt();
}

#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
void machine_crash_shutdown(struct pt_regs *regs)
{
machine_ops.crash_shutdown(regs);

@@ -220,7 +220,7 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
request_resource(res, &code_resource);
request_resource(res, &data_resource);
request_resource(res, &bss_resource);
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
request_resource(res, &crashk_res);
#endif
@@ -178,7 +178,7 @@ static unsigned long get_cmdline_acpi_rsdp(void)
{
unsigned long addr = 0;

#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
char val[MAX_ADDR_LEN] = { };
int ret;
@@ -86,9 +86,6 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
return sys_ni_syscall(); \
}

#define __SYS_NI(abi, name) \
SYSCALL_ALIAS(__##abi##_##name, sys_ni_posix_timers);

#ifdef CONFIG_X86_64
#define __X64_SYS_STUB0(name) \
__SYS_STUB0(x64, sys_##name)

@@ -100,13 +97,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __X64_COND_SYSCALL(name) \
__COND_SYSCALL(x64, sys_##name)

#define __X64_SYS_NI(name) \
__SYS_NI(x64, sys_##name)
#else /* CONFIG_X86_64 */
#define __X64_SYS_STUB0(name)
#define __X64_SYS_STUBx(x, name, ...)
#define __X64_COND_SYSCALL(name)
#define __X64_SYS_NI(name)
#endif /* CONFIG_X86_64 */

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)

@@ -120,13 +114,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __IA32_COND_SYSCALL(name) \
__COND_SYSCALL(ia32, sys_##name)

#define __IA32_SYS_NI(name) \
__SYS_NI(ia32, sys_##name)
#else /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
#define __IA32_SYS_STUB0(name)
#define __IA32_SYS_STUBx(x, name, ...)
#define __IA32_COND_SYSCALL(name)
#define __IA32_SYS_NI(name)
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */

#ifdef CONFIG_IA32_EMULATION

@@ -135,8 +126,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* additional wrappers (aptly named __ia32_sys_xyzzy) which decode the
* ia32 regs in the proper order for shared or "common" syscalls. As some
* syscalls may not be implemented, we need to expand COND_SYSCALL in
* kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this
* case as well.
* kernel/sys_ni.c to cover this case as well.
*/
#define __IA32_COMPAT_SYS_STUB0(name) \
__SYS_STUB0(ia32, compat_sys_##name)

@@ -148,14 +138,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __IA32_COMPAT_COND_SYSCALL(name) \
__COND_SYSCALL(ia32, compat_sys_##name)

#define __IA32_COMPAT_SYS_NI(name) \
__SYS_NI(ia32, compat_sys_##name)

#else /* CONFIG_IA32_EMULATION */
#define __IA32_COMPAT_SYS_STUB0(name)
#define __IA32_COMPAT_SYS_STUBx(x, name, ...)
#define __IA32_COMPAT_COND_SYSCALL(name)
#define __IA32_COMPAT_SYS_NI(name)
#endif /* CONFIG_IA32_EMULATION */

@@ -175,13 +161,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __X32_COMPAT_COND_SYSCALL(name) \
__COND_SYSCALL(x64, compat_sys_##name)

#define __X32_COMPAT_SYS_NI(name) \
__SYS_NI(x64, compat_sys_##name)
#else /* CONFIG_X86_X32_ABI */
#define __X32_COMPAT_SYS_STUB0(name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...)
#define __X32_COMPAT_COND_SYSCALL(name)
#define __X32_COMPAT_SYS_NI(name)
#endif /* CONFIG_X86_X32_ABI */

@@ -212,17 +195,12 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);

/*
* As some compat syscalls may not be implemented, we need to expand
* COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
* kernel/time/posix-stubs.c to cover this case as well.
* COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
*/
#define COND_SYSCALL_COMPAT(name) \
__IA32_COMPAT_COND_SYSCALL(name) \
__X32_COMPAT_COND_SYSCALL(name)

#define COMPAT_SYS_NI(name) \
__IA32_COMPAT_SYS_NI(name) \
__X32_COMPAT_SYS_NI(name)

#endif /* CONFIG_COMPAT */

#define __SYSCALL_DEFINEx(x, name, ...) \

@@ -243,8 +221,8 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* As the generic SYSCALL_DEFINE0() macro does not decode any parameters for
* obvious reasons, and passing struct pt_regs *regs to it in %rdi does not
* hurt, we only need to re-define it here to keep the naming congruent to
* SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() and SYS_NI()
* macros to work correctly.
* SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() macro
* to work correctly.
*/
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \

@@ -257,10 +235,6 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
__X64_COND_SYSCALL(name) \
__IA32_COND_SYSCALL(name)

#define SYS_NI(name) \
__X64_SYS_NI(name) \
__IA32_SYS_NI(name)

/*
* For VSYSCALLS, we need to declare these three syscalls with the new
@@ -53,10 +53,12 @@

#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)

#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
(REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))

#define BUTTRESS_ALL_IRQ_MASK (BUTTRESS_IRQ_MASK | \
(REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)))

#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)

@@ -74,8 +76,12 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
vdev->wa.clear_runtime_mem = false;
vdev->wa.d3hot_after_power_off = true;

if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
/* Writing 1s does not clear the interrupt status register */
vdev->wa.interrupt_clear_with_0 = true;
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
}

IVPU_PRINT_WA(punit_disabled);
IVPU_PRINT_WA(clear_runtime_mem);
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <asm/unaligned.h>

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>

@@ -44,6 +45,7 @@ struct vhci_data {
bool wakeup;
__u16 msft_opcode;
bool aosp_capable;
atomic_t initialized;
};

static int vhci_open_dev(struct hci_dev *hdev)

@@ -75,11 +77,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)

memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

mutex_lock(&data->open_mutex);
skb_queue_tail(&data->readq, skb);
mutex_unlock(&data->open_mutex);

wake_up_interruptible(&data->read_wait);
if (atomic_read(&data->initialized))
wake_up_interruptible(&data->read_wait);
return 0;
}

@@ -464,7 +465,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
skb_put_u8(skb, 0xff);
skb_put_u8(skb, opcode);
put_unaligned_le16(hdev->id, skb_put(skb, 2));
skb_queue_tail(&data->readq, skb);
skb_queue_head(&data->readq, skb);
atomic_inc(&data->initialized);

wake_up_interruptible(&data->read_wait);
return 0;
@@ -2158,13 +2158,23 @@ static int sysc_reset(struct sysc *ddata)
sysc_val = sysc_read_sysconfig(ddata);
sysc_val |= sysc_mask;
sysc_write(ddata, sysc_offset, sysc_val);
/* Flush posted write */

/*
* Some devices need a delay before reading registers
* after reset. Presumably a srst_udelay is not needed
* for devices that use a rstctrl register reset.
*/
if (ddata->cfg.srst_udelay)
fsleep(ddata->cfg.srst_udelay);

/*
* Flush posted write. For devices needing srst_udelay
* this should trigger an interconnect error if the
* srst_udelay value is needed but not configured.
*/
sysc_val = sysc_read_sysconfig(ddata);
}

if (ddata->cfg.srst_udelay)
fsleep(ddata->cfg.srst_udelay);

if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);
@@ -767,6 +767,7 @@ config SM_CAMCC_8450

config SM_CAMCC_8550
tristate "SM8550 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
select SM_GCC_8550
help
Support for the camera clock controller on SM8550 devices.
@ -138,7 +138,7 @@ PNAME(mux_pll_src_5plls_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3", "usb480
PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "gpll_div2", "usb480m" };
PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "gpll_div2" };

PNAME(mux_aclk_peri_src_p) = { "gpll_peri", "cpll_peri", "gpll_div2_peri", "gpll_div3_peri" };
PNAME(mux_clk_peri_src_p) = { "gpll", "cpll", "gpll_div2", "gpll_div3" };
PNAME(mux_mmc_src_p) = { "cpll", "gpll", "gpll_div2", "xin24m" };
PNAME(mux_clk_cif_out_src_p) = { "clk_cif_src", "xin24m" };
PNAME(mux_sclk_vop_src_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3" };

@ -275,23 +275,17 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(0), 11, GFLAGS),

/* PD_PERI */
GATE(0, "gpll_peri", "gpll", CLK_IGNORE_UNUSED,
COMPOSITE(0, "clk_peri_src", mux_clk_peri_src_p, 0,
RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(2), 0, GFLAGS),
GATE(0, "cpll_peri", "cpll", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(2), 0, GFLAGS),
GATE(0, "gpll_div2_peri", "gpll_div2", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(2), 0, GFLAGS),
GATE(0, "gpll_div3_peri", "gpll_div3", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(2), 0, GFLAGS),
COMPOSITE_NOGATE(0, "aclk_peri_src", mux_aclk_peri_src_p, 0,
RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS),
COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0,

COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "clk_peri_src", 0,
RK2928_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(2), 3, GFLAGS),
COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", 0,
COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "clk_peri_src", 0,
RK2928_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(2), 2, GFLAGS),
GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 0,
GATE(ACLK_PERI, "aclk_peri", "clk_peri_src", 0,
RK2928_CLKGATE_CON(2), 1, GFLAGS),

GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0,

@ -316,7 +310,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(2), 15, GFLAGS),

COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
RK2928_CLKGATE_CON(2), 11, GFLAGS),

@ -490,7 +484,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS),
GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS),
GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS),
GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS),

@ -72,6 +72,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
RK3036_PLL_RATE(297000000, 2, 99, 4, 1, 1, 0),
RK3036_PLL_RATE(292500000, 1, 195, 4, 4, 1, 0),
RK3036_PLL_RATE(241500000, 2, 161, 4, 2, 1, 0),
RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),

@ -363,10 +363,9 @@ resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
resource_size_t base = -1;

down_read(&cxl_dpa_rwsem);
lockdep_assert_held(&cxl_dpa_rwsem);
if (cxled->dpa_res)
base = cxled->dpa_res->start;
up_read(&cxl_dpa_rwsem);

return base;
}

@ -839,6 +838,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
else
cxld->target_type = CXL_DECODER_DEVMEM;

guard(rwsem_write)(&cxl_region_rwsem);
if (cxld->id != cxl_num_decoders_committed(port)) {
dev_warn(&port->dev,
"decoder%d.%d: Committed out of order\n",

@ -227,10 +227,16 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
if (!port || !is_cxl_endpoint(port))
return -EINVAL;

rc = down_read_interruptible(&cxl_dpa_rwsem);
rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;

rc = down_read_interruptible(&cxl_dpa_rwsem);
if (rc) {
up_read(&cxl_region_rwsem);
return rc;
}

if (cxl_num_decoders_committed(port) == 0) {
/* No regions mapped to this memdev */
rc = cxl_get_poison_by_memdev(cxlmd);

@ -239,6 +245,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
rc = cxl_get_poison_by_endpoint(port);
}
up_read(&cxl_dpa_rwsem);
up_read(&cxl_region_rwsem);

return rc;
}

@ -324,10 +331,16 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;

rc = down_read_interruptible(&cxl_dpa_rwsem);
rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;

rc = down_read_interruptible(&cxl_dpa_rwsem);
if (rc) {
up_read(&cxl_region_rwsem);
return rc;
}

rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
goto out;

@ -355,6 +368,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
out:
up_read(&cxl_dpa_rwsem);
up_read(&cxl_region_rwsem);

return rc;
}

@ -372,10 +386,16 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;

rc = down_read_interruptible(&cxl_dpa_rwsem);
rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;

rc = down_read_interruptible(&cxl_dpa_rwsem);
if (rc) {
up_read(&cxl_region_rwsem);
return rc;
}

rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
goto out;

@ -412,6 +432,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
out:
up_read(&cxl_dpa_rwsem);
up_read(&cxl_region_rwsem);

return rc;
}

@ -620,7 +620,7 @@ void read_cdat_data(struct cxl_port *port)
struct pci_dev *pdev = NULL;
struct cxl_memdev *cxlmd;
size_t cdat_length;
void *cdat_table;
void *cdat_table, *cdat_buf;
int rc;

if (is_cxl_memdev(uport)) {

@ -651,16 +651,15 @@ void read_cdat_data(struct cxl_port *port)
return;
}

cdat_table = devm_kzalloc(dev, cdat_length + sizeof(__le32),
GFP_KERNEL);
if (!cdat_table)
cdat_buf = devm_kzalloc(dev, cdat_length + sizeof(__le32), GFP_KERNEL);
if (!cdat_buf)
return;

rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
rc = cxl_cdat_read_table(dev, cdat_doe, cdat_buf, &cdat_length);
if (rc)
goto err;

cdat_table = cdat_table + sizeof(__le32);
cdat_table = cdat_buf + sizeof(__le32);
if (cdat_checksum(cdat_table, cdat_length))
goto err;

@ -670,7 +669,7 @@ void read_cdat_data(struct cxl_port *port)

err:
/* Don't leave table data allocated on error */
devm_kfree(dev, cdat_table);
devm_kfree(dev, cdat_buf);
dev_err(dev, "Failed to read/validate CDAT.\n");
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);

@ -23,7 +23,7 @@ const struct device_type cxl_pmu_type = {

static void remove_dev(void *dev)
{
device_del(dev);
device_unregister(dev);
}

int devm_cxl_pmu_add(struct device *parent, struct cxl_pmu_regs *regs,

@ -226,9 +226,9 @@ static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *at
char *buf)
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
u64 base = cxl_dpa_resource_start(cxled);

return sysfs_emit(buf, "%#llx\n", base);
guard(rwsem_read)(&cxl_dpa_rwsem);
return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
}
static DEVICE_ATTR_RO(dpa_resource);

@ -2467,10 +2467,6 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
struct cxl_poison_context ctx;
int rc = 0;

rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;

ctx = (struct cxl_poison_context) {
.port = port
};

@ -2480,7 +2476,6 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
&ctx);

up_read(&cxl_region_rwsem);
return rc;
}

@ -828,6 +828,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
dma_pool_destroy(fsl_chan->tcd_pool);
fsl_chan->tcd_pool = NULL;
fsl_chan->is_sw = false;
fsl_chan->srcid = 0;
}

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)

@ -396,9 +396,8 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (IS_ERR(link)) {
dev_err(dev, "Failed to add device_link to %d: %ld\n", i,
PTR_ERR(link));
if (!link) {
dev_err(dev, "Failed to add device_link to %d\n", i);
return -EINVAL;
}

@ -631,6 +630,8 @@ static int fsl_edma_suspend_late(struct device *dev)

for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
if (fsl_edma->chan_masked & BIT(i))
continue;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
/* Make sure chan is idle or will force disable. */
if (unlikely(!fsl_chan->idle)) {

@ -655,13 +656,16 @@ static int fsl_edma_resume_early(struct device *dev)

for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
if (fsl_edma->chan_masked & BIT(i))
continue;
fsl_chan->pm_state = RUNNING;
edma_write_tcdreg(fsl_chan, 0, csr);
if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
}

edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

return 0;
}

@ -440,12 +440,14 @@ union wqcfg {
/*
* This macro calculates the offset into the GRPCFG register
* idxd - struct idxd *
* n - wq id
* ofs - the index of the 32b dword for the config register
* n - group id
* ofs - the index of the 64b qword for the config register
*
* The WQCFG register block is divided into groups per each wq. The n index
* allows us to move to the register group that's for that particular wq.
* Each register is 32bits. The ofs gives us the number of register to access.
* The GRPCFG register block is divided into three sub-registers, which
* are GRPWQCFG, GRPENGCFG and GRPFLGCFG. The n index allows us to move
* to the register block that contains the three sub-registers.
* Each register block is 64bits. And the ofs gives us the offset
* within the GRPWQCFG register to access.
*/
#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
(n) * GRPCFG_SIZE + sizeof(u64) * (ofs))

@ -182,13 +182,6 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)

portal = idxd_wq_portal_addr(wq);

/*
* The wmb() flushes writes to coherent DMA data before
* possibly triggering a DMA read. The wmb() is necessary
* even on UP because the recipient is a device.
*/
wmb();

/*
* Pending the descriptor to the lockless list for the irq_entry
* that we designated the descriptor to.

@ -199,6 +192,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
llist_add(&desc->llnode, &ie->pending_llist);
}

/*
* The wmb() flushes writes to coherent DMA data before
* possibly triggering a DMA read. The wmb() is necessary
* even on UP because the recipient is a device.
*/
wmb();

if (wq_dedicated(wq)) {
iosubmit_cmds512(portal, desc->hw, 1);
} else {

@ -1246,8 +1246,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
enum dma_slave_buswidth max_width;
struct stm32_dma_desc *desc;
size_t xfer_count, offset;
u32 num_sgs, best_burst, dma_burst, threshold;
int i;
u32 num_sgs, best_burst, threshold;
int dma_burst, i;

num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);

@ -1266,6 +1266,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
threshold, max_width);
dma_burst = stm32_dma_get_burst(chan, best_burst);
if (dma_burst < 0) {
kfree(desc);
return NULL;
}

stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
desc->sg_req[i].chan_reg.dma_scr =

@ -74,7 +74,9 @@ static struct psil_ep am62_src_ep_map[] = {
PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
/* PDMA_MAIN0 - SPI0-3 */
/* PDMA_MAIN0 - SPI0-2 */
PSIL_PDMA_XY_PKT(0x4300),
PSIL_PDMA_XY_PKT(0x4301),
PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303),
PSIL_PDMA_XY_PKT(0x4304),

@ -85,8 +87,6 @@ static struct psil_ep am62_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4309),
PSIL_PDMA_XY_PKT(0x430a),
PSIL_PDMA_XY_PKT(0x430b),
PSIL_PDMA_XY_PKT(0x430c),
PSIL_PDMA_XY_PKT(0x430d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0x4400),
PSIL_PDMA_XY_PKT(0x4401),

@ -141,7 +141,9 @@ static struct psil_ep am62_dst_ep_map[] = {
/* SAUL */
PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
/* PDMA_MAIN0 - SPI0-3 */
/* PDMA_MAIN0 - SPI0-2 */
PSIL_PDMA_XY_PKT(0xc300),
PSIL_PDMA_XY_PKT(0xc301),
PSIL_PDMA_XY_PKT(0xc302),
PSIL_PDMA_XY_PKT(0xc303),
PSIL_PDMA_XY_PKT(0xc304),

@ -152,8 +154,6 @@ static struct psil_ep am62_dst_ep_map[] = {
PSIL_PDMA_XY_PKT(0xc309),
PSIL_PDMA_XY_PKT(0xc30a),
PSIL_PDMA_XY_PKT(0xc30b),
PSIL_PDMA_XY_PKT(0xc30c),
PSIL_PDMA_XY_PKT(0xc30d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0xc400),
PSIL_PDMA_XY_PKT(0xc401),

@ -84,7 +84,9 @@ static struct psil_ep am62a_src_ep_map[] = {
PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
/* PDMA_MAIN0 - SPI0-3 */
/* PDMA_MAIN0 - SPI0-2 */
PSIL_PDMA_XY_PKT(0x4300),
PSIL_PDMA_XY_PKT(0x4301),
PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303),
PSIL_PDMA_XY_PKT(0x4304),

@ -95,8 +97,6 @@ static struct psil_ep am62a_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4309),
PSIL_PDMA_XY_PKT(0x430a),
PSIL_PDMA_XY_PKT(0x430b),
PSIL_PDMA_XY_PKT(0x430c),
PSIL_PDMA_XY_PKT(0x430d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0x4400),
PSIL_PDMA_XY_PKT(0x4401),

@ -151,7 +151,9 @@ static struct psil_ep am62a_dst_ep_map[] = {
/* SAUL */
PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
/* PDMA_MAIN0 - SPI0-3 */
/* PDMA_MAIN0 - SPI0-2 */
PSIL_PDMA_XY_PKT(0xc300),
PSIL_PDMA_XY_PKT(0xc301),
PSIL_PDMA_XY_PKT(0xc302),
PSIL_PDMA_XY_PKT(0xc303),
PSIL_PDMA_XY_PKT(0xc304),

@ -162,8 +164,6 @@ static struct psil_ep am62a_dst_ep_map[] = {
PSIL_PDMA_XY_PKT(0xc309),
PSIL_PDMA_XY_PKT(0xc30a),
PSIL_PDMA_XY_PKT(0xc30b),
PSIL_PDMA_XY_PKT(0xc30c),
PSIL_PDMA_XY_PKT(0xc30d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0xc400),
PSIL_PDMA_XY_PKT(0xc401),

@ -966,10 +966,10 @@ static int mc_probe(struct platform_device *pdev)
edac_mc_id = emif_get_id(pdev->dev.of_node);

regval = readl(ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET);
num_chans = FIELD_PREP(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval);
num_chans = FIELD_GET(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval);
num_chans++;

num_csrows = FIELD_PREP(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval);
num_csrows = FIELD_GET(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval);
num_csrows *= 2;
if (!num_csrows)
num_csrows = 1;

@ -4516,8 +4516,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)

amdgpu_ras_suspend(adev);

amdgpu_ttm_set_buffer_funcs_status(adev, false);

amdgpu_device_ip_suspend_phase1(adev);

if (!adev->in_s0ix)

@ -1343,6 +1343,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)

abo = ttm_to_amdgpu_bo(bo);

WARN_ON(abo->vm_bo);

if (abo->kfd_bo)
amdgpu_amdkfd_release_notify(abo);

@ -642,13 +642,14 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)

if (!entry->bo)
return;

entry->bo->vm_bo = NULL;
shadow = amdgpu_bo_shadowed(entry->bo);
if (shadow) {
ttm_bo_set_bulk_move(&shadow->tbo, NULL);
amdgpu_bo_unref(&shadow);
}
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
entry->bo->vm_bo = NULL;

spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);

@ -155,13 +155,6 @@ static int jpeg_v4_0_5_hw_init(void *handle)
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;

adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);

WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
VCN_JPEG_DB_CTRL__EN_MASK);

r = amdgpu_ring_test_helper(ring);
if (r)
return r;

@ -336,6 +329,14 @@ static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, true);

/* doorbell programming is done for every playback */
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);

WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
VCN_JPEG_DB_CTRL__EN_MASK);

/* disable power gating */
r = jpeg_v4_0_5_disable_static_power_gating(adev);
if (r)

@ -813,12 +813,12 @@ static int sdma_v2_4_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;

adev->sdma.num_instances = SDMA_MAX_INSTANCE;

r = sdma_v2_4_init_microcode(adev);
if (r)
return r;

adev->sdma.num_instances = SDMA_MAX_INSTANCE;

sdma_v2_4_set_ring_funcs(adev);
sdma_v2_4_set_buffer_funcs(adev);
sdma_v2_4_set_vm_pte_funcs(adev);

@ -1643,6 +1643,32 @@ static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;

/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
* disallow GFXOFF in some cases leading to
* hangs in SDMA. Disallow GFXOFF while SDMA is active.
* We can probably just limit this to 5.2.3,
* but it shouldn't hurt for other parts since
* this GFXOFF will be disallowed anyway when SDMA is
* active, this just makes it explicit.
*/
amdgpu_gfx_off_ctrl(adev, false);
}

static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;

/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
* disallow GFXOFF in some cases leading to
* hangs in SDMA. Allow GFXOFF when SDMA is complete.
*/
amdgpu_gfx_off_ctrl(adev, true);
}

const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.name = "sdma_v5_2",
.early_init = sdma_v5_2_early_init,

@ -1690,6 +1716,8 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
.test_ib = sdma_v5_2_ring_test_ib,
.insert_nop = sdma_v5_2_ring_insert_nop,
.pad_ib = sdma_v5_2_ring_pad_ib,
.begin_use = sdma_v5_2_ring_begin_use,
.end_use = sdma_v5_2_ring_end_use,
.emit_wreg = sdma_v5_2_ring_emit_wreg,
.emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,

@ -5182,6 +5182,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
if (plane->type == DRM_PLANE_TYPE_CURSOR)
return;

if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
goto ffu;

num_clips = drm_plane_get_damage_clips_count(new_plane_state);
clips = drm_plane_get_damage_clips(new_plane_state);

@ -465,6 +465,7 @@ struct dc_cursor_mi_param {
struct fixed31_32 v_scale_ratio;
enum dc_rotation_angle rotation;
bool mirror;
struct dc_stream_state *stream;
};

/* IPP related types */

@ -1077,8 +1077,16 @@ void hubp2_cursor_set_position(
if (src_y_offset < 0)
src_y_offset = 0;
/* Save necessary cursor info x, y position. w, h is saved in attribute func. */
hubp->cur_rect.x = src_x_offset + param->viewport.x;
hubp->cur_rect.y = src_y_offset + param->viewport.y;
if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
param->rotation != ROTATION_ANGLE_0) {
hubp->cur_rect.x = 0;
hubp->cur_rect.y = 0;
hubp->cur_rect.w = param->stream->timing.h_addressable;
hubp->cur_rect.h = param->stream->timing.v_addressable;
} else {
hubp->cur_rect.x = src_x_offset + param->viewport.x;
hubp->cur_rect.y = src_y_offset + param->viewport.y;
}
}

void hubp2_clk_cntl(struct hubp *hubp, bool enable)

@ -124,7 +124,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 600.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 186.0,
.dtbclk_mhz = 625.0,
.dtbclk_mhz = 600.0,
},
{
.state = 1,

@ -133,7 +133,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
.dtbclk_mhz = 625.0,
.dtbclk_mhz = 600.0,
},
{
.state = 2,

@ -142,7 +142,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 209.0,
.dtbclk_mhz = 625.0,
.dtbclk_mhz = 600.0,
},
{
.state = 3,

@ -151,7 +151,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 371.0,
.dtbclk_mhz = 625.0,
.dtbclk_mhz = 600.0,
},
{
.state = 4,

@ -160,7 +160,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 417.0,
.dtbclk_mhz = 625.0,
.dtbclk_mhz = 600.0,
},
},
.num_states = 5,

@ -348,6 +348,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =

@ -360,6 +362,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
clk_table->num_entries;
}
}

@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.NoOfDPPThisState,
mode_lib->ms.dpte_group_bytes,
s->HostVMInefficiencyFactor,
mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);

s->NextMaxVStartup = s->MaxVStartupAllPlanes[j];

@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k],
mode_lib->ms.MetaRowBytes[j][k],
mode_lib->ms.DPTEBytesPerRow[j][k],

@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState;

@ -7957,7 +7957,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal;

@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];

@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
mode_lib->ms.cache_display_cfg.hw.DPPPerSurface,
locals->dpte_group_bytes,
s->HostVMInefficiencyFactor,
mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);

locals->TCalc = 24.0 / locals->DCFCLKDeepSleep;

@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];

@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.soc.hostvm_min_page_size_kbytes,
locals->PDEAndMetaPTEBytesFrame[k],
locals->MetaRowByte[k],
locals->PixelPTEBytesPerRow[k],

@ -423,8 +423,9 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}

for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels; i++) {
p->in_states->state_array[i].dtbclk_mhz =
dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz;
if (dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz > 0)
p->in_states->state_array[i].dtbclk_mhz =
dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz;
}

for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels; i++) {

@ -3417,7 +3417,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
.mirror = pipe_ctx->plane_state->horizontal_mirror,
.stream = pipe_ctx->stream,
};
bool pipe_split_on = false;
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||

@ -287,8 +287,8 @@ bool set_default_brightness_aux(struct dc_link *link)
if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
// if > 5000, it might be wrong readback
if (default_backlight > 5000000)
// if < 1 nits or > 5000, it might be wrong readback
if (default_backlight < 1000 || default_backlight > 5000000)
default_backlight = 150000;

return edp_set_backlight_level_nits(link, true,

@ -839,6 +839,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
((dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x08) ||
(dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x07)))
isPSRSUSupported = false;
else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
isPSRSUSupported = false;
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
isPSRSUSupported = true;
}

@ -2198,10 +2198,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) {
if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_mclk_od)) {
} else if (DEVICE_ATTR_IS(pp_mclk_od)) {
if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_sclk_od)) {
} else if (DEVICE_ATTR_IS(pp_sclk_od)) {
if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(apu_thermal_cap)) {

@ -236,7 +236,7 @@ static int
drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
{
if (file_priv->was_master &&
rcu_access_pointer(file_priv->pid) == task_pid(current))
rcu_access_pointer(file_priv->pid) == task_tgid(current))
return 0;

if (!capable(CAP_SYS_ADMIN))

@ -715,8 +715,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
struct drm_modeset_acquire_ctx ctx;
int ret;
int i;
int ret, i, num_connectors = 0;

if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;

@ -871,6 +870,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
connector->name);

connector_set[i] = connector;
num_connectors++;
}
}

@ -879,7 +879,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
set.y = crtc_req->y;
set.mode = mode;
set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors;
set.num_connectors = num_connectors;
set.fb = fb;

if (drm_drv_uses_atomic_modeset(dev))

@ -892,7 +892,7 @@ out:
drm_framebuffer_put(fb);

if (connector_set) {
for (i = 0; i < crtc_req->count_connectors; i++) {
for (i = 0; i < num_connectors; i++) {
if (connector_set[i])
drm_connector_put(connector_set[i]);
}

@ -2309,7 +2309,8 @@ int drm_edid_override_connector_update(struct drm_connector *connector)

override = drm_edid_override_get(connector);
if (override) {
num_modes = drm_edid_connector_update(connector, override);
if (drm_edid_connector_update(connector, override) == 0)
num_modes = drm_edid_connector_add_modes(connector);

drm_edid_free(override);

@ -650,19 +650,30 @@ intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 link_bw, u8 rate_select)
{
u8 link_config[2];
u8 lane_count = crtc_state->lane_count;

/* Write the link configuration data */
link_config[0] = link_bw;
link_config[1] = crtc_state->lane_count;
if (crtc_state->enhanced_framing)
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

/* eDP 1.4 rate select method. */
if (!link_bw)
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);
if (link_bw) {
/* DP and eDP v1.3 and earlier link bw set method. */
u8 link_config[] = { link_bw, lane_count };

drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config,
ARRAY_SIZE(link_config));
} else {
/*
* eDP v1.4 and later link rate set method.
*
* eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
* DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
*
* eDP v1.5 sinks allow choosing either, and the last choice
* shall be active.
*/
drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select);
}
}

/*

@ -1374,7 +1374,8 @@ plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
struct drm_i915_private *i915 = to_i915(fb->base.dev);
unsigned int stride_tiles;

if (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
src_stride_tiles < dst_stride_tiles)
stride_tiles = src_stride_tiles;
else
stride_tiles = dst_stride_tiles;

@ -1501,8 +1502,20 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p

size += remap_info->size;
} else {
unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane,
remap_info->width);
unsigned int dst_stride;

/*
* The hardware automagically calculates the CCS AUX surface
* stride from the main surface stride so can't really remap a
* smaller subset (unless we'd remap in whole AUX page units).
*/
if (intel_fb_needs_pot_stride_remap(fb) &&
intel_fb_is_ccs_modifier(fb->base.modifier))
dst_stride = remap_info->src_stride;
else
dst_stride = remap_info->width;

dst_stride = plane_view_dst_stride_tiles(fb, color_plane, dst_stride);

assign_chk_ovf(i915, remap_info->dst_stride, dst_stride);
color_plane_info->mapping_stride = dst_stride *

@ -504,7 +504,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
{
struct drm_plane *plane = NULL;
struct intel_plane *intel_plane;
struct intel_plane_state *plane_state = NULL;
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct drm_atomic_state *drm_state = crtc_state->uapi.state;

@ -536,6 +535,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,

/* walkthrough scaler_users bits and start assigning scalers */
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
struct intel_plane_state *plane_state = NULL;
int *scaler_id;
const char *name;
int idx, ret;

@ -1293,7 +1293,7 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
if (msg)
drm_notice(&engine->i915->drm,
"Resetting %s for %s\n", engine->name, msg);
atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);

ret = intel_gt_reset_engine(engine);
if (ret) {

@ -5001,7 +5001,8 @@ static void capture_error_state(struct intel_guc *guc,
if (match) {
intel_engine_set_hung_context(e, ce);
engine_mask |= e->mask;
atomic_inc(&i915->gpu_error.reset_engine_count[e->uabi_class]);
i915_increase_reset_engine_count(&i915->gpu_error,
e);
}
}

@ -5013,7 +5014,7 @@ static void capture_error_state(struct intel_guc *guc,
} else {
intel_engine_set_hung_context(ce->engine, ce);
engine_mask = ce->engine->mask;
atomic_inc(&i915->gpu_error.reset_engine_count[ce->engine->uabi_class]);
i915_increase_reset_engine_count(&i915->gpu_error, ce->engine);
}

with_intel_runtime_pm(&i915->runtime_pm, wakeref)