mm/treewide: replace pud_large() with pud_leaf()
pud_large() is always defined as pud_leaf().  Merge their usages.  Chose
pud_leaf() because pud_leaf() is a global API, while pud_large() is not.

Link: https://lkml.kernel.org/r/20240305043750.93762-9-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 0a845e0f63
parent 2f709f7bfd
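Every hunk below is the same one-line conversion. As a minimal, self-contained sketch of why the rename is behaviour-neutral (plain userspace C, not kernel code: the pud_t type, the leaf bit, and the walk below are simplified stand-ins, assuming only the alias relationship the commit message states):

/*
 * Standalone illustration, not kernel code.  The commit message says
 * pud_large() is always defined as pud_leaf(); under that assumption
 * switching every caller to the generic pud_leaf() API cannot change
 * behaviour.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned long val; } pud_t;	/* stand-in for the real pud_t */

#define PUD_LEAF_BIT	0x1UL			/* stand-in "this entry maps a huge page" bit */

static bool pud_leaf(pud_t pud)
{
	return pud.val & PUD_LEAF_BIT;
}

/* Before this patch, arch code exposed the same test under a second name: */
#define pud_large(pud)	pud_leaf(pud)

static void walk_one(pud_t pud)
{
	/* Typical caller pattern converted by this patch: stop the walk at a leaf. */
	if (pud_leaf(pud))			/* was: pud_large(pud) */
		printf("leaf entry: stop at PUD level\n");
	else
		printf("table entry: descend to the PMD level\n");
}

int main(void)
{
	pud_t leaf  = { .val = PUD_LEAF_BIT };
	pud_t table = { .val = 0 };

	walk_one(leaf);
	walk_one(table);
	return 0;
}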
@@ -130,7 +130,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr,
 
 	WARN_ON(pte_hw_valid(pud_pte(*pudp)));
 	assert_spin_locked(pud_lockptr(mm, pudp));
-	WARN_ON(!(pud_large(pud)));
+	WARN_ON(!(pud_leaf(pud)));
 #endif
 	trace_hugepage_set_pud(addr, pud_val(pud));
 	return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
@@ -366,7 +366,7 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
 			}
 			pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
 			pud_populate(&init_mm, pud, pmd);
-		} else if (pud_large(*pud)) {
+		} else if (pud_leaf(*pud)) {
 			continue;
 		}
 		pgtable_pmd_populate(pud, addr, next, mode);
@@ -730,7 +730,7 @@ static inline int pud_bad(pud_t pud)
 {
 	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
 
-	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
+	if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
 		return 1;
 	if (type < _REGION_ENTRY_TYPE_R3)
 		return 0;
@@ -1400,7 +1400,7 @@ static inline unsigned long pud_deref(pud_t pud)
 	unsigned long origin_mask;
 
 	origin_mask = _REGION_ENTRY_ORIGIN;
-	if (pud_large(pud))
+	if (pud_leaf(pud))
 		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
 	return (unsigned long)__va(pud_val(pud) & origin_mask);
 }
@@ -598,7 +598,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 	pud = pud_offset(p4d, vmaddr);
 	VM_BUG_ON(pud_none(*pud));
 	/* large puds cannot yet be handled */
-	if (pud_large(*pud))
+	if (pud_leaf(*pud))
 		return -EFAULT;
 	pmd = pmd_offset(pud, vmaddr);
 	VM_BUG_ON(pmd_none(*pmd));
@@ -224,7 +224,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		if (p4d_present(*p4dp)) {
 			pudp = pud_offset(p4dp, addr);
 			if (pud_present(*pudp)) {
-				if (pud_large(*pudp))
+				if (pud_leaf(*pudp))
 					return (pte_t *) pudp;
 				pmdp = pmd_offset(pudp, addr);
 			}
@@ -240,7 +240,7 @@ int pmd_huge(pmd_t pmd)
 
 int pud_huge(pud_t pud)
 {
-	return pud_large(pud);
+	return pud_leaf(pud);
 }
 
 bool __init arch_hugetlb_valid_size(unsigned long size)
@@ -274,7 +274,7 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
 		if (pud_none(*pudp))
 			return -EINVAL;
 		next = pud_addr_end(addr, end);
-		if (pud_large(*pudp)) {
+		if (pud_leaf(*pudp)) {
 			need_split = !!(flags & SET_MEMORY_4K);
 			need_split |= !!(addr & ~PUD_MASK);
 			need_split |= !!(addr + PUD_SIZE > next);
@@ -470,7 +470,7 @@ static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
 		return -ENOENT;
 
 	/* Large PUDs are not supported yet. */
-	if (pud_large(*pud))
+	if (pud_leaf(*pud))
 		return -EFAULT;
 
 	*pmdp = pmd_offset(pud, addr);
@@ -329,7 +329,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 		if (!add) {
 			if (pud_none(*pud))
 				continue;
-			if (pud_large(*pud)) {
+			if (pud_leaf(*pud)) {
 				if (IS_ALIGNED(addr, PUD_SIZE) &&
 				    IS_ALIGNED(next, PUD_SIZE)) {
 					pud_clear(pud);
@@ -350,7 +350,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 			if (!pmd)
 				goto out;
 			pud_populate(&init_mm, pud, pmd);
-		} else if (pud_large(*pud)) {
+		} else if (pud_leaf(*pud)) {
 			continue;
 		}
 		ret = modify_pmd_table(pud, addr, next, add, direct, altmap);
@@ -599,7 +599,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
 		if (!pmd)
 			goto out;
 		pud_populate(&init_mm, pud, pmd);
-	} else if (WARN_ON_ONCE(pud_large(*pud))) {
+	} else if (WARN_ON_ONCE(pud_leaf(*pud))) {
 		goto out;
 	}
 	pmd = pmd_offset(pud, addr);
@@ -1665,7 +1665,7 @@ bool kern_addr_valid(unsigned long addr)
 	if (pud_none(*pud))
 		return false;
 
-	if (pud_large(*pud))
+	if (pud_leaf(*pud))
 		return pfn_valid(pud_pfn(*pud));
 
 	pmd = pmd_offset(pud, addr);
@@ -3126,7 +3126,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
 	if (pud_none(pud) || !pud_present(pud))
 		goto out;
 
-	if (pud_large(pud)) {
+	if (pud_leaf(pud)) {
 		level = PG_LEVEL_1G;
 		goto out;
 	}
@@ -376,7 +376,7 @@ static void dump_pagetable(unsigned long address)
 		goto bad;
 
 	pr_cont("PUD %lx ", pud_val(*pud));
-	if (!pud_present(*pud) || pud_large(*pud))
+	if (!pud_present(*pud) || pud_leaf(*pud))
 		goto out;
 
 	pmd = pmd_offset(pud, address);
@@ -1046,7 +1046,7 @@ spurious_kernel_fault(unsigned long error_code, unsigned long address)
 	if (!pud_present(*pud))
 		return 0;
 
-	if (pud_large(*pud))
+	if (pud_leaf(*pud))
 		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
 
 	pmd = pmd_offset(pud, address);
@@ -33,7 +33,7 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 			next = end;
 
 		/* if this is already a gbpage, this portion is already mapped */
-		if (pud_large(*pud))
+		if (pud_leaf(*pud))
 			continue;
 
 		/* Is using a gbpage allowed? */
@@ -617,7 +617,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 		}
 
 		if (!pud_none(*pud)) {
-			if (!pud_large(*pud)) {
+			if (!pud_leaf(*pud)) {
 				pmd = pmd_offset(pud, 0);
 				paddr_last = phys_pmd_init(pmd, paddr,
 							   paddr_end,
@@ -1163,7 +1163,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 		if (!pud_present(*pud))
 			continue;
 
-		if (pud_large(*pud) &&
+		if (pud_leaf(*pud) &&
 		    IS_ALIGNED(addr, PUD_SIZE) &&
 		    IS_ALIGNED(next, PUD_SIZE)) {
 			spin_lock(&init_mm.page_table_lock);
@@ -115,7 +115,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
 	pud = pud_offset(p4d, addr);
 	do {
 		next = pud_addr_end(addr, end);
-		if (!pud_large(*pud))
+		if (!pud_leaf(*pud))
 			kasan_populate_pud(pud, addr, next, nid);
 	} while (pud++, addr = next, addr != end);
 }
@@ -145,7 +145,7 @@ static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
 		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
 	}
 
-	if (pud_large(*pud))
+	if (pud_leaf(*pud))
 		return NULL;
 
 	return pud;
@@ -684,7 +684,7 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
 		return NULL;
 
 	*level = PG_LEVEL_1G;
-	if (pud_large(*pud) || !pud_present(*pud))
+	if (pud_leaf(*pud) || !pud_present(*pud))
 		return (pte_t *)pud;
 
 	pmd = pmd_offset(pud, address);
@@ -743,7 +743,7 @@ pmd_t *lookup_pmd_address(unsigned long address)
 		return NULL;
 
 	pud = pud_offset(p4d, address);
-	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
+	if (pud_none(*pud) || pud_leaf(*pud) || !pud_present(*pud))
 		return NULL;
 
 	return pmd_offset(pud, address);
@@ -1274,7 +1274,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
 	 */
 	while (end - start >= PUD_SIZE) {
 
-		if (pud_large(*pud))
+		if (pud_leaf(*pud))
 			pud_clear(pud);
 		else
 			unmap_pmd_range(pud, start, start + PUD_SIZE);
@@ -777,7 +777,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
  */
 int pud_clear_huge(pud_t *pud)
 {
-	if (pud_large(*pud)) {
+	if (pud_leaf(*pud)) {
 		pud_clear(pud);
 		return 1;
 	}
@@ -217,7 +217,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 
 	pud = pud_offset(p4d, address);
 	/* The user page tables do not use large mappings: */
-	if (pud_large(*pud)) {
+	if (pud_leaf(*pud)) {
 		WARN_ON(1);
 		return NULL;
 	}
@@ -170,7 +170,7 @@ int relocate_restore_code(void)
 		goto out;
 	}
 	pud = pud_offset(p4d, relocated_restore_code);
-	if (pud_large(*pud)) {
+	if (pud_leaf(*pud)) {
 		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
 		goto out;
 	}
@@ -1082,7 +1082,7 @@ static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
 	pmd_t *pmd_tbl;
 	int i;
 
-	if (pud_large(*pud)) {
+	if (pud_leaf(*pud)) {
 		pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
 		xen_free_ro_pages(pa, PUD_SIZE);
 		return;
@@ -1863,7 +1863,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
 	if (!pud_present(pud))
 		return 0;
 	pa = pud_val(pud) & PTE_PFN_MASK;
-	if (pud_large(pud))
+	if (pud_leaf(pud))
 		return pa + (vaddr & ~PUD_MASK);
 
 	pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *