fs: convert error_remove_page to error_remove_folio

There were already assertions that we were not passing a tail page to
error_remove_page(), so make the compiler enforce that by converting
everything to pass and use a folio.

Link: https://lkml.kernel.org/r/20231117161447.2461643-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e130b6514e
commit af7628d6ec
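As a minimal sketch of what this conversion means for an individual
filesystem, the fragment below wires the renamed callback into an
address_space_operations table; myfs_aops is a hypothetical name used
for illustration, while the .error_remove_folio member and
generic_error_remove_folio() are the interfaces introduced by this patch:

	#include <linux/fs.h>
	#include <linux/mm.h>

	/*
	 * Before this patch the callback took a struct page and relied on
	 * a runtime VM_BUG_ON_PAGE(PageTail(page)) assertion; with a
	 * struct folio argument, passing a tail page no longer type-checks.
	 */
	static const struct address_space_operations myfs_aops = {
		/* old: .error_remove_page  = generic_error_remove_page, */
		.error_remove_folio = generic_error_remove_folio,
		/* other operations elided */
	};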
@@ -261,7 +261,7 @@ prototypes::
 			struct folio *src, enum migrate_mode);
	int (*launder_folio)(struct folio *);
	bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count);
-	int (*error_remove_page)(struct address_space *, struct page *);
+	int (*error_remove_folio)(struct address_space *, struct folio *);
	int (*swap_activate)(struct swap_info_struct *sis, struct file *f, sector_t *span)
	int (*swap_deactivate)(struct file *);
	int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
@@ -287,7 +287,7 @@ direct_IO:
 migrate_folio:		yes (both)
 launder_folio:		yes
 is_partially_uptodate:	yes
-error_remove_page:	yes
+error_remove_folio:	yes
 swap_activate:		no
 swap_deactivate:	no
 swap_rw:		yes, unlocks
@@ -823,7 +823,7 @@ cache in your filesystem. The following members are defined:
	bool (*is_partially_uptodate) (struct folio *, size_t from,
				       size_t count);
	void (*is_dirty_writeback)(struct folio *, bool *, bool *);
-	int (*error_remove_page) (struct mapping *mapping, struct page *page);
+	int (*error_remove_folio)(struct mapping *mapping, struct folio *);
	int (*swap_activate)(struct swap_info_struct *sis, struct file *f, sector_t *span)
	int (*swap_deactivate)(struct file *);
	int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
@@ -1034,8 +1034,8 @@ cache in your filesystem. The following members are defined:
	VM if a folio should be treated as dirty or writeback for the
	purposes of stalling.

-``error_remove_page``
-	normally set to generic_error_remove_page if truncation is ok
+``error_remove_folio``
+	normally set to generic_error_remove_folio if truncation is ok
	for this address space. Used for memory failure handling.
	Setting this implies you deal with pages going away under you,
	unless you have them locked or reference counts increased.
@@ -500,7 +500,7 @@ const struct address_space_operations def_blk_aops = {
	.readahead		= blkdev_readahead,
	.writepages		= blkdev_writepages,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
+	.error_remove_folio	= generic_error_remove_folio,
	.migrate_folio		= filemap_migrate_folio,
 };
 #endif	/* CONFIG_BUFFER_HEAD */
@@ -242,7 +242,7 @@ static void afs_kill_pages(struct address_space *mapping,
		folio_clear_uptodate(folio);
		folio_end_writeback(folio);
		folio_lock(folio);
-		generic_error_remove_page(mapping, &folio->page);
+		generic_error_remove_folio(mapping, folio);
		folio_unlock(folio);
		folio_put(folio);

@@ -1103,7 +1103,7 @@ static const struct address_space_operations bch_address_space_operations = {
 #ifdef CONFIG_MIGRATION
	.migrate_folio		= filemap_migrate_folio,
 #endif
-	.error_remove_page	= generic_error_remove_page,
+	.error_remove_folio	= generic_error_remove_folio,
 };

 struct bcachefs_fid {
@@ -10930,7 +10930,7 @@ static const struct address_space_operations btrfs_aops = {
	.release_folio	= btrfs_release_folio,
	.migrate_folio	= btrfs_migrate_folio,
	.dirty_folio	= filemap_dirty_folio,
-	.error_remove_page = generic_error_remove_page,
+	.error_remove_folio = generic_error_remove_folio,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
 };
@@ -907,8 +907,8 @@ static void writepages_finish(struct ceph_osd_request *req)
		doutc(cl, "unlocking %p\n", page);

		if (remove_page)
-			generic_error_remove_page(inode->i_mapping,
-						  page);
+			generic_error_remove_folio(inode->i_mapping,
+						   page_folio(page));

		unlock_page(page);
	}
@@ -969,7 +969,7 @@ const struct address_space_operations ext2_aops = {
	.writepages		= ext2_writepages,
	.migrate_folio		= buffer_migrate_folio,
	.is_partially_uptodate	= block_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
+	.error_remove_folio	= generic_error_remove_folio,
 };

 static const struct address_space_operations ext2_dax_aops = {
@@ -3564,7 +3564,7 @@ static const struct address_space_operations ext4_aops = {
	.direct_IO		= noop_direct_IO,
	.migrate_folio		= buffer_migrate_folio,
	.is_partially_uptodate	= block_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
+	.error_remove_folio	= generic_error_remove_folio,
	.swap_activate		= ext4_iomap_swap_activate,
 };

@@ -3581,7 +3581,7 @@ static const struct address_space_operations ext4_journalled_aops = {
	.direct_IO		= noop_direct_IO,
	.migrate_folio		= buffer_migrate_folio_norefs,
	.is_partially_uptodate	= block_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
+	.error_remove_folio	= generic_error_remove_folio,
	.swap_activate		= ext4_iomap_swap_activate,
 };

@@ -3598,7 +3598,7 @@ static const struct address_space_operations ext4_da_aops = {
	.direct_IO		= noop_direct_IO,
	.migrate_folio		= buffer_migrate_folio,
	.is_partially_uptodate	= block_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
+	.error_remove_folio	= generic_error_remove_folio,
	.swap_activate		= ext4_iomap_swap_activate,
 };

@@ -1944,7 +1944,7 @@ void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
				continue;
			}

-			generic_error_remove_page(mapping, &folio->page);
+			generic_error_remove_folio(mapping, folio);
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
@@ -600,7 +600,7 @@ make_now:
 #ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
		/*
-		 * generic_error_remove_page only truncates pages of regular
+		 * generic_error_remove_folio only truncates pages of regular
		 * inode
		 */
		inode->i_mode |= S_IFREG;
@@ -745,7 +745,7 @@ static const struct address_space_operations gfs2_aops = {
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
-	.error_remove_page = generic_error_remove_page,
+	.error_remove_folio = generic_error_remove_folio,
 };

 static const struct address_space_operations gfs2_jdata_aops = {
@@ -758,7 +758,7 @@ static const struct address_space_operations gfs2_jdata_aops = {
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
-	.error_remove_page = generic_error_remove_page,
+	.error_remove_folio = generic_error_remove_folio,
 };

 void gfs2_set_aops(struct inode *inode)
@@ -1129,8 +1129,8 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
 #define hugetlbfs_migrate_folio NULL
 #endif

-static int hugetlbfs_error_remove_page(struct address_space *mapping,
-				struct page *page)
+static int hugetlbfs_error_remove_folio(struct address_space *mapping,
+				struct folio *folio)
 {
	return 0;
 }
@@ -1277,7 +1277,7 @@ static const struct address_space_operations hugetlbfs_aops = {
	.write_end		= hugetlbfs_write_end,
	.dirty_folio		= noop_dirty_folio,
	.migrate_folio		= hugetlbfs_migrate_folio,
-	.error_remove_page	= hugetlbfs_error_remove_page,
+	.error_remove_folio	= hugetlbfs_error_remove_folio,
 };


@@ -567,7 +567,7 @@ const struct address_space_operations nfs_file_aops = {
	.migrate_folio = nfs_migrate_folio,
	.launder_folio = nfs_launder_folio,
	.is_dirty_writeback = nfs_check_dirty_writeback,
-	.error_remove_page = generic_error_remove_page,
+	.error_remove_folio = generic_error_remove_folio,
	.swap_activate = nfs_swap_activate,
	.swap_deactivate = nfs_swap_deactivate,
	.swap_rw = nfs_swap_rw,
@@ -1644,7 +1644,7 @@ const struct address_space_operations ntfs_normal_aops = {
	.bmap		= ntfs_bmap,
	.migrate_folio	= buffer_migrate_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
-	.error_remove_page = generic_error_remove_page,
+	.error_remove_folio = generic_error_remove_folio,
 };

 /*
@@ -1658,7 +1658,7 @@ const struct address_space_operations ntfs_compressed_aops = {
 #endif /* NTFS_RW */
	.migrate_folio	= buffer_migrate_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
-	.error_remove_page = generic_error_remove_page,
+	.error_remove_folio = generic_error_remove_folio,
 };

 /*
@@ -1673,7 +1673,7 @@ const struct address_space_operations ntfs_mst_aops = {
 #endif /* NTFS_RW */
	.migrate_folio		= buffer_migrate_folio,
	.is_partially_uptodate	= block_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
+	.error_remove_folio	= generic_error_remove_folio,
 };

 #ifdef NTFS_RW
@@ -2480,5 +2480,5 @@ const struct address_space_operations ocfs2_aops = {
	.release_folio		= ocfs2_release_folio,
	.migrate_folio		= buffer_migrate_folio,
	.is_partially_uptodate	= block_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
+	.error_remove_folio	= generic_error_remove_folio,
 };
@@ -584,7 +584,7 @@ const struct address_space_operations xfs_address_space_operations = {
	.bmap			= xfs_vm_bmap,
	.migrate_folio		= filemap_migrate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
+	.error_remove_folio	= generic_error_remove_folio,
	.swap_activate		= xfs_iomap_swapfile_activate,
 };

@@ -180,7 +180,7 @@ const struct address_space_operations zonefs_file_aops = {
	.invalidate_folio	= iomap_invalidate_folio,
	.migrate_folio		= filemap_migrate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
+	.error_remove_folio	= generic_error_remove_folio,
	.swap_activate		= zonefs_swap_activate,
 };

@@ -434,7 +434,7 @@ struct address_space_operations {
	bool (*is_partially_uptodate) (struct folio *, size_t from,
			size_t count);
	void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
-	int (*error_remove_page)(struct address_space *, struct page *);
+	int (*error_remove_folio)(struct address_space *, struct folio *);

	/* swapfile support */
	int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
@@ -2384,7 +2384,8 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int generic_error_remove_page(struct address_space *mapping, struct page *page);
+int generic_error_remove_folio(struct address_space *mapping,
+		struct folio *folio);

 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
		unsigned long address, struct pt_regs *regs);
@@ -927,13 +927,13 @@ static int delete_from_lru_cache(struct folio *folio)
	return -EIO;
 }

-static int truncate_error_page(struct folio *folio, unsigned long pfn,
+static int truncate_error_folio(struct folio *folio, unsigned long pfn,
				struct address_space *mapping)
 {
	int ret = MF_FAILED;

-	if (mapping->a_ops->error_remove_page) {
-		int err = mapping->a_ops->error_remove_page(mapping, &folio->page);
+	if (mapping->a_ops->error_remove_folio) {
+		int err = mapping->a_ops->error_remove_folio(mapping, folio);

		if (err != 0)
			pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
@@ -1054,7 +1054,7 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
	 *
	 * Open: to take i_rwsem or not for this? Right now we don't.
	 */
-	ret = truncate_error_page(folio, page_to_pfn(p), mapping);
+	ret = truncate_error_folio(folio, page_to_pfn(p), mapping);
	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

@@ -1188,7 +1188,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)

	mapping = folio_mapping(folio);
	if (mapping) {
-		res = truncate_error_page(folio, page_to_pfn(p), mapping);
+		res = truncate_error_folio(folio, page_to_pfn(p), mapping);
		/* The page is kept in page cache. */
		extra_pins = true;
		folio_unlock(folio);
@@ -4445,8 +4445,8 @@ static void __init shmem_destroy_inodecache(void)
 }

 /* Keep the page in page cache instead of truncating it */
-static int shmem_error_remove_page(struct address_space *mapping,
-				   struct page *page)
+static int shmem_error_remove_folio(struct address_space *mapping,
+				   struct folio *folio)
 {
	return 0;
 }
@@ -4461,7 +4461,7 @@ const struct address_space_operations shmem_aops = {
 #ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
 #endif
-	.error_remove_page = shmem_error_remove_page,
+	.error_remove_folio = shmem_error_remove_folio,
 };
 EXPORT_SYMBOL(shmem_aops);

@@ -250,10 +250,9 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 /*
  * Used to get rid of pages on hardware memory corruption.
  */
-int generic_error_remove_page(struct address_space *mapping, struct page *page)
+int generic_error_remove_folio(struct address_space *mapping,
+		struct folio *folio)
 {
-	VM_BUG_ON_PAGE(PageTail(page), page);
-
	if (!mapping)
		return -EINVAL;
	/*
@@ -262,9 +261,9 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page)
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
-	return truncate_inode_folio(mapping, page_folio(page));
+	return truncate_inode_folio(mapping, folio);
 }
-EXPORT_SYMBOL(generic_error_remove_page);
+EXPORT_SYMBOL(generic_error_remove_folio);

 /**
  * mapping_evict_folio() - Remove an unused folio from the page-cache.