mm: remove invalidate_inode_page()
All callers are now converted to call mapping_evict_folio().

Link: https://lkml.kernel.org/r/20231108182809.602073-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 761d79fbad
commit 2033c98cce
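invalidate_inode_page() was a one-line wrapper, so each remaining caller was converted to do the lookup-and-evict itself. A minimal sketch of the conversion, assuming a caller that still holds a struct page (the helper name is hypothetical; the body is exactly the wrapper deleted in the second hunk below):

/*
 * Hypothetical helper showing what converted call sites now open-code:
 * resolve the page to its folio, then evict it from its mapping.
 */
static long evict_page_example(struct page *page)
{
	struct folio *folio = page_folio(page);

	return mapping_evict_folio(folio_mapping(folio), folio);
}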
@@ -139,7 +139,6 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
 		loff_t end);
 long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
-long invalidate_inode_page(struct page *page);
 unsigned long mapping_try_invalidate(struct address_space *mapping,
 		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
 
@@ -294,13 +294,6 @@ long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
 	return remove_mapping(mapping, folio);
 }
 
-long invalidate_inode_page(struct page *page)
-{
-	struct folio *folio = page_folio(page);
-
-	return mapping_evict_folio(folio_mapping(folio), folio);
-}
-
 /**
  * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
@@ -559,9 +552,9 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 EXPORT_SYMBOL(invalidate_mapping_pages);
 
 /*
- * This is like invalidate_inode_page(), except it ignores the page's
+ * This is like mapping_evict_folio(), except it ignores the folio's
  * refcount. We do this because invalidate_inode_pages2() needs stronger
- * invalidation guarantees, and cannot afford to leave pages behind because
+ * invalidation guarantees, and cannot afford to leave folios behind because
  * shrink_page_list() has a temp ref on them, or because they're transiently
  * sitting in the folio_add_lru() caches.
  */
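The comment fixed up in the last hunk marks the key contrast: mapping_evict_folio() is the polite variant that backs off when anyone else holds a reference, while invalidate_inode_pages2() must not leave folios behind. A sketch of the kind of refcount test this implies, assuming the usual "one ref per page, one for private data, one for the caller" accounting (the function name and exact expression are assumptions, not quotes from mm/truncate.c):

/*
 * Sketch (assumed logic): treat a folio as evictable only if nobody
 * besides the mapping's pages, its private data, and the caller holds
 * a reference. Transient refs, e.g. from the folio_add_lru() caches,
 * make this test fail, which is why invalidate_inode_pages2() cannot
 * rely on it.
 */
static bool folio_evictable_example(struct folio *folio)
{
	return folio_ref_count(folio) <=
		folio_nr_pages(folio) + folio_has_private(folio) + 1;
}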