mm/zsmalloc: remove unused zspage->isolated
zspage->isolated is not used anywhere, so there is no need to maintain it; keeping it up to date only forces us to take the heavy pool lock to bump a counter. Just remove it.

Link: https://lkml.kernel.org/r/20240219-b4-szmalloc-migrate-v1-3-34cd49c6545b@bytedance.com
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 59def443c9
commit 4ad63e1632
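For orientation, this is roughly what the two migration callbacks reduce to once the isolated counter and its pool->lock round-trips are gone. It is a sketch reconstructed from the context lines that remain in the hunks below, not a verbatim copy of the resulting file:

static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
	/*
	 * Page is locked so zspage couldn't be destroyed. For detail, look at
	 * lock_zspage in free_zspage.
	 */
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	return true;
}

static void zs_page_putback(struct page *page)
{
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
}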
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -116,7 +116,6 @@
 #define HUGE_BITS	1
 #define FULLNESS_BITS	4
 #define CLASS_BITS	8
-#define ISOLATED_BITS	5
 #define MAGIC_VAL_BITS	8
 
 #define MAX(a, b) ((a) >= (b) ? (a) : (b))
@@ -246,7 +245,6 @@ struct zspage {
 		unsigned int huge:HUGE_BITS;
 		unsigned int fullness:FULLNESS_BITS;
 		unsigned int class:CLASS_BITS + 1;
-		unsigned int isolated:ISOLATED_BITS;
 		unsigned int magic:MAGIC_VAL_BITS;
 	};
 	unsigned int inuse;
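As a quick sanity check on the struct change above: with isolated:ISOLATED_BITS gone, the flag word of struct zspage packs 1 + 4 + 9 + 8 = 22 bits instead of 27. The sketch below takes the field widths from the defines in the first hunk; the bit-count comments are annotations, not from the source:

	struct {
		unsigned int huge:HUGE_BITS;		/*  1 bit  */
		unsigned int fullness:FULLNESS_BITS;	/*  4 bits */
		unsigned int class:CLASS_BITS + 1;	/*  9 bits */
		unsigned int magic:MAGIC_VAL_BITS;	/*  8 bits */
	};						/* 22 of 32 bits used */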
@@ -1732,17 +1730,6 @@ static void migrate_write_unlock(struct zspage *zspage)
 }
 
 #ifdef CONFIG_COMPACTION
-/* Number of isolated subpage for *page migration* in this zspage */
-static void inc_zspage_isolation(struct zspage *zspage)
-{
-	zspage->isolated++;
-}
-
-static void dec_zspage_isolation(struct zspage *zspage)
-{
-	VM_BUG_ON(zspage->isolated == 0);
-	zspage->isolated--;
-}
-
 static const struct movable_operations zsmalloc_mops;
 
@@ -1771,21 +1758,12 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 
 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 {
-	struct zs_pool *pool;
-	struct zspage *zspage;
-
 	/*
 	 * Page is locked so zspage couldn't be destroyed. For detail, look at
 	 * lock_zspage in free_zspage.
 	 */
 	VM_BUG_ON_PAGE(PageIsolated(page), page);
 
-	zspage = get_zspage(page);
-	pool = zspage->pool;
-	spin_lock(&pool->lock);
-	inc_zspage_isolation(zspage);
-	spin_unlock(&pool->lock);
-
 	return true;
 }
 
@@ -1850,7 +1828,6 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	kunmap_atomic(s_addr);
 
 	replace_sub_page(class, zspage, newpage, page);
-	dec_zspage_isolation(zspage);
 	/*
 	 * Since we complete the data copy and set up new zspage structure,
 	 * it's okay to release the pool's lock.
@@ -1872,16 +1849,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 
 static void zs_page_putback(struct page *page)
 {
-	struct zs_pool *pool;
-	struct zspage *zspage;
-
 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
-
-	zspage = get_zspage(page);
-	pool = zspage->pool;
-	spin_lock(&pool->lock);
-	dec_zspage_isolation(zspage);
-	spin_unlock(&pool->lock);
 }
 
 static const struct movable_operations zsmalloc_mops = {