mm/zsmalloc: remove migrate_write_lock_nested()

The migrate write lock is to protect the race between zspage migration and
zspage objects' map users.

We only need to lock out the map users of src zspage, not dst zspage,
which is safe to map by users concurrently, since we only need to do
obj_malloc() from dst zspage.

So we can remove the migrate_write_lock_nested() use case.

While we are here, clean up __zs_compact() by moving putback_zspage() outside
of the migrate_write_lock/unlock section: since we hold the pool lock there, no
malloc or free users can come in.

Link: https://lkml.kernel.org/r/20240219-b4-szmalloc-migrate-v1-2-34cd49c6545b@bytedance.com
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Chengming Zhou 2024-02-19 13:33:52 +00:00 committed by Andrew Morton
parent 568b567f78
commit 59def443c9
1 changed file with 5 additions and 17 deletions

View File

@@ -279,7 +279,6 @@ static void migrate_lock_init(struct zspage *zspage);
 static void migrate_read_lock(struct zspage *zspage);
 static void migrate_read_unlock(struct zspage *zspage);
 static void migrate_write_lock(struct zspage *zspage);
-static void migrate_write_lock_nested(struct zspage *zspage);
 static void migrate_write_unlock(struct zspage *zspage);

 #ifdef CONFIG_COMPACTION
@@ -1727,11 +1726,6 @@ static void migrate_write_lock(struct zspage *zspage)
 	write_lock(&zspage->lock);
 }

-static void migrate_write_lock_nested(struct zspage *zspage)
-{
-	write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING);
-}
-
 static void migrate_write_unlock(struct zspage *zspage)
 {
 	write_unlock(&zspage->lock);
@@ -2003,19 +1997,17 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 			dst_zspage = isolate_dst_zspage(class);
 			if (!dst_zspage)
 				break;
-
-			migrate_write_lock(dst_zspage);
 		}

 		src_zspage = isolate_src_zspage(class);
 		if (!src_zspage)
 			break;

-		migrate_write_lock_nested(src_zspage);
+		migrate_write_lock(src_zspage);
 		migrate_zspage(pool, src_zspage, dst_zspage);
-		fg = putback_zspage(class, src_zspage);
 		migrate_write_unlock(src_zspage);
+		fg = putback_zspage(class, src_zspage);

 		if (fg == ZS_INUSE_RATIO_0) {
 			free_zspage(pool, class, src_zspage);
 			pages_freed += class->pages_per_zspage;
@@ -2025,7 +2017,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
 		    || spin_is_contended(&pool->lock)) {
 			putback_zspage(class, dst_zspage);
-			migrate_write_unlock(dst_zspage);
 			dst_zspage = NULL;

 			spin_unlock(&pool->lock);
@@ -2034,15 +2025,12 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 		}
 	}

-	if (src_zspage) {
+	if (src_zspage)
 		putback_zspage(class, src_zspage);
-		migrate_write_unlock(src_zspage);
-	}

-	if (dst_zspage) {
+	if (dst_zspage)
 		putback_zspage(class, dst_zspage);
-		migrate_write_unlock(dst_zspage);
-	}
+
 	spin_unlock(&pool->lock);

 	return pages_freed;