--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ ... @@
 static void migrate_read_lock(struct zspage *zspage);
 static void migrate_read_unlock(struct zspage *zspage);
 static void migrate_write_lock(struct zspage *zspage);
-static void migrate_write_lock_nested(struct zspage *zspage);
 static void migrate_write_unlock(struct zspage *zspage);
@@ ... @@ #ifdef CONFIG_COMPACTION
 	write_lock(&zspage->lock);
 }
 
-static void migrate_write_lock_nested(struct zspage *zspage)
-{
-	write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING);
-}
-
 static void migrate_write_unlock(struct zspage *zspage)
 {
 	write_unlock(&zspage->lock);
 }
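Reviewer note, not part of the patch: write_lock_nested(..., SINGLE_DEPTH_NESTING) is the stock lockdep annotation for acquiring a second rwlock of the same lock class. The helper deleted above existed only because the compaction loop used to write-lock the dst and src zspages at the same time; both locks belong to the same class, so the second acquisition had to be annotated or lockdep would report a false deadlock. A minimal sketch of the old pattern, with a hypothetical helper name:

	static void lock_dst_then_src(struct zspage *dst, struct zspage *src)
	{
		/* first lock of the zspage->lock class: plain write_lock() */
		write_lock(&dst->lock);
		/* second lock of the same class: must be annotated for lockdep */
		write_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);
	}

Once at most one zspage is write-locked at a time (see the next hunk), the annotation, and with it this helper, loses its only user.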
@@ ... @@ static unsigned long __zs_compact(struct zs_pool *pool,
 			dst_zspage = isolate_dst_zspage(class);
 			if (!dst_zspage)
 				break;
-			migrate_write_lock(dst_zspage);
 		}
 
 		src_zspage = isolate_src_zspage(class);
 		if (!src_zspage)
 			break;
 
-		migrate_write_lock_nested(src_zspage);
-
+		migrate_write_lock(src_zspage);
 		migrate_zspage(pool, src_zspage, dst_zspage);
-		fg = putback_zspage(class, src_zspage);
 		migrate_write_unlock(src_zspage);
+		fg = putback_zspage(class, src_zspage);
 
 		if (fg == ZS_INUSE_RATIO_0) {
 			free_zspage(pool, class, src_zspage);
 			pages_freed += class->pages_per_zspage;
 		}
 
 		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
 		    || spin_is_contended(&pool->lock)) {
 			putback_zspage(class, dst_zspage);
-			migrate_write_unlock(dst_zspage);
 			dst_zspage = NULL;
 
 			spin_unlock(&pool->lock);
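The hunk above makes three related changes: the dst zspage is no longer kept write-locked for as long as it sits cached in dst_zspage, the src zspage is taken with the plain lock helper, and the unlock is moved ahead of putback_zspage(). As a sketch, the hot path of the loop then reads roughly as follows (dst refill, the break paths, and the enclosing while (zs_can_compact(class)) elided); everything here still runs under pool->lock, which appears to be what keeps the unlocked putback safe:

	src_zspage = isolate_src_zspage(class);
	migrate_write_lock(src_zspage);
	migrate_zspage(pool, src_zspage, dst_zspage);
	/* drop the zspage lock before putting it back on a fullness list */
	migrate_write_unlock(src_zspage);
	fg = putback_zspage(class, src_zspage);
	if (fg == ZS_INUSE_RATIO_0) {
		free_zspage(pool, class, src_zspage);
		pages_freed += class->pages_per_zspage;
	}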
@@ ... @@ static unsigned long __zs_compact(struct zs_pool *pool,
 		}
 	}
 
-	if (src_zspage) {
+	if (src_zspage)
 		putback_zspage(class, src_zspage);
-		migrate_write_unlock(src_zspage);
-	}
 
-	if (dst_zspage) {
+	if (dst_zspage)
 		putback_zspage(class, dst_zspage);
-		migrate_write_unlock(dst_zspage);
-	}
+
 	spin_unlock(&pool->lock);
 	return pages_freed;
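Net effect: a zspage->lock is now write-held only across migrate_zspage(), and never two at once, so nothing is left to unlock in the function-exit cleanup above, and the nested-lock annotation has no remaining user; that is what lets the first two hunks delete migrate_write_lock_nested() outright.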