mm: zswap: function ordering: move entry sections out of LRU section
author Johannes Weiner <hannes@cmpxchg.org>
Tue, 30 Jan 2024 01:36:51 +0000 (20:36 -0500)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 22 Feb 2024 18:24:44 +0000 (10:24 -0800)
This completes consolidation of the LRU section.

Link: https://lkml.kernel.org/r/20240130014208.565554-16-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Cc: Chengming Zhou <zhouchengming@bytedance.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/zswap.c

index 74b128c3a0edf15f36b8cc271849afa0d889e826..b8834c4fb7dbae06cbf3023af1674f3ddad8361d 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -768,58 +768,6 @@ static inline int entry_to_nid(struct zswap_entry *entry)
        return page_to_nid(virt_to_page(entry));
 }
 
-void zswap_lruvec_state_init(struct lruvec *lruvec)
-{
-       atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
-}
-
-void zswap_folio_swapin(struct folio *folio)
-{
-       struct lruvec *lruvec;
-
-       if (folio) {
-               lruvec = folio_lruvec(folio);
-               atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
-       }
-}
-
-void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
-{
-       struct zswap_pool *pool;
-
-       /* lock out zswap pools list modification */
-       spin_lock(&zswap_pools_lock);
-       list_for_each_entry(pool, &zswap_pools, list) {
-               if (pool->next_shrink == memcg)
-                       pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
-       }
-       spin_unlock(&zswap_pools_lock);
-}
-
-/*********************************
-* zswap entry functions
-**********************************/
-static struct kmem_cache *zswap_entry_cache;
-
-static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
-{
-       struct zswap_entry *entry;
-       entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
-       if (!entry)
-               return NULL;
-       entry->refcount = 1;
-       RB_CLEAR_NODE(&entry->rbnode);
-       return entry;
-}
-
-static void zswap_entry_cache_free(struct zswap_entry *entry)
-{
-       kmem_cache_free(zswap_entry_cache, entry);
-}
-
-/*********************************
-* lru functions
-**********************************/
 static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
 {
        atomic_long_t *nr_zswap_protected;
@@ -872,6 +820,55 @@ static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
        rcu_read_unlock();
 }
 
+void zswap_lruvec_state_init(struct lruvec *lruvec)
+{
+       atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
+}
+
+void zswap_folio_swapin(struct folio *folio)
+{
+       struct lruvec *lruvec;
+
+       if (folio) {
+               lruvec = folio_lruvec(folio);
+               atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+       }
+}
+
+void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
+{
+       struct zswap_pool *pool;
+
+       /* lock out zswap pools list modification */
+       spin_lock(&zswap_pools_lock);
+       list_for_each_entry(pool, &zswap_pools, list) {
+               if (pool->next_shrink == memcg)
+                       pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
+       }
+       spin_unlock(&zswap_pools_lock);
+}
+
+/*********************************
+* zswap entry functions
+**********************************/
+static struct kmem_cache *zswap_entry_cache;
+
+static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
+{
+       struct zswap_entry *entry;
+       entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
+       if (!entry)
+               return NULL;
+       entry->refcount = 1;
+       RB_CLEAR_NODE(&entry->rbnode);
+       return entry;
+}
+
+static void zswap_entry_cache_free(struct zswap_entry *entry)
+{
+       kmem_cache_free(zswap_entry_cache, entry);
+}
+
 /*********************************
 * rbtree functions
 **********************************/