swap_state: update shadow_nodes for anonymous page
author	Yang Yang <yang.yang29@zte.com.cn>
Wed, 18 Jan 2023 12:13:03 +0000 (20:13 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
Fri, 3 Feb 2023 06:33:24 +0000 (22:33 -0800)
Shadow_nodes is the list used by the workingset code for reclaiming shadow
nodes.  It has been updated on page cache adds and deletes ever since,
because for a long time workingset only supported the page cache.  But
when workingset gained anonymous page detection, updating shadow_nodes for
it was missed.  As a result, shadow nodes of anonymous pages are never
reclaimed by scan_shadow_nodes(), even when they consume a lot of memory
and the system is under memory pressure.

So update shadow_nodes for anonymous pages when swap cache entries are
added or deleted, by calling xas_set_update(.., workingset_update_node).
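
The swap cache thereby follows the same pattern the page cache has always
used: register the workingset callback on the xa_state before touching the
i_pages xarray, so the XArray notifies workingset code whenever an xa_node
changes.  A simplified sketch of that pattern (names taken from the patch
below, not a literal excerpt):

	XA_STATE(xas, &address_space->i_pages, idx);

	/* Have the XArray call workingset_update_node() whenever it
	 * updates an xa_node, so nodes holding only shadow entries are
	 * added to (or removed from) the shadow_nodes list for reclaim. */
	xas_set_update(&xas, workingset_update_node);

	xa_lock_irq(&address_space->i_pages);
	xas_store(&xas, folio);	/* or NULL / a shadow entry on delete */
	xa_unlock_irq(&address_space->i_pages);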

Link: https://lkml.kernel.org/r/202301182013032211005@zte.com.cn
Fixes: aae466b0052e ("mm/swap: implement workingset detection for anonymous LRU")
Signed-off-by: Yang Yang <yang.yang29@zte.com.cn>
Reviewed-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
Cc: Bagas Sanjaya <bagasdotme@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/xarray.h
mm/swap_state.c
mm/workingset.c

index 44dd6d6e01bccbfaafb6462b13248ac5bbd578b0..741703b45f61a8f082873b6a02cb4fd18452ab58 100644 (file)
@@ -1643,7 +1643,8 @@ static inline void xas_set_order(struct xa_state *xas, unsigned long index,
  * @update: Function to call when updating a node.
  *
  * The XArray can notify a caller after it has updated an xa_node.
- * This is advanced functionality and is only needed by the page cache.
+ * This is advanced functionality and is only needed by the page
+ * cache and swap cache.
  */
 static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
 {
index cb9aaa00951d99add3f83fac1ac5788f9b0c9fd6..7a003d8abb37bcf750da82940197bdc010b49aef 100644 (file)
@@ -94,6 +94,8 @@ int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
        unsigned long i, nr = folio_nr_pages(folio);
        void *old;
 
+       xas_set_update(&xas, workingset_update_node);
+
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
@@ -145,6 +147,8 @@ void __delete_from_swap_cache(struct folio *folio,
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);
 
+       xas_set_update(&xas, workingset_update_node);
+
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
@@ -252,6 +256,8 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
                struct address_space *address_space = swap_address_space(entry);
                XA_STATE(xas, &address_space->i_pages, curr);
 
+               xas_set_update(&xas, workingset_update_node);
+
                xa_lock_irq(&address_space->i_pages);
                xas_for_each(&xas, old, end) {
                        if (!xa_is_value(old))
index f194d13beabbdca818b7735004b31a10bb31356d..00c6f4d9d9be5ae8a09a85c87bce47440b1a0a76 100644 (file)
@@ -657,11 +657,14 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
                goto out;
        }
 
-       if (!spin_trylock(&mapping->host->i_lock)) {
-               xa_unlock(&mapping->i_pages);
-               spin_unlock_irq(lru_lock);
-               ret = LRU_RETRY;
-               goto out;
+       /* For page cache we need to hold i_lock */
+       if (mapping->host != NULL) {
+               if (!spin_trylock(&mapping->host->i_lock)) {
+                       xa_unlock(&mapping->i_pages);
+                       spin_unlock_irq(lru_lock);
+                       ret = LRU_RETRY;
+                       goto out;
+               }
        }
 
        list_lru_isolate(lru, item);
@@ -683,9 +686,11 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 
 out_invalid:
        xa_unlock_irq(&mapping->i_pages);
-       if (mapping_shrinkable(mapping))
-               inode_add_lru(mapping->host);
-       spin_unlock(&mapping->host->i_lock);
+       if (mapping->host != NULL) {
+               if (mapping_shrinkable(mapping))
+                       inode_add_lru(mapping->host);
+               spin_unlock(&mapping->host->i_lock);
+       }
        ret = LRU_REMOVED_RETRY;
 out:
        cond_resched();
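
A note on the mm/workingset.c hunks: the swap cache's address_space has no
backing inode, so mapping->host is NULL for shadow nodes that originate
there, and there is no i_lock to take or inode LRU to maintain.  Only
page-cache nodes need that handling, hence the NULL checks.  A minimal
illustrative sketch of the distinction (the helper name is made up, not
part of the patch):

	/* Hypothetical helper, for illustration only: shadow nodes can now
	 * come from the page cache (mapping->host points to the inode) or
	 * from the swap cache (mapping->host is NULL, nothing to lock). */
	static bool shadow_node_backed_by_inode(struct address_space *mapping)
	{
		return mapping->host != NULL;
	}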