mm: memcontrol: move out cgroup swaprate throttling
author	Johannes Weiner <hannes@cmpxchg.org>
Wed, 3 Jun 2020 23:01:38 +0000 (16:01 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Thu, 4 Jun 2020 03:09:47 +0000 (20:09 -0700)
The cgroup swaprate throttling is about matching new anon allocations to
the rate of available IO when that IO is being throttled.  It's the IO
controller hooking into the VM, rather than a memory controller concern.

Rename mem_cgroup_throttle_swaprate() to cgroup_throttle_swaprate(), and
drop the @memcg argument, which is only used to check whether the preceding
page charge succeeded and the fault is proceeding.

We could decouple the call from mem_cgroup_try_charge() here as well, but
that would cause unnecessary churn: the following patches convert all
callsites to a new charge API and we'll decouple as we go along.
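For illustration only, a converted callsite might then look like this
sketch (assuming the new charge API from the follow-up patches ends up as
mem_cgroup_charge(page, mm, gfp_mask)):

	/* sketch: anon fault path with the throttle call decoupled */
	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
		goto oom;
	cgroup_throttle_swaprate(page, GFP_KERNEL);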

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Alex Shi <alex.shi@linux.alibaba.com>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-5-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/swap.h
mm/memcontrol.c
mm/swapfile.c

diff --git a/include/linux/swap.h b/include/linux/swap.h
index e92176fc882427d215b17ae6af0e77708a2cd9f9..6cea1eb97d45b25a9013761bb473bae3c2b87a68 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -651,11 +651,9 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
 #endif
 
 #if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
-                                        gfp_t gfp_mask);
+extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
 #else
-static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
-                                               int node, gfp_t gfp_mask)
+static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
 {
 }
 #endif
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 340c580f8363e28684e88978d20e63d4be5cb775..bc0f55d0cc087d09426549139b92ba48d055afd0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6553,12 +6553,11 @@ out:
 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
                          gfp_t gfp_mask, struct mem_cgroup **memcgp)
 {
-       struct mem_cgroup *memcg;
        int ret;
 
        ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp);
-       memcg = *memcgp;
-       mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
+       if (*memcgp)
+               cgroup_throttle_swaprate(page, gfp_mask);
        return ret;
 }
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 40a80617cb037866352eea179366f0f218ca5d3f..1829fc4b3ca2cb8e38d53d45db767fa939b0672c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3798,11 +3798,12 @@ static void free_swap_count_continuations(struct swap_info_struct *si)
 }
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
-                                 gfp_t gfp_mask)
+void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
 {
        struct swap_info_struct *si, *next;
-       if (!(gfp_mask & __GFP_IO) || !memcg)
+       int nid = page_to_nid(page);
+
+       if (!(gfp_mask & __GFP_IO))
                return;
 
        if (!blk_cgroup_congested())
@@ -3816,11 +3817,10 @@ void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
                return;
 
        spin_lock(&swap_avail_lock);
-       plist_for_each_entry_safe(si, next, &swap_avail_heads[node],
-                                 avail_lists[node]) {
+       plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
+                                 avail_lists[nid]) {
                if (si->bdev) {
-                       blkcg_schedule_throttle(bdev_get_queue(si->bdev),
-                                               true);
+                       blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
                        break;
                }
        }