mm/page_alloc: batch the accounting updates in the bulk allocator
author Mel Gorman <mgorman@techsingularity.net>
Tue, 29 Jun 2021 02:41:50 +0000 (19:41 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 29 Jun 2021 17:53:54 +0000 (10:53 -0700)
Now that the zone_statistics are simple counters that do not require
special protection, the bulk allocator's accounting updates can be
batched without the added complexity of protected RMW operations or
xchg.
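
For context: the final hunk below moves the PGALLOC and NUMA accounting
out of the per-page allocation loop, counting pages locally in
nr_account and folding the total into the counters once, after the loop.
A minimal userspace sketch of that pattern, assuming illustrative names
that are not the kernel's API:

    #include <stdio.h>

    /* Stand-in for a per-CPU vmstat counter: cheap, possibly
     * momentarily inaccurate, tolerating plain non-atomic adds. */
    static long vm_numa_hit;

    static void count_numa_events(long delta)
    {
            vm_numa_hit += delta;   /* one add, no per-item RMW or xchg */
    }

    int main(void)
    {
            long nr_account = 0;

            for (int i = 0; i < 8; i++)
                    nr_account++;   /* "allocate" a page, count locally */

            count_numa_events(nr_account);  /* batched accounting update */
            printf("NUMA_HIT = %ld\n", vm_numa_hit);
            return 0;
    }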

Link: https://lkml.kernel.org/r/20210512095458.30632-6-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/vmstat.h
mm/page_alloc.c

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index fe32a2210e73af249a4d07a3e4397be001c5ca6e..d6a6cf53b1278f40bfe6cdeab9c7e30b75f406fc 100644
@@ -247,6 +247,14 @@ __count_numa_event(struct zone *zone, enum numa_stat_item item)
        raw_cpu_inc(pzstats->vm_numa_event[item]);
 }
 
+static inline void
+__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
+{
+       struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
+
+       raw_cpu_add(pzstats->vm_numa_event[item], delta);
+}
+
 extern unsigned long sum_zone_node_page_state(int node,
                                              enum zone_stat_item item);
 extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4e03109bdae56485724c0d201bd3077c973740bb..6bb9b87cf7d5d60cdeed15682716758da3ec447b 100644
@@ -3467,7 +3467,8 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt)
  *
  * Must be called with interrupts disabled.
  */
-static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
+static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
+                                  long nr_account)
 {
 #ifdef CONFIG_NUMA
        enum numa_stat_item local_stat = NUMA_LOCAL;
@@ -3480,12 +3481,12 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
                local_stat = NUMA_OTHER;
 
        if (zone_to_nid(z) == zone_to_nid(preferred_zone))
-               __count_numa_event(z, NUMA_HIT);
+               __count_numa_events(z, NUMA_HIT, nr_account);
        else {
-               __count_numa_event(z, NUMA_MISS);
-               __count_numa_event(preferred_zone, NUMA_FOREIGN);
+               __count_numa_events(z, NUMA_MISS, nr_account);
+               __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
        }
-       __count_numa_event(z, local_stat);
+       __count_numa_events(z, local_stat, nr_account);
 #endif
 }
 
@@ -3531,7 +3532,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
        page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
        if (page) {
                __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
-               zone_statistics(preferred_zone, zone);
+               zone_statistics(preferred_zone, zone, 1);
        }
        local_unlock_irqrestore(&pagesets.lock, flags);
        return page;
@@ -3592,7 +3593,7 @@ struct page *rmqueue(struct zone *preferred_zone,
                                  get_pcppage_migratetype(page));
 
        __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
-       zone_statistics(preferred_zone, zone);
+       zone_statistics(preferred_zone, zone, 1);
        local_irq_restore(flags);
 
 out:
@@ -5077,7 +5078,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
        struct alloc_context ac;
        gfp_t alloc_gfp;
        unsigned int alloc_flags = ALLOC_WMARK_LOW;
-       int nr_populated = 0;
+       int nr_populated = 0, nr_account = 0;
 
        if (unlikely(nr_pages <= 0))
                return 0;
@@ -5154,15 +5155,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
                                goto failed_irq;
                        break;
                }
-
-               /*
-                * Ideally this would be batched but the best way to do
-                * that cheaply is to first convert zone_statistics to
-                * be inaccurate per-cpu counter like vm_events to avoid
-                * a RMW cycle then do the accounting with IRQs enabled.
-                */
-               __count_zid_vm_events(PGALLOC, zone_idx(zone), 1);
-               zone_statistics(ac.preferred_zoneref->zone, zone);
+               nr_account++;
 
                prep_new_page(page, 0, gfp, 0);
                if (page_list)
@@ -5172,6 +5165,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
                nr_populated++;
        }
 
+       __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
+       zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+
        local_unlock_irqrestore(&pagesets.lock, flags);
 
        return nr_populated;
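
As a usage note: callers of the bulk API are unaffected by this change.
A hedged sketch of invoking the bulk allocator through its array
wrapper (assuming the alloc_pages_bulk_array() helper from the same
series; the demo_ function name is hypothetical):

    #include <linux/gfp.h>

    /* Bulk-allocate up to 'want' pages on the local node; with this
     * patch, all returned pages are accounted with a single PGALLOC +
     * zone_statistics() update instead of one update per page. */
    static unsigned long demo_bulk_alloc(struct page **pages, int want)
    {
            return alloc_pages_bulk_array(GFP_KERNEL, want, pages);
    }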