From: Kirill A. Shutemov
Date: Wed, 12 Dec 2012 21:51:09 +0000 (-0800)
Subject: thp, vmstat: implement HZP_ALLOC and HZP_ALLOC_FAILED events
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=d8a8e1f0da3d29d7268b3300c96a059d63901b76;p=linux.git

thp, vmstat: implement HZP_ALLOC and HZP_ALLOC_FAILED events

hzp_alloc is incremented every time a huge zero page is successfully
allocated. It includes allocations which were dropped due to a race with
other allocations. Note, it doesn't count every map of the huge zero page,
only its allocation.

hzp_alloc_failed is incremented if the kernel fails to allocate a huge
zero page and falls back to using small pages.

Signed-off-by: Kirill A. Shutemov
Cc: Andrea Arcangeli
Cc: Andi Kleen
Cc: "H. Peter Anvin"
Cc: Mel Gorman
Cc: David Rientjes
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index 8f5b41db314cf..60aeedd546153 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -197,6 +197,14 @@ thp_split is incremented every time a huge page is split into base
 	pages. This can happen for a variety of reasons but a common
 	reason is that a huge page is old and is being reclaimed.
 
+thp_zero_page_alloc is incremented every time a huge zero page is
+	successfully allocated. It includes allocations which were
+	dropped due to a race with other allocations. Note, it doesn't
+	count every map of the huge zero page, only its allocation.
+
+thp_zero_page_alloc_failed is incremented if the kernel fails to
+	allocate a huge zero page and falls back to using small pages.
+
 As the system ages, allocating huge pages may be expensive as the
 system uses memory compaction to copy data around memory to free a
 huge page for use. There are some counters in /proc/vmstat to help

diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 3d31145943701..fe786f07d2bd9 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -58,6 +58,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		THP_COLLAPSE_ALLOC,
 		THP_COLLAPSE_ALLOC_FAILED,
 		THP_SPLIT,
+		THP_ZERO_PAGE_ALLOC,
+		THP_ZERO_PAGE_ALLOC_FAILED,
 #endif
 		NR_VM_EVENT_ITEMS
 };

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d89220cb1d9fc..9a5d45dfad44a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -184,8 +184,11 @@ retry:
 
 	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
 			HPAGE_PMD_ORDER);
-	if (!zero_page)
+	if (!zero_page) {
+		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
 		return 0;
+	}
+	count_vm_event(THP_ZERO_PAGE_ALLOC);
 	preempt_disable();
 	if (cmpxchg(&huge_zero_pfn, 0, page_to_pfn(zero_page))) {
 		preempt_enable();

diff --git a/mm/vmstat.c b/mm/vmstat.c
index c7370579111b8..5da4b19023c30 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -801,6 +801,8 @@ const char * const vmstat_text[] = {
 	"thp_collapse_alloc",
 	"thp_collapse_alloc_failed",
 	"thp_split",
+	"thp_zero_page_alloc",
+	"thp_zero_page_alloc_failed",
 #endif
 
 #endif /* CONFIG_VM_EVENTS_COUNTERS */
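
Not part of the patch: a minimal userspace sketch of how the two new
counters can be read back once this is applied. It assumes only
/proc/vmstat and the thp_zero_page_alloc* names added above; the rest
(file name, buffer size, error handling) is illustrative.

/* Print the huge zero page counters from /proc/vmstat. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char line[128];

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}

	/* Matches both thp_zero_page_alloc and thp_zero_page_alloc_failed. */
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "thp_zero_page_alloc",
			     strlen("thp_zero_page_alloc")))
			fputs(line, stdout);
	}

	fclose(f);
	return 0;
}

Both counters appear only on kernels built with
CONFIG_TRANSPARENT_HUGEPAGE and CONFIG_VM_EVENT_COUNTERS.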