PROC_TGID_STAT,
        PROC_TGID_STATM,
        PROC_TGID_MAPS,
+       PROC_TGID_NUMA_MAPS,
        PROC_TGID_MOUNTS,
        PROC_TGID_WCHAN,
 #ifdef CONFIG_SCHEDSTATS
        PROC_TID_STAT,
        PROC_TID_STATM,
        PROC_TID_MAPS,
+       PROC_TID_NUMA_MAPS,
        PROC_TID_MOUNTS,
        PROC_TID_WCHAN,
 #ifdef CONFIG_SCHEDSTATS
        E(PROC_TGID_STAT,      "stat",    S_IFREG|S_IRUGO),
        E(PROC_TGID_STATM,     "statm",   S_IFREG|S_IRUGO),
        E(PROC_TGID_MAPS,      "maps",    S_IFREG|S_IRUGO),
+#ifdef CONFIG_NUMA
+       E(PROC_TGID_NUMA_MAPS, "numa_maps", S_IFREG|S_IRUGO),
+#endif
        E(PROC_TGID_MEM,       "mem",     S_IFREG|S_IRUSR|S_IWUSR),
 #ifdef CONFIG_SECCOMP
        E(PROC_TGID_SECCOMP,   "seccomp", S_IFREG|S_IRUSR|S_IWUSR),
        E(PROC_TID_STAT,       "stat",    S_IFREG|S_IRUGO),
        E(PROC_TID_STATM,      "statm",   S_IFREG|S_IRUGO),
        E(PROC_TID_MAPS,       "maps",    S_IFREG|S_IRUGO),
+#ifdef CONFIG_NUMA
+       E(PROC_TID_NUMA_MAPS,  "numa_maps", S_IFREG|S_IRUGO),
+#endif
        E(PROC_TID_MEM,        "mem",     S_IFREG|S_IRUSR|S_IWUSR),
 #ifdef CONFIG_SECCOMP
        E(PROC_TID_SECCOMP,    "seccomp", S_IFREG|S_IRUSR|S_IWUSR),
        .release        = seq_release,
 };
 
+#ifdef CONFIG_NUMA
+extern struct seq_operations proc_pid_numa_maps_op;
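+
+/*
+ * seq_file open hook for /proc/<pid>/numa_maps: start the iterator and
+ * remember the target task; show_numa_map() retrieves it via m->private.
+ */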
+static int numa_maps_open(struct inode *inode, struct file *file)
+{
+       struct task_struct *task = proc_task(inode);
+       int ret = seq_open(file, &proc_pid_numa_maps_op);
+       if (!ret) {
+               struct seq_file *m = file->private_data;
+               m->private = task;
+       }
+       return ret;
+}
+
+static struct file_operations proc_numa_maps_operations = {
+       .open           = numa_maps_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+#endif
+
 extern struct seq_operations mounts_op;
 static int mounts_open(struct inode *inode, struct file *file)
 {
                case PROC_TGID_MAPS:
                        inode->i_fop = &proc_maps_operations;
                        break;
+#ifdef CONFIG_NUMA
+               case PROC_TID_NUMA_MAPS:
+               case PROC_TGID_NUMA_MAPS:
+                       inode->i_fop = &proc_numa_maps_operations;
+                       break;
+#endif
                case PROC_TID_MEM:
                case PROC_TGID_MEM:
                        inode->i_op = &proc_mem_inode_operations;
 
 #include <linux/hugetlb.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
+#include <linux/pagemap.h>
+#include <linux/mempolicy.h>
 #include <asm/elf.h>
 #include <asm/uaccess.h>
 #include "internal.h"
        .stop   = m_stop,
        .show   = show_map
 };
+
+#ifdef CONFIG_NUMA
+
+/* Per-vma NUMA statistics collected by get_numa_maps() */
+struct numa_maps {
+       unsigned long pages;            /* resident pages in the vma */
+       unsigned long anon;             /* resident anonymous pages */
+       unsigned long mapped;           /* pages with a nonzero mapcount */
+       unsigned long mapcount_max;     /* highest mapcount seen */
+       unsigned long node[MAX_NUMNODES]; /* resident pages per node */
+};
+
+/*
+ * Gather per-node residency statistics for a vma, one page at a time.
+ * Returns a kmalloc()ed numa_maps that the caller must kfree().
+ */
+static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
+{
+       struct page *page;
+       unsigned long vaddr;
+       struct mm_struct *mm = vma->vm_mm;
+       int i;
+       struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);
+
+       if (!md)
+               return NULL;
+       md->pages = 0;
+       md->anon = 0;
+       md->mapped = 0;
+       md->mapcount_max = 0;
+       for_each_node(i)
+               md->node[i] = 0;
+
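+       /*
+        * Walk the vma one page at a time under the page table lock,
+        * classifying each resident page by node, by anonymous vs. file
+        * backing, and by how many ptes map it.
+        */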
+       spin_lock(&mm->page_table_lock);
+       for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
+               page = follow_page(mm, vaddr, 0);
+               if (page) {
+                       int count = page_mapcount(page);
+
+                       if (count)
+                               md->mapped++;
+                       if (count > md->mapcount_max)
+                               md->mapcount_max = count;
+                       md->pages++;
+                       if (PageAnon(page))
+                               md->anon++;
+                       md->node[page_to_nid(page)]++;
+               }
+       }
+       spin_unlock(&mm->page_table_lock);
+       return md;
+}
+
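+/*
+ * One line of output per vma, e.g. (values illustrative only):
+ *
+ *   40000000 interleave={0,1} MaxRef=3 Pages=128 Mapped=128 Anon=128 N0=64 N1=64
+ *
+ * The policy shown is the effective policy that get_vma_policy()
+ * returns for the start of the vma.
+ */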
+static int show_numa_map(struct seq_file *m, void *v)
+{
+       struct task_struct *task = m->private;
+       struct vm_area_struct *vma = v;
+       struct mempolicy *pol;
+       struct numa_maps *md;
+       struct zone **z;
+       int n;
+       int first;
+
+       if (!vma->vm_mm)
+               return 0;
+
+       md = get_numa_maps(vma);
+       if (!md)
+               return 0;
+
+       seq_printf(m, "%08lx", vma->vm_start);
+       pol = get_vma_policy(task, vma, vma->vm_start);
+       /* Print policy */
+       switch (pol->policy) {
+       case MPOL_PREFERRED:
+               seq_printf(m, " prefer=%d", pol->v.preferred_node);
+               break;
+       case MPOL_BIND:
+               seq_printf(m, " bind={");
+               first = 1;
+               for (z = pol->v.zonelist->zones; *z; z++) {
+                       if (!first)
+                               seq_putc(m, ',');
+                       else
+                               first = 0;
+                       seq_printf(m, "%d/%s", (*z)->zone_pgdat->node_id,
+                                       (*z)->name);
+               }
+               seq_putc(m, '}');
+               break;
+       case MPOL_INTERLEAVE:
+               seq_printf(m, " interleave={");
+               first = 1;
+               for_each_node(n) {
+                       if (test_bit(n, pol->v.nodes)) {
+                               if (!first)
+                                       seq_putc(m, ',');
+                               else
+                                       first = 0;
+                               seq_printf(m, "%d", n);
+                       }
+               }
+               seq_putc(m, '}');
+               break;
+       default:
+               seq_printf(m, " default");
+               break;
+       }
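+       /* Summary statistics first, then the per-node breakdown */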
+       seq_printf(m, " MaxRef=%lu Pages=%lu Mapped=%lu",
+                       md->mapcount_max, md->pages, md->mapped);
+       if (md->anon)
+               seq_printf(m, " Anon=%lu", md->anon);
+
+       for_each_online_node(n) {
+               if (md->node[n])
+                       seq_printf(m, " N%d=%lu", n, md->node[n]);
+       }
+       seq_putc(m, '\n');
+       kfree(md);
+       if (m->count < m->size)  /* whole entry fit in the buffer */
+               m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+       return 0;
+}
+
+struct seq_operations proc_pid_numa_maps_op = {
+       .start  = m_start,
+       .next   = m_next,
+       .stop   = m_stop,
+       .show   = show_numa_map
+};
+#endif
 
 struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
 
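+/*
+ * Effective policy for @addr in @vma of @task.  Non-static so that
+ * /proc/<pid>/numa_maps can look up the policies of arbitrary tasks.
+ */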
+struct mempolicy *get_vma_policy(struct task_struct *task,
+                       struct vm_area_struct *vma, unsigned long addr);
+
 extern void numa_default_policy(void);
 extern void numa_policy_init(void);
 
 
 #endif
 
 /* Return effective policy for a VMA */
-static struct mempolicy *
-get_vma_policy(struct vm_area_struct *vma, unsigned long addr)
+struct mempolicy *
+get_vma_policy(struct task_struct *task, struct vm_area_struct *vma,
+               unsigned long addr)
 {
-       struct mempolicy *pol = current->mempolicy;
+       struct mempolicy *pol = task->mempolicy;
 
        if (vma) {
                if (vma->vm_ops && vma->vm_ops->get_policy)
 struct page *
 alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr)
 {
-       struct mempolicy *pol = get_vma_policy(vma, addr);
+       struct mempolicy *pol = get_vma_policy(current, vma, addr);
 
        cpuset_update_current_mems_allowed();
 
 /* Find first node suitable for an allocation */
 int mpol_first_node(struct vm_area_struct *vma, unsigned long addr)
 {
-       struct mempolicy *pol = get_vma_policy(vma, addr);
+       struct mempolicy *pol = get_vma_policy(current, vma, addr);
 
        switch (pol->policy) {
        case MPOL_DEFAULT:
 /* Find secondary valid nodes for an allocation */
 int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr)
 {
-       struct mempolicy *pol = get_vma_policy(vma, addr);
+       struct mempolicy *pol = get_vma_policy(current, vma, addr);
 
        switch (pol->policy) {
        case MPOL_PREFERRED: