 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/syscalls.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/file.h>
 
 #include <asm/mman.h>
 
+#include "swap.h"
+
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  * though.
        return try_to_free_buffers(folio);
 }
 EXPORT_SYMBOL(filemap_release_folio);
+
+#ifdef CONFIG_CACHESTAT_SYSCALL
+/**
+ * filemap_cachestat() - compute the page cache statistics of a mapping
+ * @mapping:   The mapping to compute the statistics for.
+ * @first_index:       The starting page cache index.
+ * @last_index:        The final page index (inclusive).
+ * @cs:        The cachestat struct to write the result to.
+ *
+ * This will query the page cache statistics of a mapping in the
+ * page range of [first_index, last_index] (inclusive). The statistics
+ * queried include: number of cached pages, number of dirty pages,
+ * number of pages marked for writeback, and the number of
+ * (recently) evicted pages.
+ */
+static void filemap_cachestat(struct address_space *mapping,
+               pgoff_t first_index, pgoff_t last_index, struct cachestat *cs)
+{
+       XA_STATE(xas, &mapping->i_pages, first_index);
+       struct folio *folio;
+
+       rcu_read_lock();
+       xas_for_each(&xas, folio, last_index) {
+               unsigned long nr_pages;
+               pgoff_t folio_first_index, folio_last_index;
+
+               if (xas_retry(&xas, folio))
+                       continue;
+
+               if (xa_is_value(folio)) {
+                       /* page is evicted */
+                       void *shadow = (void *)folio;
+                       bool workingset; /* not used */
+                       int order = xa_get_order(xas.xa, xas.xa_index);
+
+                       nr_pages = 1 << order;
+                       folio_first_index = round_down(xas.xa_index, 1 << order);
+                       folio_last_index = folio_first_index + nr_pages - 1;
+
+                       /* Folios might straddle the range boundaries, only count covered pages */
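+                       /*
+                        * e.g. an order-2 shadow entry spanning indices 8..11
+                        * contributes only pages 10 and 11 when first_index == 10.
+                        */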
+                       if (folio_first_index < first_index)
+                               nr_pages -= first_index - folio_first_index;
+
+                       if (folio_last_index > last_index)
+                               nr_pages -= folio_last_index - last_index;
+
+                       cs->nr_evicted += nr_pages;
+
+#ifdef CONFIG_SWAP /* implies CONFIG_MMU */
+                       if (shmem_mapping(mapping)) {
+                               /* shmem file - in swap cache */
+                               swp_entry_t swp = radix_to_swp_entry(folio);
+
+                               shadow = get_shadow_from_swap_cache(swp);
+                       }
+#endif
+                       if (workingset_test_recent(shadow, true, &workingset))
+                               cs->nr_recently_evicted += nr_pages;
+
+                       goto resched;
+               }
+
+               nr_pages = folio_nr_pages(folio);
+               folio_first_index = folio_pgoff(folio);
+               folio_last_index = folio_first_index + nr_pages - 1;
+
+               /* Folios might straddle the range boundaries, only count covered pages */
+               if (folio_first_index < first_index)
+                       nr_pages -= first_index - folio_first_index;
+
+               if (folio_last_index > last_index)
+                       nr_pages -= folio_last_index - last_index;
+
+               /* page is in cache */
+               cs->nr_cache += nr_pages;
+
+               if (folio_test_dirty(folio))
+                       cs->nr_dirty += nr_pages;
+
+               if (folio_test_writeback(folio))
+                       cs->nr_writeback += nr_pages;
+
+resched:
+               if (need_resched()) {
+                       xas_pause(&xas);
+                       cond_resched_rcu();
+               }
+       }
+       rcu_read_unlock();
+}
+
+/*
+ * The cachestat(2) system call.
+ *
+ * cachestat() returns the page cache statistics of a file in the
+ * byte range specified by `off` and `len`: number of cached pages,
+ * number of dirty pages, number of pages marked for writeback,
+ * number of evicted pages, and number of recently evicted pages.
+ *
+ * An evicted page is a page that was previously in the page cache
+ * but has since been evicted. A page is recently evicted if its last
+ * eviction was recent enough that its reentry to the cache would
+ * indicate that it is actively being used by the system, and that
+ * there is memory pressure on the system.
+ *
+ * `off` and `len` must be non-negative integers. If `len` > 0,
+ * the queried range is [`off`, `off` + `len`). If `len` == 0,
+ * the range from `off` to the end of the file is queried.
+ *
+ * The `flags` argument is unused for now, but is included for future
+ * extensibility. Users should pass 0 (i.e. no flags specified).
+ *
+ * Currently, hugetlbfs is not supported.
+ *
+ * Because the status of a page can change after cachestat() checks it
+ * but before it returns to the application, the returned values may
+ * contain stale information.
+ *
+ * return values:
+ *  zero        - success
+ *  -EFAULT     - cstat or cstat_range points to an illegal address
+ *  -EINVAL     - invalid flags
+ *  -EBADF      - invalid file descriptor
+ *  -EOPNOTSUPP - file descriptor is of a hugetlbfs file
+ */
+SYSCALL_DEFINE4(cachestat, unsigned int, fd,
+               struct cachestat_range __user *, cstat_range,
+               struct cachestat __user *, cstat, unsigned int, flags)
+{
+       struct fd f = fdget(fd);
+       struct address_space *mapping;
+       struct cachestat_range csr;
+       struct cachestat cs;
+       pgoff_t first_index, last_index;
+
+       if (!f.file)
+               return -EBADF;
+
+       if (copy_from_user(&csr, cstat_range,
+                       sizeof(struct cachestat_range))) {
+               fdput(f);
+               return -EFAULT;
+       }
+
+       /* hugetlbfs is not supported */
+       if (is_file_hugepages(f.file)) {
+               fdput(f);
+               return -EOPNOTSUPP;
+       }
+
+       if (flags != 0) {
+               fdput(f);
+               return -EINVAL;
+       }
+
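+       /*
+        * Convert the byte range to page cache indices: e.g. with 4K pages,
+        * off == 0 and len == 2 * PAGE_SIZE cover indices 0 and 1.
+        */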
+       first_index = csr.off >> PAGE_SHIFT;
+       last_index =
+               csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
+       memset(&cs, 0, sizeof(struct cachestat));
+       mapping = f.file->f_mapping;
+       filemap_cachestat(mapping, first_index, last_index, &cs);
+       fdput(f);
+
+       if (copy_to_user(cstat, &cs, sizeof(struct cachestat)))
+               return -EFAULT;
+
+       return 0;
+}
+#endif /* CONFIG_CACHESTAT_SYSCALL */
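
For reference, a minimal userspace sketch of exercising the new system call could
look like the program below. It is an illustration, not part of the patch: it
assumes the uapi header added by this series (<linux/mman.h>, which defines
struct cachestat_range and struct cachestat) and a <sys/syscall.h> that exposes
__NR_cachestat (wired up as 451 in this series); since no libc wrapper exists
yet, it goes through syscall(2).

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/mman.h>		/* struct cachestat_range, struct cachestat */

	int main(int argc, char **argv)
	{
		/* len == 0 queries from off to the end of the file */
		struct cachestat_range range = { .off = 0, .len = 0 };
		struct cachestat cs;
		int fd;

		if (argc < 2) {
			fprintf(stderr, "usage: %s <file>\n", argv[0]);
			return 1;
		}

		fd = open(argv[1], O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* flags must be 0 for now */
		if (syscall(__NR_cachestat, fd, &range, &cs, 0)) {
			perror("cachestat");
			close(fd);
			return 1;
		}

		printf("cache %llu dirty %llu writeback %llu evicted %llu recently evicted %llu\n",
		       (unsigned long long)cs.nr_cache,
		       (unsigned long long)cs.nr_dirty,
		       (unsigned long long)cs.nr_writeback,
		       (unsigned long long)cs.nr_evicted,
		       (unsigned long long)cs.nr_recently_evicted);

		close(fd);
		return 0;
	}

On an older userspace, the two structures and the syscall number can be defined
locally instead of relying on the installed kernel headers.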