/*********************************
 * statistics
 **********************************/
-/* Total bytes used by the compressed storage */
-u64 zswap_pool_total_size;
 /* The number of compressed pages currently stored in zswap */
 atomic_t zswap_stored_pages = ATOMIC_INIT(0);
 /* The number of same-value filled pages currently stored in zswap */
        pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
                 zpool_get_type((p)->zpools[0]))
 
-static bool zswap_is_full(void)
-{
-       return totalram_pages() * zswap_max_pool_percent / 100 <
-                       DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
-}
-
-static bool zswap_can_accept(void)
-{
-       return totalram_pages() * zswap_accept_thr_percent / 100 *
-                               zswap_max_pool_percent / 100 >
-                       DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
-}
-
-static u64 get_zswap_pool_size(struct zswap_pool *pool)
-{
-       u64 pool_size = 0;
-       int i;
-
-       for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
-               pool_size += zpool_get_total_size(pool->zpools[i]);
-
-       return pool_size;
-}
-
-static void zswap_update_total_size(void)
-{
-       struct zswap_pool *pool;
-       u64 total = 0;
-
-       rcu_read_lock();
-
-       list_for_each_entry_rcu(pool, &zswap_pools, list)
-               total += get_zswap_pool_size(pool);
-
-       rcu_read_unlock();
-
-       zswap_pool_total_size = total;
-}
-
 /*********************************
 * pool functions
 **********************************/
        return NULL;
 }
 
+/* Upper bound on the compressed pool size, in pages (zswap_max_pool_percent of RAM) */
+static unsigned long zswap_max_pages(void)
+{
+       return totalram_pages() * zswap_max_pool_percent / 100;
+}
+
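+/* Hysteresis threshold, in pages: the shrink worker reclaims down to this, and stores resume below it */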
+static unsigned long zswap_accept_thr_pages(void)
+{
+       return zswap_max_pages() * zswap_accept_thr_percent / 100;
+}
+
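+/* Current size of the compressed pool, in pages, summed over all pools and their zpools */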
+unsigned long zswap_total_pages(void)
+{
+       struct zswap_pool *pool;
+       u64 total = 0;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(pool, &zswap_pools, list) {
+               int i;
+
+               for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
+                       total += zpool_get_total_size(pool->zpools[i]);
+       }
+       rcu_read_unlock();
+
+       return total >> PAGE_SHIFT;
+}
+
 /*********************************
 * param callbacks
 **********************************/
        }
        zswap_entry_cache_free(entry);
        atomic_dec(&zswap_stored_pages);
-       zswap_update_total_size();
 }
 
 /*
                nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
                nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
        } else {
-               nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
+               nr_backing = zswap_total_pages();
                nr_stored = atomic_read(&zswap_nr_stored);
        }
 
 {
        struct mem_cgroup *memcg;
        int ret, failures = 0;
+       unsigned long thr;
+
+       /* Reclaim down to the accept threshold */
+       thr = zswap_accept_thr_pages();
 
        /* global reclaim will select cgroup in a round-robin fashion. */
        do {
                        break;
                if (ret && ++failures == MAX_RECLAIM_RETRIES)
                        break;
-
 resched:
                cond_resched();
-       } while (!zswap_can_accept());
+       } while (zswap_total_pages() > thr);
 }
 
 static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
        struct zswap_entry *entry, *dupentry;
        struct obj_cgroup *objcg = NULL;
        struct mem_cgroup *memcg = NULL;
+       unsigned long max_pages, cur_pages;
 
        VM_WARN_ON_ONCE(!folio_test_locked(folio));
        VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
        if (!zswap_enabled)
                goto check_old;
 
+       /* Check cgroup limits */
        objcg = get_obj_cgroup_from_folio(folio);
        if (objcg && !obj_cgroup_may_zswap(objcg)) {
                memcg = get_mem_cgroup_from_objcg(objcg);
                mem_cgroup_put(memcg);
        }
 
-       /* reclaim space if needed */
-       if (zswap_is_full()) {
+       /* Check global limits */
+       cur_pages = zswap_total_pages();
+       max_pages = zswap_max_pages();
+
+       if (cur_pages >= max_pages) {
                zswap_pool_limit_hit++;
                zswap_pool_reached_full = true;
                goto shrink;
        }
 
+       /* Hysteresis: once the limit is hit, reject stores until usage drops below the accept threshold */
        if (zswap_pool_reached_full) {
-              if (!zswap_can_accept())
+               if (cur_pages > zswap_accept_thr_pages())
                        goto shrink;
                else
                        zswap_pool_reached_full = false;
 
        /* update stats */
        atomic_inc(&zswap_stored_pages);
-       zswap_update_total_size();
        count_vm_event(ZSWPOUT);
 
        return true;
 
 static struct dentry *zswap_debugfs_root;
 
+/* Report the pool total in bytes, computed on demand for the pool_total_size debugfs file */
+static int debugfs_get_total_size(void *data, u64 *val)
+{
+       *val = zswap_total_pages() * PAGE_SIZE;
+       return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");
+
 static int zswap_debugfs_init(void)
 {
        if (!debugfs_initialized())
                           zswap_debugfs_root, &zswap_reject_compress_poor);
        debugfs_create_u64("written_back_pages", 0444,
                           zswap_debugfs_root, &zswap_written_back_pages);
-       debugfs_create_u64("pool_total_size", 0444,
-                          zswap_debugfs_root, &zswap_pool_total_size);
+       debugfs_create_file("pool_total_size", 0444,
+                           zswap_debugfs_root, NULL, &total_size_fops);
        debugfs_create_atomic_t("stored_pages", 0444,
                                zswap_debugfs_root, &zswap_stored_pages);
        debugfs_create_atomic_t("same_filled_pages", 0444,