panfrost_device.o \
        panfrost_devfreq.o \
        panfrost_gem.o \
+       panfrost_gem_shrinker.o \
        panfrost_gpu.o \
        panfrost_job.o \
        panfrost_mmu.o \
 
 
 - Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
 
-- Support for madvise and a shrinker.
-
 - Compute job support. So called 'compute only' jobs need to be plumbed up to
   userspace.
 
        struct mutex sched_lock;
        struct mutex reset_lock;
 
+       struct mutex shrinker_lock;
+       struct list_head shrinker_list;
+       struct shrinker shrinker;
+
        struct {
                struct devfreq *devfreq;
                struct thermal_cooling_device *cooling;
 
        return 0;
 }
 
+/* PANFROST_IOCTL_MADVISE: userspace hint that a BO's backing pages may
+ * be discarded under memory pressure (DONTNEED) or are needed again
+ * (WILLNEED).  DONTNEED BOs are queued on pfdev->shrinker_list for the
+ * shrinker to reclaim; WILLNEED takes them back off that list.
+ * args->retained reports whether the backing store still exists.
+ */
+static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv)
+{
+       struct drm_panfrost_madvise *args = data;
+       struct panfrost_device *pfdev = dev->dev_private;
+       struct drm_gem_object *gem_obj;
+
+       gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+       if (!gem_obj) {
+               DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+               return -ENOENT;
+       }
+
+       /* Record the new madv state; non-zero return means the pages
+        * are still present (not already purged).
+        */
+       args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
+
+       if (args->retained) {
+               struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
+
+               /* shrinker_lock guards shrinker_list against the
+                * shrinker callbacks walking it concurrently.
+                */
+               mutex_lock(&pfdev->shrinker_lock);
+
+               if (args->madv == PANFROST_MADV_DONTNEED)
+                       list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list);
+               else if (args->madv == PANFROST_MADV_WILLNEED)
+                       list_del_init(&bo->base.madv_list);
+
+               mutex_unlock(&pfdev->shrinker_lock);
+       }
+
+       /* Drop the reference taken by drm_gem_object_lookup(). */
+       drm_gem_object_put_unlocked(gem_obj);
+       return 0;
+}
+
 int panfrost_unstable_ioctl_check(void)
 {
        if (!unstable_ioctls)
        PANFROST_IOCTL(GET_BO_OFFSET,   get_bo_offset,  DRM_RENDER_ALLOW),
        PANFROST_IOCTL(PERFCNT_ENABLE,  perfcnt_enable, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(PERFCNT_DUMP,    perfcnt_dump,   DRM_RENDER_ALLOW),
+       PANFROST_IOCTL(MADVISE,         madvise,        DRM_RENDER_ALLOW),
 };
 
 DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
        pfdev->ddev = ddev;
 
        spin_lock_init(&pfdev->mm_lock);
+       mutex_init(&pfdev->shrinker_lock);
+       INIT_LIST_HEAD(&pfdev->shrinker_list);
 
        /* 4G enough for now. can be 48-bit */
        drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
        if (err < 0)
                goto err_out1;
 
+       panfrost_gem_shrinker_init(ddev);
+
        return 0;
 
 err_out1:
        struct drm_device *ddev = pfdev->ddev;
 
        drm_dev_unregister(ddev);
+       panfrost_gem_shrinker_cleanup(ddev);
        pm_runtime_get_sync(pfdev->dev);
        pm_runtime_put_sync_autosuspend(pfdev->dev);
        pm_runtime_disable(pfdev->dev);
 
        drm_mm_remove_node(&bo->node);
        spin_unlock(&pfdev->mm_lock);
 
+       mutex_lock(&pfdev->shrinker_lock);
+       if (!list_empty(&bo->base.madv_list))
+               list_del(&bo->base.madv_list);
+       mutex_unlock(&pfdev->shrinker_lock);
+
        drm_gem_shmem_free_object(obj);
 }
 
 
                                   struct dma_buf_attachment *attach,
                                   struct sg_table *sgt);
 
+void panfrost_gem_shrinker_init(struct drm_device *dev);
+void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
+
 #endif /* __PANFROST_GEM_H__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Arm Ltd.
+ *
+ * Based on msm_gem_freedreno.c:
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/list.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+
+#include "panfrost_device.h"
+#include "panfrost_gem.h"
+#include "panfrost_mmu.h"
+
+/* Shrinker .count_objects callback: report how many pages could be
+ * reclaimed right now, i.e. the total page count of all purgeable BOs
+ * on the shrinker list.  Returns 0 (nothing reclaimable) if the list
+ * lock cannot be taken without blocking.
+ */
+static unsigned long
+panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+       struct panfrost_device *pfdev =
+               container_of(shrinker, struct panfrost_device, shrinker);
+       struct drm_gem_shmem_object *shmem;
+       unsigned long count = 0;
+
+       /* trylock: this can be called from reclaim context, so never
+        * block here; just report nothing reclaimable this time.
+        */
+       if (!mutex_trylock(&pfdev->shrinker_lock))
+               return 0;
+
+       list_for_each_entry(shmem, &pfdev->shrinker_list, madv_list) {
+               if (drm_gem_shmem_is_purgeable(shmem))
+                       count += shmem->base.size >> PAGE_SHIFT;
+       }
+
+       mutex_unlock(&pfdev->shrinker_lock);
+
+       return count;
+}
+
+/* Purge the backing store of one BO: tear down its GPU MMU mapping,
+ * then drop the shmem pages.  Both steps are done under pages_lock so
+ * the unmap/purge pair is atomic with respect to other users of the
+ * page array.
+ *
+ * NOTE(review): panfrost_mmu_unmap() is called with pages_lock held —
+ * confirm it never takes a lock that nests the other way round.
+ */
+static void panfrost_gem_purge(struct drm_gem_object *obj)
+{
+       struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+       mutex_lock(&shmem->pages_lock);
+
+       panfrost_mmu_unmap(to_panfrost_bo(obj));
+       drm_gem_shmem_purge_locked(obj);
+
+       mutex_unlock(&shmem->pages_lock);
+}
+
+/* Shrinker .scan_objects callback: purge purgeable BOs until at least
+ * sc->nr_to_scan pages have been freed or the list is exhausted.
+ * Returns the number of pages freed, or SHRINK_STOP if the list lock
+ * is contended (never block in reclaim context).
+ */
+static unsigned long
+panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+       struct panfrost_device *pfdev =
+               container_of(shrinker, struct panfrost_device, shrinker);
+       struct drm_gem_shmem_object *shmem, *tmp;
+       unsigned long freed = 0;
+
+       if (!mutex_trylock(&pfdev->shrinker_lock))
+               return SHRINK_STOP;
+
+       /* _safe variant: entries are removed from the list as they are
+        * purged, while we are still iterating.
+        */
+       list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) {
+               if (freed >= sc->nr_to_scan)
+                       break;
+               if (drm_gem_shmem_is_purgeable(shmem)) {
+                       panfrost_gem_purge(&shmem->base);
+                       freed += shmem->base.size >> PAGE_SHIFT;
+                       /* Purged: no longer a reclaim candidate. */
+                       list_del_init(&shmem->madv_list);
+               }
+       }
+
+       mutex_unlock(&pfdev->shrinker_lock);
+
+       if (freed > 0)
+               pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
+
+       return freed;
+}
+
+/**
+ * panfrost_gem_shrinker_init - Initialize panfrost shrinker
+ * @dev: DRM device
+ *
+ * This function registers and sets up the panfrost shrinker.
+ */
+void panfrost_gem_shrinker_init(struct drm_device *dev)
+{
+       struct panfrost_device *pfdev = dev->dev_private;
+
+       pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
+       pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
+       pfdev->shrinker.seeks = DEFAULT_SEEKS;
+       /* Registration failure is not fatal (we just never reclaim),
+        * but is worth a loud warning.
+        */
+       WARN_ON(register_shrinker(&pfdev->shrinker));
+}
+
+/**
+ * panfrost_gem_shrinker_cleanup - Clean up panfrost shrinker
+ * @dev: DRM device
+ *
+ * This function unregisters the panfrost shrinker.
+ */
+void panfrost_gem_shrinker_cleanup(struct drm_device *dev)
+{
+       struct panfrost_device *pfdev = dev->dev_private;
+
+       /* nr_deferred is presumably only non-NULL after a successful
+        * register_shrinker() — guards against unregistering a shrinker
+        * that was never registered.  TODO confirm against mm/vmscan.c.
+        */
+       if (pfdev->shrinker.nr_deferred)
+               unregister_shrinker(&pfdev->shrinker);
+}
 
 #define DRM_PANFROST_GET_BO_OFFSET             0x05
 #define DRM_PANFROST_PERFCNT_ENABLE            0x06
 #define DRM_PANFROST_PERFCNT_DUMP              0x07
+#define DRM_PANFROST_MADVISE                   0x08
 
 #define DRM_IOCTL_PANFROST_SUBMIT              DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
 #define DRM_IOCTL_PANFROST_WAIT_BO             DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
 #define DRM_IOCTL_PANFROST_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo)
 #define DRM_IOCTL_PANFROST_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
 #define DRM_IOCTL_PANFROST_GET_BO_OFFSET       DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
+#define DRM_IOCTL_PANFROST_MADVISE             DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
 
 /*
  * Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
        __u64 buf_ptr;
 };
 
+/* madvise provides a way to tell the kernel that a buffer's contents
+ * can be discarded under memory pressure, which is useful for a
+ * userspace BO cache where we want to optimistically hold on to the
+ * buffer allocation and potential mmap, but allow the pages to be
+ * discarded under memory pressure.
+ *
+ * Typical usage would involve madvise(DONTNEED) when buffer enters BO
+ * cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
+ * In the WILLNEED case, 'retained' indicates to userspace whether the
+ * backing pages still exist.
+ */
+#define PANFROST_MADV_WILLNEED 0       /* backing pages are needed, status returned in 'retained' */
+#define PANFROST_MADV_DONTNEED 1       /* backing pages not needed */
+
+/* Argument struct for DRM_IOCTL_PANFROST_MADVISE. */
+struct drm_panfrost_madvise {
+       __u32 handle;         /* in, GEM handle */
+       __u32 madv;           /* in, PANFROST_MADV_x */
+       __u32 retained;       /* out, whether backing store still exists */
+};
+
 #if defined(__cplusplus)
 }
 #endif