In a few places we always end up mapping the pool object with the FORCE
constraint (to prevent hitting -EBUSY), which will destroy the cached
mapping if it has a different type. As a simple first step, make the
mapping type part of the pool interface, where the behaviour is to only
give out pool objects which match the requested mapping type.
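
As a rough sketch, the change at a typical call site (simplified from
the hunks below) is:

	/* before: force the mapping type, destroying any cached mapping */
	pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
	cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_FORCE_WB);

	/* after: the pool only hands out nodes of the requested type */
	pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE, I915_MAP_WB);
	cmd = i915_gem_object_pin_map(pool->obj, pool->type);
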
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20210119133106.66294-4-matthew.auld@intel.com
 
        int err;
 
        if (!pool) {
-               pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
+               pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE,
+                                               cache->has_llc ?
+                                               I915_MAP_WB :
+                                               I915_MAP_WC);
                if (IS_ERR(pool))
                        return PTR_ERR(pool);
        }
        if (err)
                goto err_pool;
 
-       cmd = i915_gem_object_pin_map(pool->obj,
-                                     cache->has_llc ?
-                                     I915_MAP_FORCE_WB :
-                                     I915_MAP_FORCE_WC);
+       cmd = i915_gem_object_pin_map(pool->obj, pool->type);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto err_pool;
                return -EINVAL;
 
        if (!pool) {
-               pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
+               pool = intel_gt_get_buffer_pool(eb->engine->gt, len,
+                                               I915_MAP_WB);
                if (IS_ERR(pool))
                        return PTR_ERR(pool);
                eb->batch_pool = pool;
 
        count = div_u64(round_up(vma->size, block_size), block_size);
        size = (1 + 8 * count) * sizeof(u32);
        size = round_up(size, PAGE_SIZE);
-       pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
+       pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
        if (IS_ERR(pool)) {
                err = PTR_ERR(pool);
                goto out_pm;
        if (unlikely(err))
                goto out_put;
 
-       cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+       cmd = i915_gem_object_pin_map(pool->obj, pool->type);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto out_unpin;
        count = div_u64(round_up(dst->size, block_size), block_size);
        size = (1 + 11 * count) * sizeof(u32);
        size = round_up(size, PAGE_SIZE);
-       pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
+       pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
        if (IS_ERR(pool)) {
                err = PTR_ERR(pool);
                goto out_pm;
        if (unlikely(err))
                goto out_put;
 
-       cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+       cmd = i915_gem_object_pin_map(pool->obj, pool->type);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto out_unpin;
 
 }
 
 static struct intel_gt_buffer_pool_node *
-node_create(struct intel_gt_buffer_pool *pool, size_t sz)
+node_create(struct intel_gt_buffer_pool *pool, size_t sz,
+           enum i915_map_type type)
 {
        struct intel_gt *gt = to_gt(pool);
        struct intel_gt_buffer_pool_node *node;
 
        i915_gem_object_set_readonly(obj);
 
+       node->type = type;
        node->obj = obj;
        return node;
 }
 
 struct intel_gt_buffer_pool_node *
-intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+                        enum i915_map_type type)
 {
        struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
        struct intel_gt_buffer_pool_node *node;
                if (node->obj->base.size < size)
                        continue;
 
+               if (node->type != type)
+                       continue;
+
                age = READ_ONCE(node->age);
                if (!age)
                        continue;
        rcu_read_unlock();
 
        if (&node->link == list) {
-               node = node_create(pool, size);
+               node = node_create(pool, size, type);
                if (IS_ERR(node))
                        return node;
        }
 
 struct i915_request;
 
 struct intel_gt_buffer_pool_node *
-intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size);
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+                        enum i915_map_type type);
 
 static inline int
 intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
 
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 
+#include "gem/i915_gem_object_types.h"
 #include "i915_active_types.h"
 
-struct drm_i915_gem_object;
-
 struct intel_gt_buffer_pool {
        spinlock_t lock;
        struct list_head cache_list[4];
                struct rcu_head rcu;
        };
        unsigned long age;
+       enum i915_map_type type;
 };
 
 #endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
 
        void *dst, *src;
        int ret;
 
-       dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
+       dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
        if (IS_ERR(dst))
                return dst;