        struct xe_device *xe = xe_bo_device(bo);
        struct xe_ttm_tt *tt;
        unsigned long extra_pages;
+       enum ttm_caching caching = ttm_cached;
        int err;
 
        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;
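
        /* Extra pages back the bo's flat-CCS compression metadata */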
        extra_pages = 0;
        if (xe_bo_needs_ccs_pages(bo))
                extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
                                           PAGE_SIZE);
 
-       /* TODO: Select caching mode */
-       err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags,
-                         bo->flags & XE_BO_SCANOUT_BIT ? ttm_write_combined : ttm_cached,
-                         extra_pages);
+       /*
+        * Display scanout is always non-coherent with the CPU cache.
+        *
+        * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
+        * require a CPU:WC mapping.
+        */
+       if (bo->flags & XE_BO_SCANOUT_BIT ||
+           (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE))
+               caching = ttm_write_combined;
+
+       err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
        if (err) {
                kfree(tt);
                return NULL;
        }

        return &tt->ttm;
 
 #define XE_BO_DEFER_BACKING            BIT(9)
 #define XE_BO_SCANOUT_BIT              BIT(10)
 #define XE_BO_FIXED_PLACEMENT_BIT      BIT(11)
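+/* The bo will be used to back GPU page tables */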
+#define XE_BO_PAGETABLE                BIT(12)
 /* this one is triggered internally only */
 #define XE_BO_INTERNAL_TEST            BIT(30)
 #define XE_BO_INTERNAL_64K             BIT(31)
 
                                  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
                                  XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT |
                                  XE_BO_CREATE_PINNED_BIT |
-                                 XE_BO_CREATE_NO_RESV_EVICT);
+                                 XE_BO_CREATE_NO_RESV_EVICT |
+                                 XE_BO_PAGETABLE);
        if (IS_ERR(bo)) {
                err = PTR_ERR(bo);
                goto err_kfree;