{
        struct device *dev = mlx5_core_dma_dev(priv->mdev);
 
-       return xsk_pool_dma_map(pool, dev, 0);
+       return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
                                 struct xsk_buff_pool *pool)
 {
-       return xsk_pool_dma_unmap(pool, 0);
+       return xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
 
        if (unlikely(!dma_info->page))
                return -ENOMEM;
 
-       dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
-                                     PAGE_SIZE, rq->buff.map_dir);
+       dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE,
+                                           rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
                page_pool_recycle_direct(rq->page_pool, dma_info->page);
                dma_info->page = NULL;
 
 void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
 {
-       dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
+       dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
+                            DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,