{
        struct xe_device *xe = to_xe_device(dev);
 
+       if (xe->preempt_fence_wq)
+               destroy_workqueue(xe->preempt_fence_wq);
+
        if (xe->ordered_wq)
                destroy_workqueue(xe->ordered_wq);
 
        INIT_LIST_HEAD(&xe->pinned.external_vram);
        INIT_LIST_HEAD(&xe->pinned.evicted);
 
+       xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
        xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
        xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
-       if (!xe->ordered_wq || !xe->unordered_wq) {
+       if (!xe->ordered_wq || !xe->unordered_wq ||
+           !xe->preempt_fence_wq) {
+               /*
+                * Cleanup is done in xe_device_destroy() via the
+                * drmm_add_action_or_reset() action registered above.
+                */
                drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
                err = -ENOMEM;
                goto err;
        }

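Both hunks above are against xe_device.c: the first adds teardown of the new
queue to what appears to be xe_device_destroy(), the second allocates it during
device creation. Note that the error path only sets err and jumps out; as the
comment says, it relies on the drmm-managed action to undo the partial setup.
A minimal sketch of that pattern, with hypothetical my_dev/my_create/my_destroy
names (drmm_add_action_or_reset(), alloc_ordered_workqueue() and
destroy_workqueue() are the real kernel APIs):

        #include <drm/drm_device.h>
        #include <drm/drm_managed.h>
        #include <linux/workqueue.h>

        struct my_dev {
                struct drm_device drm;
                struct workqueue_struct *preempt_fence_wq;
        };

        /* Managed action: runs when the drm_device is released, including
         * on error paths taken after registration. */
        static void my_destroy(struct drm_device *drm, void *unused)
        {
                struct my_dev *md = container_of(drm, struct my_dev, drm);

                /* NULL-safe: the allocation may never have run */
                if (md->preempt_fence_wq)
                        destroy_workqueue(md->preempt_fence_wq);
        }

        static int my_create(struct my_dev *md)
        {
                int err;

                /* Register the destructor before allocating anything ... */
                err = drmm_add_action_or_reset(&md->drm, my_destroy, NULL);
                if (err)
                        return err;

                /* ... so a failed allocation can simply return: my_destroy()
                 * still runs later and tears down whatever was created. */
                md->preempt_fence_wq =
                        alloc_ordered_workqueue("my-preempt-fence-wq", 0);
                if (!md->preempt_fence_wq)
                        return -ENOMEM;

                return 0;
        }

The NULL checks in the destroy callback are what make the bare "goto err"
safe even when only some of the three workqueues were created.
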
        /** @ufence_wq: user fence wait queue */
        wait_queue_head_t ufence_wq;
 
+       /** @preempt_fence_wq: used to serialize preempt fences */
+       struct workqueue_struct *preempt_fence_wq;
+
        /** @ordered_wq: used to serialize compute mode resume */
        struct workqueue_struct *ordered_wq;
 
 
        struct xe_exec_queue *q = pfence->q;
 
        pfence->error = q->ops->suspend(q);
-       queue_work(system_unbound_wq, &pfence->preempt_work);
+       queue_work(q->vm->xe->preempt_fence_wq, &pfence->preempt_work);
        return true;
 }
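
The final hunk, the tail of preempt_fence_enable_signaling() in
xe_preempt_fence.c by the look of it, moves the preempt work from
system_unbound_wq onto the driver-owned queue. Per the new kernel-doc above,
preempt_fence_wq exists to serialize preempt fences: an ordered workqueue runs
at most one item at a time, in queueing order, and the queued work, which
presumably can sleep while the exec queue suspend completes, no longer occupies
slots in the unbound queue shared with the rest of the kernel. A standalone
sketch of that serialization behaviour, all demo_* names hypothetical (the
workqueue API calls are real):

        #include <linux/delay.h>
        #include <linux/module.h>
        #include <linux/workqueue.h>

        static struct workqueue_struct *demo_wq;

        static void demo_fn(struct work_struct *work)
        {
                pr_info("demo: work %p starts\n", work);
                msleep(100);            /* pretend to wait, like a suspend op */
                pr_info("demo: work %p done\n", work);
        }

        static DECLARE_WORK(demo_a, demo_fn);
        static DECLARE_WORK(demo_b, demo_fn);

        static int __init demo_init(void)
        {
                demo_wq = alloc_ordered_workqueue("demo-ordered-wq", 0);
                if (!demo_wq)
                        return -ENOMEM;

                /*
                 * On an ordered wq, demo_b cannot start until demo_a returns:
                 * max_active is 1 and execution follows queueing order. On
                 * system_unbound_wq both could run at once, and many sleeping
                 * items would compete with unrelated work system-wide.
                 */
                queue_work(demo_wq, &demo_a);
                queue_work(demo_wq, &demo_b);
                return 0;
        }

        static void __exit demo_exit(void)
        {
                destroy_workqueue(demo_wq);     /* flushes pending work first */
        }

        module_init(demo_init);
        module_exit(demo_exit);
        MODULE_LICENSE("GPL");

Loading this prints the two start/done pairs strictly back to back; queued on
system_unbound_wq instead, the two items could interleave.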