/* Release the page table from the device */
        writeq(0, slot);
-       /* Force sync around the address release. */
-       mb();
 
        if (pte->dma_addr)
                dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
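After this first hunk the subtable release path is simply a writeq() of zero to the device slot followed by dma_unmap_page(). Below is a minimal sketch of that pattern, assuming a simplified PTE type and a hypothetical helper name (neither is from the driver): on the architectures this driver targets, writeq() to MMIO is ordered against earlier normal-memory stores, and dma_unmap_page() performs its own synchronisation, which is presumably the rationale for dropping the mb().

#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Simplified stand-ins for the driver's internal PTE bookkeeping. */
enum example_pte_status { PTE_FREE = 0, PTE_INUSE = 1 };

struct example_pte {
        struct page *page;
        dma_addr_t dma_addr;
        ulong offset;
        enum example_pte_status status;
};

static void example_release_subtable(struct device *dev,
                                     struct example_pte *pte,
                                     u64 __iomem *slot)
{
        /* Release the slot from the device first ... */
        writeq(0, slot);

        /* ... then tear down the host-side DMA mapping; no mb() in between. */
        if (pte->dma_addr)
                dma_unmap_page(dev, pte->dma_addr, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);

        pte->status = PTE_FREE;
}

The context lines that follow belong to the mapping path's error branch (the return -1 taken after a failed dma_map_page()), which is where the next removed mb() sat.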
                                return -1;
                        }
-                       /* Wait until the page is mapped. */
-                       mb();
                }
 
                /* Make the DMA-space address available to the device. */
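The mb() removed in this hunk sat between the error check on the freshly mapped page and the step that makes the DMA address available to the device. As a minimal sketch of that publish step for the simple-mapping case (the helper name and -ENOMEM are assumptions, and GASKET_VALID_SLOT_FLAG's value of 1 is assumed from the driver): once dma_map_page() has returned a usable bus address, writing the flagged address into the device slot is all that is required.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

#define GASKET_VALID_SLOT_FLAG 1        /* driver's valid-slot marker (value assumed) */

static int example_map_and_publish(struct device *dev, struct page *page,
                                   u64 __iomem *slot)
{
        dma_addr_t daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
                                        DMA_BIDIRECTIONAL);

        if (dma_mapping_error(dev, daddr))
                return -ENOMEM;

        /* Make the DMA-space address available to the device. */
        writeq(daddr | GASKET_VALID_SLOT_FLAG, slot);
        return 0;
}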
        /*
         * For each page table entry and corresponding entry in the device's
         * address translation table:
         */
        for (i = 0; i < num_pages; i++) {
                /* release the address from the device, */
-               if (is_simple_mapping || ptes[i].status == PTE_INUSE)
+               if (is_simple_mapping || ptes[i].status == PTE_INUSE) {
                        writeq(0, &slots[i]);
-               else
+               } else {
                        ((u64 __force *)slots)[i] = 0;
-               /* Force sync around the address release. */
-               mb();
+                       /* sync above PTE update before updating mappings */
+                       wmb();
+               }
 
                /* release the address from the driver, */
                if (ptes[i].status == PTE_INUSE) {
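In the reworked loop above, the two slot flavours are now handled differently: simple-mapping slots are device MMIO and are cleared with writeq(), which needs no extra barrier, while extended second-level slots live in host DRAM, so the plain store is followed by wmb() to make the cleared entry visible before the loop moves on. A sketch of that shape, reusing the example_pte stand-in from the first snippet; put_page() here is only a placeholder for the driver's own page release and accounting.

static void example_unmap_range(struct device *dev, struct example_pte *ptes,
                                u64 __iomem *slots, uint num_pages,
                                int is_simple_mapping)
{
        uint i;

        for (i = 0; i < num_pages; i++) {
                /* Release the address from the device ... */
                if (is_simple_mapping || ptes[i].status == PTE_INUSE) {
                        writeq(0, &slots[i]);           /* MMIO slot */
                } else {
                        ((u64 __force *)slots)[i] = 0;  /* slot in host DRAM */
                        wmb();  /* sync the cleared entry before the mappings are updated */
                }

                /* ... then release it from the driver. */
                if (ptes[i].status == PTE_INUSE) {
                        dma_unmap_page(dev, ptes[i].dma_addr, PAGE_SIZE,
                                       DMA_BIDIRECTIONAL);
                        put_page(ptes[i].page);         /* placeholder cleanup */
                }
                ptes[i].status = PTE_FREE;
        }
}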
        /* Map the page into DMA space. */
        pte->dma_addr = dma_map_page(pg_tbl->device, pte->page, 0, PAGE_SIZE,
                                     DMA_BIDIRECTIONAL);
-       /* Wait until the page is mapped. */
-       mb();
 
        /* make the addresses available to the device */
        dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG;
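The final hunk drops the mb() between dma_map_page() and the writeq() that hands the subtable's bus address to the device; writeq() already orders the stores that precede it. Below is a sketch of the whole allocate, map, and publish sequence under the same assumptions as the earlier snippets (simplified types, assumed helper name, GASKET_VALID_SLOT_FLAG as defined above), with a dma_mapping_error() check included for completeness.

#include <linux/gfp.h>

static int example_alloc_and_publish_subtable(struct device *dev,
                                              struct example_pte *pte,
                                              u64 __iomem *slot)
{
        u64 dma_addr;

        /* Back the second-level table with a zeroed page. */
        pte->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!pte->page)
                return -ENOMEM;
        pte->offset = 0;

        /* Map the page into DMA space. */
        pte->dma_addr = dma_map_page(dev, pte->page, 0, PAGE_SIZE,
                                     DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pte->dma_addr)) {
                __free_page(pte->page);
                return -ENOMEM;
        }

        /*
         * Make the address available to the device.  writeq() orders the
         * stores above it, so no explicit mb() is needed here.
         */
        dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG;
        writeq(dma_addr, slot);
        pte->status = PTE_INUSE;

        return 0;
}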