}
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-                               struct efx_rx_buffer *rx_buf)
+                               struct efx_rx_buffer *rx_buf,
+                               unsigned int used_len)
 {
        if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                struct efx_rx_page_state *state;
 
                state = page_address(rx_buf->u.page);
                if (--state->refcnt == 0) {
                        dma_unmap_page(&efx->pci_dev->dev,
                                       state->dma_addr,
                                       efx_rx_buf_size(efx),
                                       DMA_FROM_DEVICE);
+               } else if (used_len) {
+                       dma_sync_single_for_cpu(&efx->pci_dev->dev,
+                                               rx_buf->dma_addr, used_len,
+                                               DMA_FROM_DEVICE);
                }
        } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
                                 rx_buf->len, DMA_FROM_DEVICE);
        }
 }
 
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
 {
-       efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+       efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
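
For readers coming to this patch without the rest of rx.c: the change above is an instance of the standard streaming-DMA discipline for a page shared by several RX buffers. Only the consumer that drops the last reference to the page may dma_unmap_page() it; any earlier consumer must leave the mapping in place and dma_sync_single_for_cpu() over just the bytes the NIC actually wrote, otherwise the CPU can read stale cache lines on non-coherent systems. The sketch below shows that pattern in isolation; demo_page_state and demo_consume_rx_buffer are hypothetical names for illustration, not part of the sfc driver.

#include <linux/dma-mapping.h>

/* Hypothetical per-page bookkeeping, standing in for the driver's
 * struct efx_rx_page_state. */
struct demo_page_state {
	unsigned int refcnt;	/* RX buffers still outstanding in this page */
	dma_addr_t dma_addr;	/* DMA address of the whole mapped page */
};

static void demo_consume_rx_buffer(struct device *dev,
				   struct demo_page_state *state,
				   dma_addr_t buf_dma_addr,
				   size_t page_len, unsigned int used_len)
{
	if (--state->refcnt == 0) {
		/* Last buffer in the page: tear down the whole mapping.
		 * Unmapping transfers ownership of the memory back to
		 * the CPU, so no separate sync is needed. */
		dma_unmap_page(dev, state->dma_addr, page_len,
			       DMA_FROM_DEVICE);
	} else if (used_len) {
		/* Page still shared with unconsumed buffers: keep the
		 * mapping, but hand the received bytes back to the CPU
		 * so it does not see stale data. */
		dma_sync_single_for_cpu(dev, buf_dma_addr, used_len,
					DMA_FROM_DEVICE);
	}
}

Passing used_len == 0, as efx_fini_rx_buffer() does above, skips the sync entirely: the buffer is about to be freed, so its contents no longer matter.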
 
                goto out;
        }
 
-       /* Release card resources - assumes all RX buffers consumed in-order
-        * per RX queue
+       /* Release and/or sync DMA mapping - assumes all RX buffers
+        * consumed in-order per RX queue
         */
-       efx_unmap_rx_buffer(efx, rx_buf);
+       efx_unmap_rx_buffer(efx, rx_buf, len);
 
        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.