ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
 }
 
+/**
+ * igc_unmap_tx_buffer - Unmap a Tx buffer's DMA region and clear its length
+ * @dev: device the buffer was DMA-mapped against
+ * @buf: Tx buffer whose mapping (recorded via dma_unmap_addr/len) is released
+ *
+ * Resetting the unmap length to 0 makes the dma_unmap_len() checks at the
+ * call sites idempotent, so cleanup paths that revisit a buffer skip it
+ * instead of unmapping twice.
+ */
+static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
+{
+       dma_unmap_single(dev, dma_unmap_addr(buf, dma),
+                        dma_unmap_len(buf, len), DMA_TO_DEVICE);
+
+       dma_unmap_len_set(buf, len, 0);
+}
+
 /**
  * igc_clean_tx_ring - Free Tx Buffers
  * @tx_ring: ring to be cleaned
                else
                        dev_kfree_skb_any(tx_buffer->skb);
 
-               /* unmap skb header data */
-               dma_unmap_single(tx_ring->dev,
-                                dma_unmap_addr(tx_buffer, dma),
-                                dma_unmap_len(tx_buffer, len),
-                                DMA_TO_DEVICE);
+               igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 
                /* check for eop_desc to determine the end of the packet */
                eop_desc = tx_buffer->next_to_watch;
 
                        /* unmap any remaining paged data */
                        if (dma_unmap_len(tx_buffer, len))
-                               dma_unmap_page(tx_ring->dev,
-                                              dma_unmap_addr(tx_buffer, dma),
-                                              dma_unmap_len(tx_buffer, len),
-                                              DMA_TO_DEVICE);
+                               igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
                }
 
                /* move us one more past the eop_desc for start of next pkt */
        /* clear dma mappings for failed tx_buffer_info map */
        while (tx_buffer != first) {
                if (dma_unmap_len(tx_buffer, len))
-                       dma_unmap_page(tx_ring->dev,
-                                      dma_unmap_addr(tx_buffer, dma),
-                                      dma_unmap_len(tx_buffer, len),
-                                      DMA_TO_DEVICE);
-               dma_unmap_len_set(tx_buffer, len, 0);
+                       igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 
                if (i-- == 0)
                        i += tx_ring->count;
        }
 
        if (dma_unmap_len(tx_buffer, len))
-               dma_unmap_single(tx_ring->dev,
-                                dma_unmap_addr(tx_buffer, dma),
-                                dma_unmap_len(tx_buffer, len),
-                                DMA_TO_DEVICE);
-       dma_unmap_len_set(tx_buffer, len, 0);
+               igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 
        dev_kfree_skb_any(tx_buffer->skb);
        tx_buffer->skb = NULL;
                else
                        napi_consume_skb(tx_buffer->skb, napi_budget);
 
-               /* unmap skb header data */
-               dma_unmap_single(tx_ring->dev,
-                                dma_unmap_addr(tx_buffer, dma),
-                                dma_unmap_len(tx_buffer, len),
-                                DMA_TO_DEVICE);
-
-               /* clear tx_buffer data */
-               dma_unmap_len_set(tx_buffer, len, 0);
+               igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 
                /* clear last DMA location and unmap remaining buffers */
                while (tx_desc != eop_desc) {
                        }
 
                        /* unmap any remaining paged data */
-                       if (dma_unmap_len(tx_buffer, len)) {
-                               dma_unmap_page(tx_ring->dev,
-                                              dma_unmap_addr(tx_buffer, dma),
-                                              dma_unmap_len(tx_buffer, len),
-                                              DMA_TO_DEVICE);
-                               dma_unmap_len_set(tx_buffer, len, 0);
-                       }
+                       if (dma_unmap_len(tx_buffer, len))
+                               igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
                }
 
                /* move us one more past the eop_desc for start of next pkt */