                                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
                                map = pci_map_single(qdev->pdev,
                                                     sbq_desc->p.skb->data,
-                                                    rx_ring->sbq_buf_size,
+                                                    SMALL_BUF_MAP_SIZE,
                                                     PCI_DMA_FROMDEVICE);
                                if (pci_dma_mapping_error(qdev->pdev, map)) {
                                        netif_err(qdev, ifup, qdev->ndev,
 
        pci_dma_sync_single_for_cpu(qdev->pdev,
                                    dma_unmap_addr(sbq_desc, mapaddr),
-                                   rx_ring->sbq_buf_size,
-                                   PCI_DMA_FROMDEVICE);
+                                   SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
 
        skb_put_data(new_skb, skb->data, length);
 
        pci_dma_sync_single_for_device(qdev->pdev,
                                       dma_unmap_addr(sbq_desc, mapaddr),
-                                      rx_ring->sbq_buf_size,
+                                      SMALL_BUF_MAP_SIZE,
                                       PCI_DMA_FROMDEVICE);
        skb = new_skb;
 
                sbq_desc = ql_get_curr_sbuf(rx_ring);
                pci_unmap_single(qdev->pdev,
                                dma_unmap_addr(sbq_desc, mapaddr),
-                               rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE);
+                               SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
                skb = sbq_desc->p.skb;
                ql_realign_skb(skb, hdr_len);
                skb_put(skb, hdr_len);
                        pci_dma_sync_single_for_cpu(qdev->pdev,
                                                    dma_unmap_addr(sbq_desc,
                                                                   mapaddr),
-                                                   rx_ring->sbq_buf_size,
+                                                   SMALL_BUF_MAP_SIZE,
                                                    PCI_DMA_FROMDEVICE);
                        skb_put_data(skb, sbq_desc->p.skb->data, length);
                        pci_dma_sync_single_for_device(qdev->pdev,
                                                       dma_unmap_addr(sbq_desc,
                                                                      mapaddr),
-                                                      rx_ring->sbq_buf_size,
+                                                      SMALL_BUF_MAP_SIZE,
                                                       PCI_DMA_FROMDEVICE);
                } else {
                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                        skb_put(skb, length);
                        pci_unmap_single(qdev->pdev,
                                         dma_unmap_addr(sbq_desc, mapaddr),
-                                        rx_ring->sbq_buf_size,
+                                        SMALL_BUF_MAP_SIZE,
                                         PCI_DMA_FROMDEVICE);
                        sbq_desc->p.skb = NULL;
                }
                sbq_desc = ql_get_curr_sbuf(rx_ring);
                pci_unmap_single(qdev->pdev,
                                 dma_unmap_addr(sbq_desc, mapaddr),
-                                rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE);
+                                SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
                if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
                        /*
                         * This is an non TCP/UDP IP frame, so
                if (sbq_desc->p.skb) {
                        pci_unmap_single(qdev->pdev,
                                         dma_unmap_addr(sbq_desc, mapaddr),
-                                        rx_ring->sbq_buf_size,
+                                        SMALL_BUF_MAP_SIZE,
                                         PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(sbq_desc->p.skb);
                        sbq_desc->p.skb = NULL;
                } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
                cqicb->sbq_addr =
                    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
-               cqicb->sbq_buf_size =
-                   cpu_to_le16((u16)(rx_ring->sbq_buf_size));
+               cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUF_MAP_SIZE);
                bq_len = (rx_ring->sbq_len == 65536) ? 0 :
                        (u16) rx_ring->sbq_len;
                cqicb->sbq_len = cpu_to_le16(bq_len);
                        rx_ring->sbq_len = NUM_SMALL_BUFFERS;
                        rx_ring->sbq_size =
                            rx_ring->sbq_len * sizeof(__le64);
-                       rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
                        rx_ring->type = RX_Q;
                } else {
                        /*
                        rx_ring->lbq_size = 0;
                        rx_ring->sbq_len = 0;
                        rx_ring->sbq_size = 0;
-                       rx_ring->sbq_buf_size = 0;
                        rx_ring->type = TX_Q;
                }
        }
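
Note on the size argument: the DMA API requires that an unmap pass the same length that was used when the buffer was mapped, so once the mapping side of the small-buffer queue uses SMALL_BUF_MAP_SIZE, every unmap and sync site has to use the same constant, which is what the hunks above do. Below is a minimal sketch of that map/unmap symmetry using the same legacy pci_* DMA wrappers as the driver; the struct, the my_* names, and the 256-byte value are illustrative assumptions for this sketch, not taken from qlge.h.

/*
 * Illustrative sketch only (the my_* names and the 256-byte size are
 * assumptions, not from qlge.h): map a small RX buffer and unmap it with
 * the same compile-time constant, the invariant this patch relies on.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

#define MY_SMALL_BUF_MAP_SIZE 256	/* assumed fixed small-buffer size */

struct my_sbq_desc {
	struct sk_buff *skb;
	dma_addr_t mapaddr;
};

static int my_map_small_buf(struct pci_dev *pdev, struct my_sbq_desc *desc)
{
	/* Map with the constant size... */
	desc->mapaddr = pci_map_single(pdev, desc->skb->data,
				       MY_SMALL_BUF_MAP_SIZE,
				       PCI_DMA_FROMDEVICE);
	return pci_dma_mapping_error(pdev, desc->mapaddr) ? -ENOMEM : 0;
}

static void my_unmap_small_buf(struct pci_dev *pdev, struct my_sbq_desc *desc)
{
	/* ...and unmap with the same constant; the size passed here must
	 * match the size passed to the map call above.
	 */
	pci_unmap_single(pdev, desc->mapaddr,
			 MY_SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
}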