iwlwifi: pcie: gen2: use DMA pool for byte-count tables
author: Johannes Berg <johannes.berg@intel.com>
Sat, 25 Apr 2020 10:04:53 +0000 (13:04 +0300)
committer: Luca Coelho <luciano.coelho@intel.com>
Fri, 8 May 2020 06:52:53 +0000 (09:52 +0300)
Since the recent patch in this area, we no longer allocate 64k
for a single queue, but only 1k, which still means a full page.
Use a DMA pool to reduce this further, since we will have a lot
of queues in a typical system that can share pages.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Link: https://lore.kernel.org/r/iwlwifi.20200425130140.6e84c79aea30.Ie9a417132812d110ec1cc87852f101477c01cfcb@changeid
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c

index abe649af689ccc243659834aa9fa8820965978d1..43f81204c1520efa771e127f972dd31c97ec9374 100644 (file)
@@ -556,6 +556,7 @@ struct iwl_trans_pcie {
        u32 scd_base_addr;
        struct iwl_dma_ptr scd_bc_tbls;
        struct iwl_dma_ptr kw;
+       struct dma_pool *bc_pool;
 
        struct iwl_txq *txq_memory;
        struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
index a0daae058c1cbe0399e4b392024fb07a1f811092..8ccfc7cc73483b3e768b02c0a92fbb3b17c1af97 100644 (file)
@@ -3672,6 +3672,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
        init_waitqueue_head(&trans_pcie->sx_waitq);
 
+       /*
+        * For gen2 devices, we use a single allocation for each byte-count
+        * table, but they're pretty small (1k) so use a DMA pool that we
+        * allocate here.
+        */
+       if (cfg_trans->gen2) {
+               size_t bc_tbl_size;
+
+               if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_AX210)
+                       bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+               else
+                       bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+
+               trans_pcie->bc_pool = dmam_pool_create("iwlwifi:bc", &pdev->dev,
+                                                      bc_tbl_size, 256, 0);
+               if (!trans_pcie->bc_pool)
+                       goto out_no_pci;
+       }
+
        if (trans_pcie->msix_enabled) {
                ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
                if (ret)
index 84df12ff131af2485a409d57a4d061236e52408a..bb55563bba6818e50a21cf845119db52a85f4338 100644 (file)
@@ -1224,7 +1224,9 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
        }
 
        kfree(txq->entries);
-       iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
+       if (txq->bc_tbl.addr)
+               dma_pool_free(trans_pcie->bc_pool, txq->bc_tbl.addr,
+                             txq->bc_tbl.dma);
        kfree(txq);
 }
 
@@ -1272,6 +1274,7 @@ int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
                                     struct iwl_txq **intxq, int size,
                                     unsigned int timeout)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        size_t bc_tbl_size, bc_tbl_entries;
        struct iwl_txq *txq;
        int ret;
@@ -1290,8 +1293,10 @@ int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
        txq = kzalloc(sizeof(*txq), GFP_KERNEL);
        if (!txq)
                return -ENOMEM;
-       ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl, bc_tbl_size);
-       if (ret) {
+
+       txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->bc_pool, GFP_KERNEL,
+                                         &txq->bc_tbl.dma);
+       if (!txq->bc_tbl.addr) {
                IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
                kfree(txq);
                return -ENOMEM;