  * struct iwl_trans_txqs - transport tx queues data
  *
  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
+ * @page_offs: offset from skb->cb to mac header page pointer
+ * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
  * @queue_used: bit mask of used queues
  * @queue_stopped: bit mask of stopped queues
  */
        struct dma_pool *bc_pool;
        size_t bc_tbl_size;
        bool bc_table_dword;
+       u8 page_offs;
+       u8 dev_cmd_offs;
 
        struct {
                u8 fifo;
 
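(Illustrative sketch, not part of the patch: because the offsets now
live in trans->txqs, any code that holds a struct iwl_trans can recover
the pointers stashed in skb->cb; the helper name here is hypothetical.)

static struct page *skb_tso_page(struct iwl_trans *trans,
				 struct sk_buff *skb)
{
	/* page_offs locates a struct page * slot inside skb->cb */
	struct page **page_ptr =
		(void *)((u8 *)skb->cb + trans->txqs.page_offs);

	return *page_ptr;
}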
        wait_queue_head_t wait_command_queue;
        wait_queue_head_t sx_waitq;
 
-       u8 page_offs, dev_cmd_offs;
-
        u8 def_rx_queue;
        u8 n_no_reclaim_cmds;
        u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
                           struct iwl_dma_ptr *ptr, size_t size);
 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
 void iwl_pcie_apply_destination(struct iwl_trans *trans);
-void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
+void iwl_pcie_free_tso_page(struct iwl_trans *trans,
                            struct sk_buff *skb);
 #ifdef CONFIG_INET
 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
 
        trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
        trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
        trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+       trans->txqs.page_offs = trans_cfg->cb_data_offs;
+       trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+
        if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
                trans_pcie->n_no_reclaim_cmds = 0;
        else
        trans_pcie->scd_set_active = trans_cfg->scd_set_active;
        trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
 
-       trans_pcie->page_offs = trans_cfg->cb_data_offs;
-       trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
-
        trans->command_groups = trans_cfg->command_groups;
        trans->command_groups_size = trans_cfg->command_groups_size;
 
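(Sketch of a sanity check that is not in the patch, assuming the two
consecutive pointer slots implied by the assignments above: both slots
must fit inside the 48-byte skb->cb scratch area.)

	WARN_ON(trans_cfg->cb_data_offs + 2 * sizeof(void *) >
		sizeof_field(struct sk_buff, cb));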
 
 static struct page *get_workaround_page(struct iwl_trans *trans,
                                        struct sk_buff *skb)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct page **page_ptr;
        struct page *ret;
 
-       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+       page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
 
        ret = alloc_page(GFP_ATOMIC);
        if (!ret)
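(Hypothetical helper sketching the chaining scheme the allocation above
feeds: the previous head of the skb's page chain is assumed to be saved
in the last pointer-sized bytes of the new page before *page_ptr is
updated.)

static void chain_workaround_page(struct iwl_trans *trans,
				  struct sk_buff *skb, struct page *page)
{
	struct page **page_ptr =
		(void *)((u8 *)skb->cb + trans->txqs.page_offs);

	/* stash the old head in the tail of the new page */
	*(void **)((u8 *)page_address(page) + PAGE_SIZE -
		   sizeof(void *)) = *page_ptr;
	/* the new page becomes the head of the chain */
	*page_ptr = page;
}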
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
                           struct iwl_device_tx_cmd *dev_cmd, int txq_id)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_cmd_meta *out_meta;
        struct iwl_txq *txq = trans->txqs.txq[txq_id];
        u16 cmd_len;
                        struct iwl_device_tx_cmd **dev_cmd_ptr;
 
                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
-                                              trans_pcie->dev_cmd_offs);
+                                              trans->txqs.dev_cmd_offs);
 
                        *dev_cmd_ptr = dev_cmd;
                        __skb_queue_tail(&txq->overflow_q, skb);
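(The read-back mirror of this stash, as a one-line sketch; the reclaim
hunk further down does the same thing inline when it drains the
overflow queue.)

	struct iwl_device_tx_cmd *dev_cmd =
		*(void **)((u8 *)skb->cb + trans->txqs.dev_cmd_offs);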
  */
 void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = trans->txqs.txq[txq_id];
 
        spin_lock_bh(&txq->lock);
                        if (WARN_ON_ONCE(!skb))
                                continue;
 
-                       iwl_pcie_free_tso_page(trans_pcie, skb);
+                       iwl_pcie_free_tso_page(trans, skb);
                }
                iwl_pcie_gen2_free_tfd(trans, txq);
                txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
 
        return 0;
 }
 
-void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
+void iwl_pcie_free_tso_page(struct iwl_trans *trans,
                            struct sk_buff *skb)
 {
        struct page **page_ptr;
        struct page *next;
 
-       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+       page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
        next = *page_ptr;
        *page_ptr = NULL;
 
                        if (WARN_ON_ONCE(!skb))
                                continue;
 
-                       iwl_pcie_free_tso_page(trans_pcie, skb);
+                       iwl_pcie_free_tso_page(trans, skb);
                }
                iwl_pcie_txq_free_tfd(trans, txq);
                txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = trans->txqs.txq[txq_id];
        int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
        int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
                if (WARN_ON_ONCE(!skb))
                        continue;
 
-               iwl_pcie_free_tso_page(trans_pcie, skb);
+               iwl_pcie_free_tso_page(trans, skb);
 
                __skb_queue_tail(skbs, skb);
 
                        struct iwl_device_tx_cmd *dev_cmd_ptr;
 
                        dev_cmd_ptr = *(void **)((u8 *)skb->cb +
-                                                trans_pcie->dev_cmd_offs);
+                                                trans->txqs.dev_cmd_offs);
 
                        /*
                         * Note that we can very well be overflowing again.
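(A minimal sketch of the drain loop this comment belongs to, assuming
an overflow_skbs list collected under the queue lock; each deferred
frame is retransmitted with the recovered command pointer.)

	while (!skb_queue_empty(&overflow_skbs)) {
		struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
		struct iwl_device_tx_cmd *dev_cmd_ptr;

		dev_cmd_ptr = *(void **)((u8 *)skb->cb +
					 trans->txqs.dev_cmd_offs);
		iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
	}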
        struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
        struct page **page_ptr;
 
-       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+       page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
 
        if (WARN_ON(*page_ptr))
                return NULL;
                        struct iwl_device_tx_cmd **dev_cmd_ptr;
 
                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
-                                              trans_pcie->dev_cmd_offs);
+                                              trans->txqs.dev_cmd_offs);
 
                        *dev_cmd_ptr = dev_cmd;
                        __skb_queue_tail(&txq->overflow_q, skb);