static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
 {
        struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
-       struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+       struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries;
        struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
        struct bnxt_qplib_nq *nq;
        int indx, rc;
-	 * in device sctructure.
+	 * in the device structure.
         */
        for (indx = 0; indx < rdev->num_msix; indx++)
-               rdev->msix_entries[indx].vector = ent[indx].vector;
+               rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;
 
        bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
                                  false);
        return rc;
 }
 
-static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
-{
-       int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
-       struct bnxt_en_dev *en_dev;
-
-       en_dev = rdev->en_dev;
-
-       num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
-
-       num_msix_got = bnxt_req_msix_vecs(en_dev,
-                                         rdev->msix_entries,
-                                         num_msix_want);
-       if (num_msix_got < BNXT_RE_MIN_MSIX) {
-               rc = -EINVAL;
-               goto done;
-       }
-       if (num_msix_got != num_msix_want) {
-               ibdev_warn(&rdev->ibdev,
-                          "Requested %d MSI-X vectors, got %d\n",
-                          num_msix_want, num_msix_got);
-       }
-       rdev->num_msix = num_msix_got;
-done:
-       return rc;
-}
-
 static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
                                  u16 opcd, u16 crid, u16 trid)
 {
        return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
                (rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
                                   BNXT_RE_GEN_P5_PF_NQ_DB) :
-                                  rdev->msix_entries[indx].db_offset;
+                                  rdev->en_dev->msix_entries[indx].db_offset;
 }
 
 static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
        for (i = 1; i < rdev->num_msix ; i++) {
                db_offt = bnxt_re_get_nqdb_offset(rdev, i);
                rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
-                                         i - 1, rdev->msix_entries[i].vector,
+                                         i - 1, rdev->en_dev->msix_entries[i].vector,
                                          db_offt, &bnxt_re_cqn_handler,
                                          &bnxt_re_srqn_handler);
                if (rc) {
                rattr.type = type;
                rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
                rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
-               rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
+               rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx;
                rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
                if (rc) {
                        ibdev_err(&rdev->ibdev,
                bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
        }
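+	/* MSI-X vectors are owned by the L2 driver now; only clear the
+	 * RoCE driver's count here instead of freeing them.
+	 */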
        if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags))
-               bnxt_free_msix_vecs(rdev->en_dev);
+               rdev->num_msix = 0;
 
        bnxt_re_destroy_chip_ctx(rdev);
        if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
        /* Check whether VF or PF */
        bnxt_re_get_sriov_func_type(rdev);
 
-       rc = bnxt_re_request_msix(rdev);
-       if (rc) {
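+	/* MSI-X vectors are reserved by the L2 driver at aux device init
+	 * time; bail out if none were set aside for RoCE.
+	 */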
+       if (!rdev->en_dev->ulp_tbl->msix_requested) {
-		ibdev_err(&rdev->ibdev,
-			  "Failed to get MSI-X vectors: %#x\n", rc);
+		ibdev_err(&rdev->ibdev, "Failed to get MSI-X vectors\n");
                rc = -EINVAL;
                goto fail;
        }
+       ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
+                 rdev->en_dev->ulp_tbl->msix_requested);
+       rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
        set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);
 
        bnxt_re_query_hwrm_intf_version(rdev);
        rattr.type = type;
        rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
        rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
-       rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+       rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
        rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
        if (rc) {
                ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
                goto free_rcfw;
        }
        db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
-       vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
+       vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
        rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
                                            vid, db_offt, rdev->is_virtfn,
                                            &bnxt_re_aeq_handler);
 
 
 static DEFINE_IDA(bnxt_aux_dev_ids);
 
+static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
+{
+       struct bnxt_en_dev *edev = bp->edev;
+       int num_msix, idx, i;
+
+       if (!edev->ulp_tbl->msix_requested) {
+               netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
+               return;
+       }
+       num_msix = edev->ulp_tbl->msix_requested;
+       idx = edev->ulp_tbl->msix_base;
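+	/* Vectors reserved for the ULP start at msix_base in bp->irq_tbl;
+	 * copy their vector numbers and per-chip doorbell offsets into the
+	 * caller's entry array.
+	 */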
+       for (i = 0; i < num_msix; i++) {
+               ent[i].vector = bp->irq_tbl[idx + i].vector;
+               ent[i].ring_idx = idx + i;
+               if (bp->flags & BNXT_FLAG_CHIP_P5) {
+                       ent[i].db_offset = DB_PF_OFFSET_P5;
+                       if (BNXT_VF(bp))
+                               ent[i].db_offset = DB_VF_OFFSET_P5;
+               } else {
+                       ent[i].db_offset = (idx + i) * 0x80;
+               }
+       }
+}
+
 int bnxt_register_dev(struct bnxt_en_dev *edev,
                      struct bnxt_ulp_ops *ulp_ops,
                      void *handle)
            bp->cp_nr_rings == max_stat_ctxs)
                return -ENOMEM;
 
-       ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
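+	/* The ULP table is allocated when the aux device is created;
+	 * reuse it here instead of allocating per registration.
+	 */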
+       ulp = edev->ulp_tbl;
        if (!ulp)
                return -ENOMEM;
 
-       edev->ulp_tbl = ulp;
        ulp->handle = handle;
        rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
 
        if (test_bit(BNXT_STATE_OPEN, &bp->state))
                bnxt_hwrm_vnic_cfg(bp, 0);
 
+       bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
+       edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
        return 0;
 }
 EXPORT_SYMBOL(bnxt_register_dev);
 
        ulp = edev->ulp_tbl;
        if (ulp->msix_requested)
-               bnxt_free_msix_vecs(edev);
+               edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
 
        if (ulp->max_async_event_id)
                bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
                msleep(100);
                i++;
        }
-       kfree(ulp);
-       edev->ulp_tbl = NULL;
        return;
 }
 EXPORT_SYMBOL(bnxt_unregister_dev);
 
-static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
-{
-       struct bnxt_en_dev *edev = bp->edev;
-       int num_msix, idx, i;
-
-       num_msix = edev->ulp_tbl->msix_requested;
-       idx = edev->ulp_tbl->msix_base;
-       for (i = 0; i < num_msix; i++) {
-               ent[i].vector = bp->irq_tbl[idx + i].vector;
-               ent[i].ring_idx = idx + i;
-               if (bp->flags & BNXT_FLAG_CHIP_P5) {
-                       ent[i].db_offset = DB_PF_OFFSET_P5;
-                       if (BNXT_VF(bp))
-                               ent[i].db_offset = DB_VF_OFFSET_P5;
-               } else {
-                       ent[i].db_offset = (idx + i) * 0x80;
-               }
-       }
-}
-
-int bnxt_req_msix_vecs(struct bnxt_en_dev *edev,
-                             struct bnxt_msix_entry *ent,
-                             int num_msix)
-{
-       struct net_device *dev = edev->net;
-       struct bnxt *bp = netdev_priv(dev);
-       struct bnxt_hw_resc *hw_resc;
-       int max_idx, max_cp_rings;
-       int avail_msix, idx;
-       int total_vecs;
-       int rc = 0;
-
-       if (!(bp->flags & BNXT_FLAG_USING_MSIX))
-               return -ENODEV;
-
-       if (edev->ulp_tbl->msix_requested)
-               return -EAGAIN;
-
-       max_cp_rings = bnxt_get_max_func_cp_rings(bp);
-       avail_msix = bnxt_get_avail_msix(bp, num_msix);
-       if (!avail_msix)
-               return -ENOMEM;
-       if (avail_msix > num_msix)
-               avail_msix = num_msix;
-
-       if (BNXT_NEW_RM(bp)) {
-               idx = bp->cp_nr_rings;
-       } else {
-               max_idx = min_t(int, bp->total_irqs, max_cp_rings);
-               idx = max_idx - avail_msix;
-       }
-       edev->ulp_tbl->msix_base = idx;
-       edev->ulp_tbl->msix_requested = avail_msix;
-       hw_resc = &bp->hw_resc;
-       total_vecs = idx + avail_msix;
-       rtnl_lock();
-       if (bp->total_irqs < total_vecs ||
-           (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
-               if (netif_running(dev)) {
-                       bnxt_close_nic(bp, true, false);
-                       rc = bnxt_open_nic(bp, true, false);
-               } else {
-                       rc = bnxt_reserve_rings(bp, true);
-               }
-       }
-       rtnl_unlock();
-       if (rc) {
-               edev->ulp_tbl->msix_requested = 0;
-               return -EAGAIN;
-       }
-
-       if (BNXT_NEW_RM(bp)) {
-               int resv_msix;
-
-               resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
-               avail_msix = min_t(int, resv_msix, avail_msix);
-               edev->ulp_tbl->msix_requested = avail_msix;
-       }
-       bnxt_fill_msix_vecs(bp, ent);
-       edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
-       return avail_msix;
-}
-EXPORT_SYMBOL(bnxt_req_msix_vecs);
-
-void bnxt_free_msix_vecs(struct bnxt_en_dev *edev)
-{
-       struct net_device *dev = edev->net;
-       struct bnxt *bp = netdev_priv(dev);
-
-       if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
-               return;
-
-       edev->ulp_tbl->msix_requested = 0;
-       edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
-       rtnl_lock();
-       if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
-               bnxt_close_nic(bp, true, false);
-               bnxt_open_nic(bp, true, false);
-       }
-       rtnl_unlock();
-
-       return;
-}
-EXPORT_SYMBOL(bnxt_free_msix_vecs);
-
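+/* Number of MSI-X vectors the L2 driver sets aside for the RoCE driver:
+ * the per-function RoCE maximum (PF or VF) capped at the number of online
+ * CPUs, or 0 if the device has no RoCE capability.
+ */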
 int bnxt_get_ulp_msix_num(struct bnxt *bp)
 {
-       if (bnxt_ulp_registered(bp->edev)) {
-               struct bnxt_en_dev *edev = bp->edev;
+       u32 roce_msix = BNXT_VF(bp) ?
+                       BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
 
-               return edev->ulp_tbl->msix_requested;
-       }
-       return 0;
+       return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
+               min_t(u32, roce_msix, num_online_cpus()) : 0);
 }
 
 int bnxt_get_ulp_msix_base(struct bnxt *bp)
                container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
 
        ida_free(&bnxt_aux_dev_ids, aux_priv->id);
+       kfree(aux_priv->edev->ulp_tbl);
        kfree(aux_priv->edev);
        kfree(aux_priv);
 }
        edev->hw_ring_stats_size = bp->hw_ring_stats_size;
        edev->pf_port_id = bp->pf.port_id;
        edev->en_state = bp->state;
+
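+	/* Fix the RoCE MSI-X budget up front; ULPs no longer request
+	 * vectors at registration time.
+	 */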
+       edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
 }
 
 void bnxt_rdma_aux_device_init(struct bnxt *bp)
        struct auxiliary_device *aux_dev;
        struct bnxt_aux_priv *aux_priv;
        struct bnxt_en_dev *edev;
+       struct bnxt_ulp *ulp;
        int rc;
 
        if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
        if (!edev)
                goto aux_dev_uninit;
 
+       ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+       if (!ulp)
+               goto aux_dev_uninit;
+
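+	/* The ULP table lives for the lifetime of the aux device and is
+	 * freed when the aux device is released.
+	 */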
+       edev->ulp_tbl = ulp;
        aux_priv->edev = edev;
        bp->edev = edev;
        bnxt_set_edev_info(edev, bp);