                return -ENODEV;
 
        ctrl_vsi = pf->eswitch.control_vsi;
-       /* cp VSI is created with 1 queue as default */
-       pf->eswitch.qs.value = 1;
        pf->eswitch.uplink_vsi = uplink_vsi;
 
        if (ice_eswitch_setup_env(pf))
        ice_vsi_release(ctrl_vsi);
 
        pf->eswitch.is_running = false;
-       pf->eswitch.qs.is_reaching = false;
 }
 
 /**
        struct ice_vsi *cp = eswitch->control_vsi;
        int queues = 0;
 
-       if (eswitch->qs.is_reaching) {
-               if (eswitch->qs.to_reach >= eswitch->qs.value + change) {
-                       queues = eswitch->qs.to_reach;
-                       eswitch->qs.is_reaching = false;
-               } else {
-                       queues = 0;
-               }
-       } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) ||
-                  change < 0) {
-               queues = cp->alloc_txq + change;
-       }
-
        if (queues) {
                cp->req_txq = queues;
                cp->req_rxq = queues;
                ice_vsi_open(cp);
        }
 
-       eswitch->qs.value += change;
        ice_eswitch_remap_rings_to_vectors(eswitch);
 }
 
                err = ice_eswitch_enable_switchdev(pf);
                if (err)
                        return err;
-               /* Control plane VSI is created with 1 queue as default */
-               pf->eswitch.qs.to_reach -= 1;
                change = 0;
        }
 
 
        return 0;
 }
-
-/**
- * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues
- * @pf: pointer to PF structure
- * @change: how many more (or fewer) queues are needed
- *
- * Remember to call ice_eswitch_attach/detach() "change" times.
- */
-void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change)
-{
-       if (pf->eswitch.qs.value + change < 0)
-               return;
-
-       pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
-       pf->eswitch.qs.is_reaching = true;
-}
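
For context, a minimal sketch (not part of the patch) of how the removed helper was used, reconstructed from the kernel-doc above and the ice_sriov.c hunks further down; the attach/detach signatures are inferred from the ice_eswitch.h stubs in this series:

	/* SR-IOV enable path: reserve the extra control plane queues up
	 * front, then attach each VF ("change" attach calls in total).
	 */
	ice_eswitch_reserve_cp_queues(pf, num_vfs);
	ice_for_each_vf(pf, bkt, vf)
		ice_eswitch_attach(pf, vf);

	/* SR-IOV disable path: a negative change shrinks the reservation,
	 * followed by one detach call per VF.
	 */
	ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));
	ice_for_each_vf(pf, bkt, vf)
		ice_eswitch_detach(pf, vf);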
 
                                struct ice_tx_offload_params *off);
 netdev_tx_t
 ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change);
 #else /* CONFIG_ICE_SWITCHDEV */
 static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
 
 {
        return NETDEV_TX_BUSY;
 }
-
-static inline void
-ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { }
 #endif /* CONFIG_ICE_SWITCHDEV */
 #endif /* _ICE_ESWITCH_H_ */
 
        else
                dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
 
-       ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));
-
        mutex_lock(&vfs->table_lock);
 
        ice_for_each_vf(pf, bkt, vf) {
                goto err_unroll_sriov;
        }
 
-       ice_eswitch_reserve_cp_queues(pf, num_vfs);
        ret = ice_start_vfs(pf);
        if (ret) {
                dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);