ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
u16 vsi_num, u8 tc)
{
- u16 numq, valq, buf_size, num_moved, qbuf_size;
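+ /* One-element move buffer on the stack: DEFINE_FLEX() declares the
+  * struct together with a single teid[] entry, and __struct_size()
+  * returns its compile-time size, so no kzalloc()/kfree() or
+  * allocation-failure handling is needed.
+  */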
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
+ u16 numq, valq, num_moved, qbuf_size;
+ u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
- struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
struct ice_hw *new_hw = NULL;
__le32 teid, parent_teid;
goto resume_traffic;
/* Move VF's VSI node for this TC to newport's scheduler tree */
- buf_size = struct_size(buf, teid, 1);
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- dev_warn(dev, "Failure to alloc memory for VF node failover\n");
- goto resume_traffic;
- }
-
buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;
- if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
- NULL))
+ if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for failover\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
- kfree(buf);
goto resume_traffic;
qbuf_err:
ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u8 tc)
{
- u16 numq, valq, buf_size, num_moved, qbuf_size;
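+ /* Same on-stack, one-element move buffer as in ice_lag_move_vf_node_tc() */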
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
+ u16 numq, valq, num_moved, qbuf_size;
+ u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
- struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
goto resume_reclaim;
/* Move node to new parent */
- buf_size = struct_size(buf, teid, 1);
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- dev_warn(dev, "Failure to alloc memory for VF node failover\n");
- goto resume_reclaim;
- }
-
buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;
- if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
- NULL))
+ if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
- kfree(buf);
goto resume_reclaim;
reclaim_qerr:
ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 vsi_num, u8 tc)
{
- u16 numq, valq, buf_size, num_moved, qbuf_size;
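+ /* On-stack move buffer, as in the failover and reclaim paths above */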
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
+ u16 numq, valq, num_moved, qbuf_size;
+ u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
- struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
goto resume_sync;
/* Move node to new parent */
- buf_size = struct_size(buf, teid, 1);
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- dev_warn(dev, "Failure to alloc for VF node move in reset rebuild\n");
- goto resume_sync;
- }
-
buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;
- if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
- NULL))
+ if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for LAG reset rebuild\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
- kfree(buf);
goto resume_sync;
sync_qerr:
}
/**
- * ice_aq_move_sched_elems - move scheduler elements
+ * ice_aq_move_sched_elems - move scheduler elements (one group)
* @hw: pointer to the HW struct
- * @grps_req: number of groups to move
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @grps_movd: returns total number of groups moved
- * @cd: pointer to command details structure or NULL
*
* Move scheduling elements (0x0408)
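+ *
+ * Callers are expected to pass a one-element buffer, typically built on
+ * the stack with DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1) and
+ * sized via __struct_size(buf).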
*/
int
-ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
- struct ice_aqc_move_elem *buf, u16 buf_size,
- u16 *grps_movd, struct ice_sq_cd *cd)
+ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
+ u16 buf_size, u16 *grps_movd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
- grps_req, (void *)buf, buf_size,
- grps_movd, cd);
+ 1, buf, buf_size, grps_movd, NULL);
}
/**
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
- struct ice_aqc_move_elem *buf;
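+ /* Stack buffer for a single-element move; buf_len covers hdr plus teid[0] */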
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ u16 buf_len = __struct_size(buf);
struct ice_sched_node *node;
u16 i, grps_movd = 0;
struct ice_hw *hw;
int status = 0;
- u16 buf_len;
hw = pi->hw;
hw->max_children[parent->tx_sched_layer])
return -ENOSPC;
- buf_len = struct_size(buf, teid, 1);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
for (i = 0; i < num_items; i++) {
node = ice_sched_find_node_by_teid(pi->root, list[i]);
if (!node) {
status = -EINVAL;
- goto move_err_exit;
+ break;
}
buf->hdr.src_parent_teid = node->info.parent_teid;
buf->hdr.dest_parent_teid = parent->info.node_teid;
buf->teid[0] = node->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
- status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
- &grps_movd, NULL);
+ status = ice_aq_move_sched_elems(hw, buf, buf_len, &grps_movd);
if (status && grps_movd != 1) {
status = -EIO;
- goto move_err_exit;
+ break;
}
/* update the SW DB */
ice_sched_update_parent(parent, node);
}
-move_err_exit:
- kfree(buf);
return status;
}