#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE              0x0076
 #define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT                0x0077
 #define ICE_AQC_CAPS_NVM_MGMT                          0x0080
+#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE           0x0085
 #define ICE_AQC_CAPS_FW_LAG_SUPPORT                    0x0092
 #define ICE_AQC_BIT_ROCEV2_LAG                         0x01
 #define ICE_AQC_BIT_SRIOV_LAG                          0x02
        __le32 addr_low;
 };
 
+/* Get/Set Tx Topology (indirect 0x0418/0x0417)
+ * Descriptor used to read back or replace the Tx scheduler topology.
+ * The topology image itself travels in the indirect buffer addressed by
+ * addr_high/addr_low.
+ */
+struct ice_aqc_get_set_tx_topo {
+       /* flags for the set (0x0417) direction */
+       u8 set_flags;
+/* request a reset after the topology update — NOTE(review): keeps the
+ * original "CORRER" spelling; presumably a typo for CORER, confirm against
+ * the hardware spec before renaming.
+ */
+#define ICE_AQC_TX_TOPO_FLAGS_CORRER           BIT(0)
+/* topology image is sourced from the supplied RAM buffer */
+#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM          BIT(1)
+/* load a new (non-default) topology */
+#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW         BIT(4)
+/* a topology update has already been issued (e.g. by another PF) */
+#define ICE_AQC_TX_TOPO_FLAGS_ISSUED           BIT(5)
+
+       /* flags for the get (0x0418) direction */
+       u8 get_flags;
+/* read the topology currently programmed in RAM */
+#define ICE_AQC_TX_TOPO_GET_RAM                2
+
+       __le16 reserved1;
+       __le32 reserved2;
+       __le32 addr_high;       /* upper 32 bits of indirect buffer address */
+       __le32 addr_low;        /* lower 32 bits of indirect buffer address */
+};
+
 /* Update TSE (indirect 0x0403)
  * Get TSE (indirect 0x0404)
  * Add TSE (indirect 0x0401)
                struct ice_aqc_get_link_topo get_link_topo;
                struct ice_aqc_i2c read_write_i2c;
                struct ice_aqc_read_i2c_resp read_i2c_resp;
+               struct ice_aqc_get_set_tx_topo get_set_tx_topo;
        } params;
 };
 
        ice_aqc_opc_query_sched_res                     = 0x0412,
        ice_aqc_opc_remove_rl_profiles                  = 0x0415,
 
+       /* tx topology commands */
+       ice_aqc_opc_set_tx_topo                         = 0x0417,
+       ice_aqc_opc_get_tx_topo                         = 0x0418,
+
        /* PHY commands */
        ice_aqc_opc_get_phy_caps                        = 0x0600,
        ice_aqc_opc_set_phy_cfg                         = 0x0601,
 
 #include "ice_common.h"
 #include "ice.h"
 #include "ice_ddp.h"
+#include "ice_sched.h"
 
 /* For supporting double VLAN mode, it is necessary to enable or disable certain
  * boost tcam entries. The metadata labels names that match the following
 
        return state;
 }
+
+/**
+ * ice_get_set_tx_topo - issue the get or set Tx topology AQ command
+ * @hw: pointer to the HW struct
+ * @buf: Tx topology buffer (new topology on set, destination on get)
+ * @buf_size: size of @buf in bytes
+ * @cd: pointer to command details structure or NULL
+ * @flags: on get, receives the flags returned by firmware; may be NULL
+ * @set: true to set the topology, false to get it
+ *
+ * Builds the 0x0417 (set) or 0x0418 (get) descriptor and sends it with the
+ * supplied indirect buffer. For set, a NULL @buf requests the default
+ * topology instead of loading a new one from RAM.
+ *
+ * Return: zero on success, negative error code otherwise.
+ */
+static int
+ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
+                   struct ice_sq_cd *cd, u8 *flags, bool set)
+{
+       struct ice_aqc_get_set_tx_topo *cmd;
+       struct ice_aq_desc desc;
+       int err;
+
+       cmd = &desc.params.get_set_tx_topo;
+       if (!set) {
+               ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
+               cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
+       } else {
+               ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
+               cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
+               /* a supplied buffer means a new topology, not the default */
+               if (buf)
+                       cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW |
+                                         ICE_AQC_TX_TOPO_FLAGS_SRC_RAM;
+
+               /* E825C needs the read flag on set as well */
+               if (ice_is_e825c(hw))
+                       desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+       }
+
+       if (!ice_is_e825c(hw))
+               desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+       err = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+       if (err)
+               return err;
+
+       /* for get, report back the flag byte firmware filled in */
+       if (!set && flags)
+               *flags = cmd->set_flags;
+
+       return 0;
+}
+
+/**
+ * ice_cfg_tx_topo - Initialize new Tx topology if available
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @len: buffer size
+ *
+ * The function will apply the new Tx topology from the package buffer
+ * if available.
+ *
+ * Return: zero when update was successful, -EEXIST when the requested
+ * topology is already in place (or another PF is applying it), other
+ * negative values on error.
+ */
+int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
+{
+       u8 *current_topo, *new_topo = NULL;
+       struct ice_run_time_cfg_seg *seg;
+       struct ice_buf_hdr *section;
+       struct ice_pkg_hdr *pkg_hdr;
+       enum ice_ddp_state state;
+       u16 offset, size = 0;
+       u32 reg = 0;
+       int status;
+       u8 flags;
+
+       if (!buf || !len)
+               return -EINVAL;
+
+       /* Does FW support new Tx topology mode ? */
+       if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
+               ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
+               return -EOPNOTSUPP;
+       }
+
+       current_topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+       if (!current_topo)
+               return -ENOMEM;
+
+       /* Get the current Tx topology; only the returned flags are needed,
+        * the buffer contents are discarded.
+        */
+       status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
+                                    &flags, false);
+
+       kfree(current_topo);
+
+       if (status) {
+               ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
+               return status;
+       }
+
+       /* Is default topology already applied ? */
+       if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+           hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) {
+               ice_debug(hw, ICE_DBG_INIT, "Default topology already applied\n");
+               return -EEXIST;
+       }
+
+       /* Is new topology already applied ? */
+       if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+           hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
+               ice_debug(hw, ICE_DBG_INIT, "New topology already applied\n");
+               return -EEXIST;
+       }
+
+       /* Setting topology already issued? */
+       if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
+               ice_debug(hw, ICE_DBG_INIT, "Update Tx topology was done by another PF\n");
+               /* Add a small delay before exiting */
+               msleep(2000);
+               return -EEXIST;
+       }
+
+       /* Change the topology from new to default (5 to 9); new_topo stays
+        * NULL so the set command requests the default topology.
+        */
+       if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+           hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
+               ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
+               goto update_topo;
+       }
+
+       pkg_hdr = (struct ice_pkg_hdr *)buf;
+       state = ice_verify_pkg(pkg_hdr, len);
+       if (state) {
+               ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n",
+                         state);
+               return -EIO;
+       }
+
+       /* Find runtime configuration segment */
+       seg = (struct ice_run_time_cfg_seg *)
+             ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
+       if (!seg) {
+               ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
+               return -EIO;
+       }
+
+       if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
+               ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
+                         le32_to_cpu(seg->buf_table.buf_count));
+               return -EIO;
+       }
+
+       section = ice_pkg_val_buf(seg->buf_table.buf_array);
+       if (!section || le32_to_cpu(section->section_entry[0].type) !=
+               ICE_SID_TX_5_LAYER_TOPO) {
+               ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
+               return -EIO;
+       }
+
+       size = le16_to_cpu(section->section_entry[0].size);
+       offset = le16_to_cpu(section->section_entry[0].offset);
+       if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
+               ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
+               return -EIO;
+       }
+
+       /* Make sure the section fits in the buffer */
+       if (offset + size > ICE_PKG_BUF_SIZE) {
+               ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
+               return -EIO;
+       }
+
+       /* Get the new topology buffer */
+       new_topo = ((u8 *)section) + offset;
+
+update_topo:
+       /* Acquire global lock to make sure that set topology issued
+        * by one PF.
+        */
+       status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
+                                ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+       if (status) {
+               ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
+               return status;
+       }
+
+       /* Check if reset was triggered already. */
+       reg = rd32(hw, GLGEN_RSTAT);
+       if (reg & GLGEN_RSTAT_DEVSTATE_M) {
+               /* Reset is in progress, re-init the HW again */
+               ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
+               ice_check_reset(hw);
+               /* Reset clears the global lock; no explicit release here. */
+               return 0;
+       }
+
+       /* Set new topology */
+       status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
+       if (status) {
+               ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
+               /* The set failed, so the CORER that would normally clear the
+                * global lock is never issued - release it explicitly so
+                * other PFs are not blocked until the lock times out.
+                */
+               ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+               return status;
+       }
+
+       /* New topology is updated, delay 1 second before issuing the CORER */
+       msleep(1000);
+       ice_reset(hw, ICE_RESET_CORER);
+       /* CORER will clear the global lock, so no explicit call
+        * required for release.
+        */
+
+       return 0;
+}