#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
 #define ICE_INT_NAME_STR_LEN   (IFNAMSIZ + 16)
-#define ICE_AQ_LEN             64
+#define ICE_AQ_LEN             192
 #define ICE_MBXSQ_LEN          64
+#define ICE_SBQ_LEN            64
 #define ICE_MIN_LAN_TXRX_MSIX  1
 #define ICE_MIN_LAN_OICR_MSIX  1
 #define ICE_MIN_MSIX           (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
        ICE_STATE_NOMINAL_CHECK_BITS,
        ICE_ADMINQ_EVENT_PENDING,
        ICE_MAILBOXQ_EVENT_PENDING,
+       ICE_SIDEBANDQ_EVENT_PENDING,
        ICE_MDD_EVENT_PENDING,
        ICE_VFLR_EVENT_PENDING,
        ICE_FLTR_OVERFLOW_PROMISC,
 
        __le32 addr_low;
 };
 
+/* Sideband Control Interface Commands */
+/* Neighbor Device Request (indirect 0x0C00); also used for the response. */
+struct ice_aqc_neigh_dev_req {
+       __le16 sb_data_len;     /* length of the sideband payload in the buffer */
+       u8 reserved[6];
+       /* address of the indirect command buffer (per the AQ indirect
+        * descriptor convention used by the other structs in this file)
+        */
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
 /* Add Tx LAN Queues (indirect 0x0C30) */
 struct ice_aqc_add_txqs {
        u8 num_qgrps;
                struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
                struct ice_aqc_get_set_rss_lut get_set_rss_lut;
                struct ice_aqc_get_set_rss_key get_set_rss_key;
+               struct ice_aqc_neigh_dev_req neigh_dev;
                struct ice_aqc_add_txqs add_txqs;
                struct ice_aqc_dis_txqs dis_txqs;
                struct ice_aqc_add_rdma_qset add_rdma_qset;
        ice_aqc_opc_get_rss_key                         = 0x0B04,
        ice_aqc_opc_get_rss_lut                         = 0x0B05,
 
+       /* Sideband Control Interface commands */
+       ice_aqc_opc_neighbour_device_request            = 0x0C00,
+
        /* Tx queue handling commands/events */
        ice_aqc_opc_add_txqs                            = 0x0C30,
        ice_aqc_opc_dis_txqs                            = 0x0C31,
 
        { 0 }
 };
 
+/* Sideband Queue command wrappers */
+
+/**
+ * ice_sbq_send_cmd - post a command descriptor to the Sideband Queue
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ *
+ * Sends the descriptor on whichever control queue ice_get_sbq() selects
+ * (the dedicated sideband queue when supported, else the admin queue) and
+ * converts the ice_status result into a standard errno value.
+ */
+static int
+ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
+                void *buf, u16 buf_size, struct ice_sq_cd *cd)
+{
+       return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
+                                                  (struct ice_aq_desc *)desc,
+                                                  buf, buf_size, cd));
+}
+
+/**
+ * ice_sbq_rw_reg - Fill Sideband Queue command
+ * @hw: pointer to the HW struct
+ * @in: message info to be filled in descriptor
+ *
+ * Builds a neighbor-device request message from @in and sends it over the
+ * sideband queue. For a read (in->opcode == 0, ice_sbq_msg_rd) the value
+ * returned by the remote device is stored back into in->data.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
+{
+       struct ice_sbq_cmd_desc desc = {0};
+       struct ice_sbq_msg_req msg = {0};
+       u16 msg_len;
+       int status;
+
+       msg_len = sizeof(msg);
+
+       msg.dest_dev = in->dest_dev;
+       msg.opcode = in->opcode;
+       msg.flags = ICE_SBQ_MSG_FLAGS;
+       msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
+       msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
+       msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
+
+       if (in->opcode)
+               /* write request: carry the payload in the message */
+               msg.data = cpu_to_le32(in->data);
+       else
+               /* data read comes back in completion, so shorten the struct by
+                * sizeof(msg.data)
+                */
+               msg_len -= sizeof(msg.data);
+
+       desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
+       desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
+       desc.param0.cmd_len = cpu_to_le16(msg_len);
+       status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
+       if (!status && !in->opcode)
+               /* the completion is written back into the same buffer;
+                * reinterpret it as ice_sbq_msg_cmpl to pick out the data
+                */
+               in->data = le32_to_cpu
+                       (((struct ice_sbq_msg_cmpl *)&msg)->data);
+       return status;
+}
+
 /* FW Admin Queue command wrappers */
 
 /* Software lock/mutex that is meant to be held while the Global Config Lock
 
 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
                      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
                      enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+bool ice_is_sbq_supported(struct ice_hw *hw);
+struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
 enum ice_status
 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
                struct ice_aq_desc *desc, void *buf, u16 buf_size,
 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
 struct ice_q_ctx *
 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
 void
 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
                  u64 *prev_stat, u64 *cur_stat);
 
        ICE_CQ_INIT_REGS(cq, PF_MBX);
 }
 
+/**
+ * ice_sb_init_regs - Initialize Sideband registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_sb_init_regs(struct ice_hw *hw)
+{
+       struct ice_ctl_q_info *cq = &hw->sbq;
+
+       /* bind the PF_SB_* register set to the sideband control queue,
+        * mirroring how the admin and mailbox queues are initialized
+        */
+       ICE_CQ_INIT_REGS(cq, PF_SB);
+}
+
 /**
  * ice_check_sq_alive
  * @hw: pointer to the HW struct
                ice_adminq_init_regs(hw);
                cq = &hw->adminq;
                break;
+       case ICE_CTL_Q_SB:
+               ice_sb_init_regs(hw);
+               cq = &hw->sbq;
+               break;
        case ICE_CTL_Q_MAILBOX:
                ice_mailbox_init_regs(hw);
                cq = &hw->mailboxq;
        return ret_code;
 }
 
+/**
+ * ice_is_sbq_supported - is the sideband queue supported
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if the sideband control queue interface is supported for
+ * the device, false otherwise
+ */
+bool ice_is_sbq_supported(struct ice_hw *hw)
+{
+       /* The device sideband queue is only supported on devices with the
+        * generic MAC type.
+        */
+       return hw->mac_type == ICE_MAC_GENERIC;
+}
+
+/**
+ * ice_get_sbq - returns the right control queue to use for sideband
+ * @hw: pointer to the hardware structure
+ *
+ * Return: the dedicated sideband queue when the device supports one,
+ * otherwise the admin queue, which handles sideband traffic on such
+ * devices (see the fallback note in ice_init_all_ctrlq).
+ */
+struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
+{
+       if (ice_is_sbq_supported(hw))
+               return &hw->sbq;
+       return &hw->adminq;
+}
+
 /**
  * ice_shutdown_ctrlq - shutdown routine for any control queue
  * @hw: pointer to the hardware structure
                if (ice_check_sq_alive(hw, cq))
                        ice_aq_q_shutdown(hw, true);
                break;
+       case ICE_CTL_Q_SB:
+               cq = &hw->sbq;
+               break;
        case ICE_CTL_Q_MAILBOX:
                cq = &hw->mailboxq;
                break;
 {
        /* Shutdown FW admin queue */
        ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+       /* Shutdown PHY Sideband */
+       if (ice_is_sbq_supported(hw))
+               ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
        /* Shutdown PF-VF Mailbox */
        ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 }
 
        if (status)
                return status;
+       /* sideband control queue (SBQ) interface is not supported on some
+        * devices. Initialize if supported, else fallback to the admin queue
+        * interface
+        */
+       if (ice_is_sbq_supported(hw)) {
+               status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
+               if (status)
+                       return status;
+       }
        /* Init Mailbox queue */
        return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 }
 enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
 {
        ice_init_ctrlq_locks(&hw->adminq);
+       if (ice_is_sbq_supported(hw))
+               ice_init_ctrlq_locks(&hw->sbq);
        ice_init_ctrlq_locks(&hw->mailboxq);
 
        return ice_init_all_ctrlq(hw);
        ice_shutdown_all_ctrlq(hw);
 
        ice_destroy_ctrlq_locks(&hw->adminq);
+       if (ice_is_sbq_supported(hw))
+               ice_destroy_ctrlq_locks(&hw->sbq);
        ice_destroy_ctrlq_locks(&hw->mailboxq);
 }
 
 
 /* Maximum buffer lengths for all control queue types */
 #define ICE_AQ_MAX_BUF_LEN 4096
 #define ICE_MBXQ_MAX_BUF_LEN 4096
+#define ICE_SBQ_MAX_BUF_LEN 512
 
 #define ICE_CTL_Q_DESC(R, i) \
        (&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
        ICE_CTL_Q_UNKNOWN = 0,
        ICE_CTL_Q_ADMIN,
        ICE_CTL_Q_MAILBOX,
+       ICE_CTL_Q_SB,
 };
 
 /* Control Queue timeout settings - max delay 1s */
 
 #define PF_MBX_ATQLEN_ATQCRIT_M                        BIT(30)
 #define PF_MBX_ATQLEN_ATQENABLE_M              BIT(31)
 #define PF_MBX_ATQT                            0x0022E300
+/* PF Sideband control queue registers: ARQ = receive/event ring,
+ * ATQ = send/command ring. Bit layout parallels the PF_MBX_* mailbox
+ * registers above (LEN enable/critical/overflow bits in 28..31).
+ */
+#define PF_SB_ARQBAH                           0x0022FF00
+#define PF_SB_ARQBAH_ARQBAH_S                  0
+#define PF_SB_ARQBAH_ARQBAH_M                  ICE_M(0xFFFFFFFF, 0)
+#define PF_SB_ARQBAL                           0x0022FE80
+#define PF_SB_ARQBAL_ARQBAL_LSB_S              0
+#define PF_SB_ARQBAL_ARQBAL_LSB_M              ICE_M(0x3F, 0)
+#define PF_SB_ARQBAL_ARQBAL_S                  6
+#define PF_SB_ARQBAL_ARQBAL_M                  ICE_M(0x3FFFFFF, 6)
+#define PF_SB_ARQH                             0x00230000
+#define PF_SB_ARQH_ARQH_S                      0
+#define PF_SB_ARQH_ARQH_M                      ICE_M(0x3FF, 0)
+#define PF_SB_ARQLEN                           0x0022FF80
+#define PF_SB_ARQLEN_ARQLEN_S                  0
+#define PF_SB_ARQLEN_ARQLEN_M                  ICE_M(0x3FF, 0)
+#define PF_SB_ARQLEN_ARQVFE_S                  28
+#define PF_SB_ARQLEN_ARQVFE_M                  BIT(28)
+#define PF_SB_ARQLEN_ARQOVFL_S                 29
+#define PF_SB_ARQLEN_ARQOVFL_M                 BIT(29)
+#define PF_SB_ARQLEN_ARQCRIT_S                 30
+#define PF_SB_ARQLEN_ARQCRIT_M                 BIT(30)
+#define PF_SB_ARQLEN_ARQENABLE_S               31
+#define PF_SB_ARQLEN_ARQENABLE_M               BIT(31)
+#define PF_SB_ARQT                             0x00230080
+#define PF_SB_ARQT_ARQT_S                      0
+#define PF_SB_ARQT_ARQT_M                      ICE_M(0x3FF, 0)
+#define PF_SB_ATQBAH                           0x0022FC80
+#define PF_SB_ATQBAH_ATQBAH_S                  0
+#define PF_SB_ATQBAH_ATQBAH_M                  ICE_M(0xFFFFFFFF, 0)
+#define PF_SB_ATQBAL                           0x0022FC00
+#define PF_SB_ATQBAL_ATQBAL_S                  6
+#define PF_SB_ATQBAL_ATQBAL_M                  ICE_M(0x3FFFFFF, 6)
+#define PF_SB_ATQH                             0x0022FD80
+#define PF_SB_ATQH_ATQH_S                      0
+#define PF_SB_ATQH_ATQH_M                      ICE_M(0x3FF, 0)
+#define PF_SB_ATQLEN                           0x0022FD00
+#define PF_SB_ATQLEN_ATQLEN_S                  0
+#define PF_SB_ATQLEN_ATQLEN_M                  ICE_M(0x3FF, 0)
+#define PF_SB_ATQLEN_ATQVFE_S                  28
+#define PF_SB_ATQLEN_ATQVFE_M                  BIT(28)
+#define PF_SB_ATQLEN_ATQOVFL_S                 29
+#define PF_SB_ATQLEN_ATQOVFL_M                 BIT(29)
+#define PF_SB_ATQLEN_ATQCRIT_S                 30
+#define PF_SB_ATQLEN_ATQCRIT_M                 BIT(30)
+#define PF_SB_ATQLEN_ATQENABLE_S               31
+#define PF_SB_ATQLEN_ATQENABLE_M               BIT(31)
+#define PF_SB_ATQT                             0x0022FE00
+#define PF_SB_ATQT_ATQT_S                      0
+#define PF_SB_ATQT_ATQT_M                      ICE_M(0x3FF, 0)
 #define PRTDCB_GENC                            0x00083000
 #define PRTDCB_GENC_PFCLDA_S                   16
 #define PRTDCB_GENC_PFCLDA_M                   ICE_M(0xFFFF, 16)
 #define PFINT_OICR_CTL_ITR_INDX_M              ICE_M(0x3, 11)
 #define PFINT_OICR_CTL_CAUSE_ENA_M             BIT(30)
 #define PFINT_OICR_ENA                         0x0016C900
+#define PFINT_SB_CTL                           0x0016B600
+#define PFINT_SB_CTL_MSIX_INDX_M               ICE_M(0x7FF, 0)
+#define PFINT_SB_CTL_CAUSE_ENA_M               BIT(30)
 #define QINT_RQCTL(_QRX)                       (0x00150000 + ((_QRX) * 4))
 #define QINT_RQCTL_MSIX_INDX_S                 0
 #define QINT_RQCTL_MSIX_INDX_M                 ICE_M(0x7FF, 0)
 
        case ICE_ERR_DOES_NOT_EXIST:
                return -ENOENT;
        case ICE_ERR_OUT_OF_RANGE:
-               return -ENOTTY;
+       case ICE_ERR_AQ_ERROR:
+       case ICE_ERR_AQ_TIMEOUT:
+       case ICE_ERR_AQ_EMPTY:
+       case ICE_ERR_AQ_FW_CRITICAL:
+               return -EIO;
        case ICE_ERR_PARAM:
+       case ICE_ERR_INVAL_SIZE:
                return -EINVAL;
        case ICE_ERR_NO_MEMORY:
                return -ENOMEM;
        case ICE_ERR_MAX_LIMIT:
                return -EAGAIN;
+       case ICE_ERR_RESET_ONGOING:
+               return -EBUSY;
+       case ICE_ERR_AQ_FULL:
+               return -ENOSPC;
        default:
                return -EINVAL;
        }
 
                cq = &hw->adminq;
                qtype = "Admin";
                break;
+       case ICE_CTL_Q_SB:
+               cq = &hw->sbq;
+               qtype = "Sideband";
+               break;
        case ICE_CTL_Q_MAILBOX:
                cq = &hw->mailboxq;
                qtype = "Mailbox";
        ice_flush(hw);
 }
 
+/**
+ * ice_clean_sbq_subtask - clean the Sideband Queue rings
+ * @pf: board private structure
+ *
+ * Service-task handler that drains pending sideband queue events and
+ * clears ICE_SIDEBANDQ_EVENT_PENDING once the ring has been serviced.
+ */
+static void ice_clean_sbq_subtask(struct ice_pf *pf)
+{
+       struct ice_hw *hw = &pf->hw;
+
+       /* Nothing to do here if sideband queue is not supported */
+       if (!ice_is_sbq_supported(hw)) {
+               clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
+               return;
+       }
+
+       if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
+               return;
+
+       /* bail and retry on a later service-task pass if cleaning failed */
+       if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
+               return;
+
+       clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
+
+       /* events may have arrived between the clean and the clear above;
+        * re-check and clean once more (same pattern the other control
+        * queue subtasks use)
+        */
+       if (ice_ctrlq_pending(hw, &hw->sbq))
+               __ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
+
+       ice_flush(hw);
+}
+
 /**
  * ice_service_task_schedule - schedule the service task to wake up
  * @pf: board private structure
 
        ice_process_vflr_event(pf);
        ice_clean_mailboxq_subtask(pf);
+       ice_clean_sbq_subtask(pf);
        ice_sync_arfs_fltrs(pf);
        ice_flush_fdir_ctx(pf);
 
            test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
            test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
            test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
+           test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
            test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
                mod_timer(&pf->serv_tmr, jiffies);
 }
        hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
        hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
        hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+       hw->sbq.num_rq_entries = ICE_SBQ_LEN;
+       hw->sbq.num_sq_entries = ICE_SBQ_LEN;
+       hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
+       hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
 }
 
 /**
        dev = ice_pf_to_dev(pf);
        set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
        set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
+       set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
 
        oicr = rd32(hw, PFINT_OICR);
        ena_mask = rd32(hw, PFINT_OICR_ENA);
        wr32(hw, PFINT_MBX_CTL,
             rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
 
+       wr32(hw, PFINT_SB_CTL,
+            rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
+
        /* disable Control queue Interrupt causes */
        wr32(hw, PFINT_OICR_CTL,
             rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
               PFINT_MBX_CTL_CAUSE_ENA_M);
        wr32(hw, PFINT_MBX_CTL, val);
 
+       /* This enables Sideband queue Interrupt causes */
+       val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
+              PFINT_SB_CTL_CAUSE_ENA_M);
+       wr32(hw, PFINT_SB_CTL, val);
+
        ice_flush(hw);
 }
 
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _ICE_SBQ_CMD_H_
+#define _ICE_SBQ_CMD_H_
+
+/* This header file defines the Sideband Queue commands, error codes and
+ * descriptor format. It is shared between Firmware and Software.
+ */
+
+/* Sideband Queue command structure and opcodes */
+enum ice_sbq_opc {
+       /* Sideband Queue commands */
+       ice_sbq_opc_neigh_dev_req                       = 0x0C00,
+       ice_sbq_opc_neigh_dev_ev                        = 0x0C01
+};
+
+/* Sideband Queue descriptor. Indirect command
+ * and non posted
+ */
+struct ice_sbq_cmd_desc {
+       __le16 flags;
+       __le16 opcode;          /* enum ice_sbq_opc value */
+       __le16 datalen;
+       __le16 cmd_retval;
+
+       /* Opaque message data */
+       __le32 cookie_high;
+       __le32 cookie_low;
+
+       union {
+               __le16 cmd_len;         /* request message length */
+               __le16 cmpl_len;        /* completion message length */
+       } param0;
+
+       u8 reserved[6];
+       /* address of the indirect message buffer (filled in by the
+        * control queue send path)
+        */
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+/* Event descriptor: same header as ice_sbq_cmd_desc but the message is
+ * carried inline in data[] rather than in an indirect buffer
+ */
+struct ice_sbq_evt_desc {
+       __le16 flags;
+       __le16 opcode;
+       __le16 datalen;
+       __le16 cmd_retval;
+       u8 data[24];
+};
+
+/* Sideband destination device IDs.
+ * NOTE(review): cgu is presumably the Clock Generation Unit; the rmn_*
+ * device meanings are not visible here -- confirm against the HW spec.
+ */
+enum ice_sbq_msg_dev {
+       rmn_0   = 0x02,
+       rmn_1   = 0x03,
+       rmn_2   = 0x04,
+       cgu     = 0x06
+};
+
+/* Message opcodes: remote register read vs. write */
+enum ice_sbq_msg_opcode {
+       ice_sbq_msg_rd  = 0x00,
+       ice_sbq_msg_wr  = 0x01
+};
+
+#define ICE_SBQ_MSG_FLAGS      0x40
+#define ICE_SBQ_MSG_SBE_FBE    0x0F
+
+/* Request message placed in the indirect buffer (see ice_sbq_rw_reg) */
+struct ice_sbq_msg_req {
+       u8 dest_dev;            /* enum ice_sbq_msg_dev */
+       u8 src_dev;
+       u8 opcode;              /* enum ice_sbq_msg_opcode */
+       u8 flags;
+       u8 sbe_fbe;
+       u8 func_id;
+       __le16 msg_addr_low;
+       __le32 msg_addr_high;
+       __le32 data;            /* write payload; omitted for reads */
+};
+
+/* Completion written back into the same buffer for read requests */
+struct ice_sbq_msg_cmpl {
+       u8 dest_dev;
+       u8 src_dev;
+       u8 opcode;
+       u8 flags;
+       __le32 data;            /* value read from the remote device */
+};
+
+/* Internal struct */
+struct ice_sbq_msg_input {
+       u8 dest_dev;            /* target device (enum ice_sbq_msg_dev) */
+       u8 opcode;              /* ice_sbq_msg_rd or ice_sbq_msg_wr */
+       u16 msg_addr_low;
+       u32 msg_addr_high;
+       u32 data;               /* in: write payload; out: read result */
+};
+#endif /* _ICE_SBQ_CMD_H_ */
 
 #include "ice_lan_tx_rx.h"
 #include "ice_flex_type.h"
 #include "ice_protocol_type.h"
+#include "ice_sbq_cmd.h"
 
 static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
 {
 
        /* Control Queue info */
        struct ice_ctl_q_info adminq;
+       struct ice_ctl_q_info sbq;
        struct ice_ctl_q_info mailboxq;
 
        u8 api_branch;          /* API branch version */