if (!device)
                return -EINVAL;
 
-       if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
+       if (!(device->attrs.kernel_cap_flags & IBK_ALLOW_USER_UNREG)) {
                ib_device_put(device);
                return -EINVAL;
        }
 
        resp->hw_ver            = attr->hw_ver;
        resp->max_qp            = attr->max_qp;
        resp->max_qp_wr         = attr->max_qp_wr;
-       resp->device_cap_flags  = lower_32_bits(attr->device_cap_flags &
-               IB_UVERBS_DEVICE_CAP_FLAGS_MASK);
+       resp->device_cap_flags  = lower_32_bits(attr->device_cap_flags);
        resp->max_sge           = min(attr->max_send_sge, attr->max_recv_sge);
        resp->max_sge_rd        = attr->max_sge_rd;
        resp->max_cq            = attr->max_cq;
 
        resp.timestamp_mask = attr.timestamp_mask;
        resp.hca_core_clock = attr.hca_core_clock;
-       resp.device_cap_flags_ex = attr.device_cap_flags &
-               IB_UVERBS_DEVICE_CAP_FLAGS_MASK;
+       resp.device_cap_flags_ex = attr.device_cap_flags;
        resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
        resp.rss_caps.max_rwq_indirection_tables =
                attr.rss_caps.max_rwq_indirection_tables;
 
        }
        rdma_restrack_add(&pd->res);
 
-       if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
+       if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)
                pd->local_dma_lkey = device->local_dma_lkey;
        else
                mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
 
                pd->__internal_mr = mr;
 
-               if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
+               if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY))
                        pd->local_dma_lkey = pd->__internal_mr->lkey;
 
                if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
        struct ib_mr *mr;
 
        if (access_flags & IB_ACCESS_ON_DEMAND) {
-               if (!(pd->device->attrs.device_cap_flags &
-                     IB_DEVICE_ON_DEMAND_PAGING)) {
+               if (!(pd->device->attrs.kernel_cap_flags &
+                     IBK_ON_DEMAND_PAGING)) {
                        pr_debug("ODP support not available\n");
                        return ERR_PTR(-EINVAL);
                }
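
For context, the consumer side is the mirror image of this check: a kernel
ULP requests IB_ACCESS_ON_DEMAND only after testing the new bit. A minimal
sketch, assuming a hypothetical caller (ulp_reg_odp_mr is an invented name,
not part of this patch):

	/* Hypothetical ULP helper: fall back to a plain MR when the
	 * device does not advertise IBK_ON_DEMAND_PAGING.
	 */
	static struct ib_mr *ulp_reg_odp_mr(struct ib_pd *pd, u64 start,
					    u64 length, u64 iova)
	{
		int access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

		if (pd->device->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING)
			access |= IB_ACCESS_ON_DEMAND;

		return ib_reg_user_mr(pd, start, length, iova, access);
	}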
 
                                    | IB_DEVICE_RC_RNR_NAK_GEN
                                    | IB_DEVICE_SHUTDOWN_PORT
                                    | IB_DEVICE_SYS_IMAGE_GUID
-                                   | IB_DEVICE_LOCAL_DMA_LKEY
                                    | IB_DEVICE_RESIZE_MAX_WR
                                    | IB_DEVICE_PORT_ACTIVE_EVENT
                                    | IB_DEVICE_N_NOTIFY_CQ
                                    | IB_DEVICE_MEM_WINDOW
                                    | IB_DEVICE_MEM_WINDOW_TYPE_2B
                                    | IB_DEVICE_MEM_MGT_EXTENSIONS;
+       ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
        ib_attr->max_send_sge = dev_attr->max_qp_sges;
        ib_attr->max_recv_sge = dev_attr->max_qp_sges;
        ib_attr->max_sge_rd = dev_attr->max_qp_sges;
 
 struct c4iw_dev {
        struct ib_device ibdev;
        struct c4iw_rdev rdev;
-       u32 device_cap_flags;
        struct xarray cqs;
        struct xarray qps;
        struct xarray mrs;
 
                            dev->rdev.lldi.ports[0]->dev_addr);
        props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
        props->fw_ver = dev->rdev.lldi.fw_vers;
-       props->device_cap_flags = dev->device_cap_flags;
+       props->device_cap_flags = IB_DEVICE_MEM_WINDOW;
+       props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+       if (fastreg_support)
+               props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        props->page_size_cap = T4_PAGESIZE_MASK;
        props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
        props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
        pr_debug("c4iw_dev %p\n", dev);
        addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
                            dev->rdev.lldi.ports[0]->dev_addr);
-       dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
-       if (fastreg_support)
-               dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        dev->ibdev.local_dma_lkey = 0;
        dev->ibdev.node_type = RDMA_NODE_RNIC;
        BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
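
The provider-side conversion is mechanical and repeats for every driver
below: the cached copy in the driver-private structure is deleted and
query_device() reports the split directly. A condensed sketch of the common
shape (generic drv_* names, illustrative rather than lifted from any one
driver):

	/* Illustrative shape of the per-driver conversion. */
	static int drv_query_device(struct ib_device *ibdev,
				    struct ib_device_attr *props,
				    struct ib_udata *udata)
	{
		/* uverbs-visible bits stay in device_cap_flags... */
		props->device_cap_flags = IB_DEVICE_MEM_WINDOW;
		/* ...kernel-only bits move to the new field */
		props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
		return 0;
	}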
 
                        IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
                        IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
                        IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
-                       IB_DEVICE_MEM_MGT_EXTENSIONS |
-                       IB_DEVICE_RDMA_NETDEV_OPA;
+                       IB_DEVICE_MEM_MGT_EXTENSIONS;
+       rdi->dparms.props.kernel_cap_flags = IBK_RDMA_NETDEV_OPA;
        rdi->dparms.props.page_size_cap = PAGE_SIZE;
        rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
        rdi->dparms.props.vendor_part_id = dd->pcidev->device;
 
                        rf->rsrc_created = true;
                }
 
-               iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
-                                         IB_DEVICE_MEM_WINDOW |
-                                         IB_DEVICE_MEM_MGT_EXTENSIONS;
-
                if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
                        irdma_alloc_set_mac(iwdev);
                irdma_add_ip(iwdev);
 
        u32 roce_ackcreds;
        u32 vendor_id;
        u32 vendor_part_id;
-       u32 device_cap_flags;
        u32 push_mode;
        u32 rcv_wnd;
        u16 mac_ip_table_idx;
 
                            iwdev->netdev->dev_addr);
        props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
                        irdma_fw_minor_ver(&rf->sc_dev);
-       props->device_cap_flags = iwdev->device_cap_flags;
+       props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
+                                 IB_DEVICE_MEM_MGT_EXTENSIONS;
+       props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
        props->vendor_id = pcidev->vendor;
        props->vendor_part_id = pcidev->device;
 
 
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
-               IB_DEVICE_RC_RNR_NAK_GEN                |
-               IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+               IB_DEVICE_RC_RNR_NAK_GEN;
+       props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
        if (dev->dev->caps.max_gso_sz &&
            (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
            (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
-               props->device_cap_flags |= IB_DEVICE_UD_TSO;
+               props->kernel_cap_flags |= IBK_UD_TSO;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
-               props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
+               props->kernel_cap_flags |= IBK_LOCAL_DMA_LKEY;
        if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
 
                                           IB_DEVICE_MEM_WINDOW_TYPE_2B;
                props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
                /* We support 'Gappy' memory registration too */
-               props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
+               props->kernel_cap_flags |= IBK_SG_GAPS_REG;
        }
        /* IB_WR_REG_MR always requires changing the entity size with UMR */
        if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
                props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (MLX5_CAP_GEN(mdev, sho)) {
-               props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
+               props->kernel_cap_flags |= IBK_INTEGRITY_HANDOVER;
                /* At this stage no support for signature handover */
                props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
                                      IB_PROT_T10DIF_TYPE_2 |
                                       IB_GUARD_T10DIF_CSUM;
        }
        if (MLX5_CAP_GEN(mdev, block_lb_mc))
-               props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+               props->kernel_cap_flags |= IBK_BLOCK_MULTICAST_LOOPBACK;
 
        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
                if (MLX5_CAP_ETH(mdev, csum_cap)) {
 
        if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
-               props->device_cap_flags |= IB_DEVICE_UD_TSO;
+               props->kernel_cap_flags |= IBK_UD_TSO;
        }
 
        if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
 
        if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
                if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
-                       props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
+                       props->kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
                props->odp_caps = dev->odp_caps;
                if (!uhw) {
                        /* ODP for kernel QPs is not implemented for receive
                }
        }
 
-       if (MLX5_CAP_GEN(mdev, cd))
-               props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
-
        if (mlx5_core_is_vf(mdev))
-               props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
+               props->kernel_cap_flags |= IBK_VIRTUAL_FUNCTION;
 
        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET && raw_support) {
 
                                        IB_DEVICE_RC_RNR_NAK_GEN |
                                        IB_DEVICE_SHUTDOWN_PORT |
                                        IB_DEVICE_SYS_IMAGE_GUID |
-                                       IB_DEVICE_LOCAL_DMA_LKEY |
                                        IB_DEVICE_MEM_MGT_EXTENSIONS;
+       attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
        attr->max_send_sge = dev->attr.max_send_sge;
        attr->max_recv_sge = dev->attr.max_recv_sge;
        attr->max_sge_rd = dev->attr.max_rdma_sge;
 
        attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
        attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
            IB_DEVICE_RC_RNR_NAK_GEN |
-           IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
+           IB_DEVICE_MEM_MGT_EXTENSIONS;
+       attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
 
        if (!rdma_protocol_iwarp(&dev->ibdev, 1))
                attr->device_cap_flags |= IB_DEVICE_XRC;
 
        props->max_qp = qp_per_vf *
                kref_read(&us_ibdev->vf_cnt);
        props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
-               IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+               IB_DEVICE_SYS_IMAGE_GUID;
+       props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK;
        props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
                kref_read(&us_ibdev->vf_cnt);
        props->max_pd = USNIC_UIOM_MAX_PD_CNT;
 
        rxe->attr.max_qp                        = RXE_MAX_QP;
        rxe->attr.max_qp_wr                     = RXE_MAX_QP_WR;
        rxe->attr.device_cap_flags              = RXE_DEVICE_CAP_FLAGS;
+       rxe->attr.kernel_cap_flags              = IBK_ALLOW_USER_UNREG;
        rxe->attr.max_send_sge                  = RXE_MAX_SGE;
        rxe->attr.max_recv_sge                  = RXE_MAX_SGE;
        rxe->attr.max_sge_rd                    = RXE_MAX_SGE_RD;
 
                                        | IB_DEVICE_RC_RNR_NAK_GEN
                                        | IB_DEVICE_SRQ_RESIZE
                                        | IB_DEVICE_MEM_MGT_EXTENSIONS
-                                       | IB_DEVICE_ALLOW_USER_UNREG
                                        | IB_DEVICE_MEM_WINDOW
                                        | IB_DEVICE_MEM_WINDOW_TYPE_2A
                                        | IB_DEVICE_MEM_WINDOW_TYPE_2B,
 
 
        /* Revisit atomic caps if RFC 7306 gets supported */
        attr->atomic_cap = 0;
-       attr->device_cap_flags =
-               IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_ALLOW_USER_UNREG;
+       attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
+       attr->kernel_cap_flags = IBK_ALLOW_USER_UNREG;
        attr->max_cq = sdev->attrs.max_cq;
        attr->max_cqe = sdev->attrs.max_cqe;
        attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
 
        struct dentry *path_dentry;
 #endif
        u64     hca_caps;
+       u64     kernel_caps;
        struct ipoib_ethtool_st ethtool;
        unsigned int max_send_sge;
        const struct net_device_ops     *rn_ops;
 
 static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
 {
        priv->hca_caps = priv->ca->attrs.device_cap_flags;
+       priv->kernel_caps = priv->ca->attrs.kernel_cap_flags;
 
        if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
                priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
 
-               if (priv->hca_caps & IB_DEVICE_UD_TSO)
+               if (priv->kernel_caps & IBK_UD_TSO)
                        priv->dev->hw_features |= NETIF_F_TSO;
 
                priv->dev->features |= priv->dev->hw_features;
 
        priv->rn_ops = dev->netdev_ops;
 
-       if (hca->attrs.device_cap_flags & IB_DEVICE_VIRTUAL_FUNCTION)
+       if (hca->attrs.kernel_cap_flags & IBK_VIRTUAL_FUNCTION)
                dev->netdev_ops = &ipoib_netdev_ops_vf;
        else
                dev->netdev_ops = &ipoib_netdev_ops_pf;
 
        init_attr.send_cq = priv->send_cq;
        init_attr.recv_cq = priv->recv_cq;
 
-       if (priv->hca_caps & IB_DEVICE_UD_TSO)
+       if (priv->kernel_caps & IBK_UD_TSO)
                init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
 
-       if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK)
+       if (priv->kernel_caps & IBK_BLOCK_MULTICAST_LOOPBACK)
                init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
 
        if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
                init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
 
-       if (priv->hca_caps & IB_DEVICE_RDMA_NETDEV_OPA)
+       if (priv->kernel_caps & IBK_RDMA_NETDEV_OPA)
                init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE;
 
        priv->qp = ib_create_qp(priv->pd, &init_attr);
 
                                                   SHOST_DIX_GUARD_CRC);
                }
 
-               if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+               if (!(ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
                        shost->virt_boundary_mask = SZ_4K - 1;
 
                if (iscsi_host_add(shost, ib_dev->dev.parent)) {
 
        if (!desc)
                return ERR_PTR(-ENOMEM);
 
-       if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+       if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
                mr_type = IB_MR_TYPE_SG_GAPS;
        else
                mr_type = IB_MR_TYPE_MEM_REG;
         * (head and tail) for a single page worth data, so one additional
         * entry is required.
         */
-       if (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+       if (attr->kernel_cap_flags & IBK_SG_GAPS_REG)
                reserved_mr_pages = 0;
        else
                reserved_mr_pages = 1;
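
The IBK_SG_GAPS_REG test recurs in every fast-registration consumer in this
patch; each site either picks the mr_type passed to ib_alloc_mr() or, as
here, drops the reserved page-list entry. A hedged composite sketch of the
allocation half (alloc_data_mr is a hypothetical helper name):

	/* Hypothetical helper: choose the MR type from the kernel-only
	 * capability bit, then allocate via the existing core API.
	 */
	static struct ib_mr *alloc_data_mr(struct ib_pd *pd, u32 max_num_sg)
	{
		enum ib_mr_type mr_type = IB_MR_TYPE_MEM_REG;

		if (pd->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
			mr_type = IB_MR_TYPE_SG_GAPS;

		return ib_alloc_mr(pd, mr_type, max_num_sg);
	}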
 
        /* connection T10-PI support */
        if (iser_pi_enable) {
-               if (!(device->ib_device->attrs.device_cap_flags &
-                     IB_DEVICE_INTEGRITY_HANDOVER)) {
+               if (!(device->ib_device->attrs.kernel_cap_flags &
+                     IBK_INTEGRITY_HANDOVER)) {
                        iser_warn("T10-PI requested but not supported on %s, "
                                  "continue without T10-PI\n",
                                  dev_name(&ib_conn->device->ib_device->dev));
 
        }
 
        /* Check signature cap */
-       if (ib_dev->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER)
+       if (ib_dev->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER)
                device->pi_capable = true;
        else
                device->pi_capable = false;
 
        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free_list);
 
-       if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+       if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
                mr_type = IB_MR_TYPE_SG_GAPS;
        else
                mr_type = IB_MR_TYPE_MEM_REG;
        target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
        target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
 
-       if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+       if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
                target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
 
        target = host_to_target(target_host);
        }
 
        if (srp_dev->use_fast_reg) {
-               bool gaps_reg = (ibdev->attrs.device_cap_flags &
-                                IB_DEVICE_SG_GAPS_REG);
+               bool gaps_reg = ibdev->attrs.kernel_cap_flags &
+                                IBK_SG_GAPS_REG;
 
                max_sectors_per_mr = srp_dev->max_pages_per_mr <<
                                  (ilog2(srp_dev->mr_page_size) - 9);
 
        ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
 
        /* T10-PI support */
-       if (ctrl->device->dev->attrs.device_cap_flags &
-           IB_DEVICE_INTEGRITY_HANDOVER)
+       if (ctrl->device->dev->attrs.kernel_cap_flags &
+           IBK_INTEGRITY_HANDOVER)
                pi_capable = true;
 
        ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
 
        ndev->inline_data_size = nport->inline_data_size;
        ndev->inline_page_count = inline_page_count;
 
-       if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags &
-                                 IB_DEVICE_INTEGRITY_HANDOVER)) {
+       if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags &
+                                 IBK_INTEGRITY_HANDOVER)) {
                pr_warn("T10-PI is not supported by device %s. Disabling it\n",
                        cm_id->device->name);
                nport->pi_enable = false;
 
                smbd_max_frmr_depth,
                info->id->device->attrs.max_fast_reg_page_list_len);
        info->mr_type = IB_MR_TYPE_MEM_REG;
-       if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+       if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
                info->mr_type = IB_MR_TYPE_SG_GAPS;
 
        info->pd = ib_alloc_pd(info->id->device, 0);
 
        IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
        IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,
 
-       /*
-        * This device supports a per-device lkey or stag that can be
-        * used without performing a memory registration for the local
-        * memory.  Note that ULPs should never check this flag, but
-        * instead of use the local_dma_lkey flag in the ib_pd structure,
-        * which will always contain a usable lkey.
-        */
-       IB_DEVICE_LOCAL_DMA_LKEY = 1 << 15,
        /* Reserved, old SEND_W_INV = 1 << 16,*/
        IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
        /*
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
-       IB_DEVICE_UD_TSO = 1 << 19,
        IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,
 
        /*
         * stag.
         */
        IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
-       IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = 1 << 22,
        IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
        IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
        IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
        /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
        IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
-       /*
-        * Devices should set IB_DEVICE_CROSS_CHANNEL if they
-        * support execution of WQEs that involve synchronization
-        * of I/O operations with single completion queue managed
-        * by hardware.
-        */
-       IB_DEVICE_CROSS_CHANNEL = 1 << 27,
        IB_DEVICE_MANAGED_FLOW_STEERING =
                IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
-       IB_DEVICE_INTEGRITY_HANDOVER = 1 << 30,
-       IB_DEVICE_ON_DEMAND_PAGING = 1ULL << 31,
-       IB_DEVICE_SG_GAPS_REG = 1ULL << 32,
-       IB_DEVICE_VIRTUAL_FUNCTION = 1ULL << 33,
        /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
        IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
-       IB_DEVICE_RDMA_NETDEV_OPA = 1ULL << 35,
        /* The device supports padding incoming writes to cacheline. */
        IB_DEVICE_PCI_WRITE_END_PADDING =
                IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
-       IB_DEVICE_ALLOW_USER_UNREG = 1ULL << 37,
-};
-
-#define IB_UVERBS_DEVICE_CAP_FLAGS_MASK        (IB_UVERBS_DEVICE_RESIZE_MAX_WR | \
-               IB_UVERBS_DEVICE_BAD_PKEY_CNTR | \
-               IB_UVERBS_DEVICE_BAD_QKEY_CNTR | \
-               IB_UVERBS_DEVICE_RAW_MULTI | \
-               IB_UVERBS_DEVICE_AUTO_PATH_MIG | \
-               IB_UVERBS_DEVICE_CHANGE_PHY_PORT | \
-               IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE | \
-               IB_UVERBS_DEVICE_CURR_QP_STATE_MOD | \
-               IB_UVERBS_DEVICE_SHUTDOWN_PORT | \
-               IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT | \
-               IB_UVERBS_DEVICE_SYS_IMAGE_GUID | \
-               IB_UVERBS_DEVICE_RC_RNR_NAK_GEN | \
-               IB_UVERBS_DEVICE_SRQ_RESIZE | \
-               IB_UVERBS_DEVICE_N_NOTIFY_CQ | \
-               IB_UVERBS_DEVICE_MEM_WINDOW | \
-               IB_UVERBS_DEVICE_UD_IP_CSUM | \
-               IB_UVERBS_DEVICE_XRC | \
-               IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS | \
-               IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A | \
-               IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B | \
-               IB_UVERBS_DEVICE_RC_IP_CSUM | \
-               IB_UVERBS_DEVICE_RAW_IP_CSUM | \
-               IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING | \
-               IB_UVERBS_DEVICE_RAW_SCATTER_FCS | \
-               IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING)
+};
+
+enum ib_kernel_cap_flags {
+       /*
+        * This device supports a per-device lkey or stag that can be
+        * used without performing a memory registration for the local
+        * memory.  Note that ULPs should never check this flag, but
+        * should instead use the local_dma_lkey field in the ib_pd
+        * structure, which always contains a usable lkey.
+        */
+       IBK_LOCAL_DMA_LKEY = 1 << 0,
+       /* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
+       IBK_INTEGRITY_HANDOVER = 1 << 1,
+       /* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
+       IBK_ON_DEMAND_PAGING = 1 << 2,
+       /* IB_MR_TYPE_SG_GAPS is supported */
+       IBK_SG_GAPS_REG = 1 << 3,
+       /* Driver supports RDMA_NLDEV_CMD_DELLINK */
+       IBK_ALLOW_USER_UNREG = 1 << 4,
+
+       /* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
+       IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
+       /* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
+       IBK_UD_TSO = 1 << 6,
+       /* ipoib will use the device ops:
+        *   get_vf_config
+        *   get_vf_guid
+        *   get_vf_stats
+        *   set_vf_guid
+        *   set_vf_link_state
+        */
+       IBK_VIRTUAL_FUNCTION = 1 << 7,
+       /* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
+       IBK_RDMA_NETDEV_OPA = 1 << 8,
+};
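
The split leaves two fields with distinct audiences: device_cap_flags now
carries only bits with a stable uverbs ABI meaning, while kernel_cap_flags
never crosses the user/kernel boundary and so can be renumbered freely. A
minimal sketch of the intended usage on both sides (illustrative; the
consumer helper is invented):

	/* Provider side: advertise one bit in each namespace. */
	attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;	/* uverbs ABI */
	attr->kernel_cap_flags = IBK_ALLOW_USER_UNREG;		/* in-kernel only */

	/* Consumer side: kernel ULPs test IBK_* bits exclusively. */
	if (device->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER)
		enable_t10_pi(device);	/* hypothetical ULP helper */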
 
 enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        int                     max_qp;
        int                     max_qp_wr;
        u64                     device_cap_flags;
+       u64                     kernel_cap_flags;
        int                     max_send_sge;
        int                     max_recv_sge;
        int                     max_sge_rd;
                return -EINVAL;
 
        if (flags & IB_ACCESS_ON_DEMAND &&
-           !(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
+           !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
                return -EINVAL;
        return 0;
 }
 
 
 static inline bool rdma_cap_opa_vnic(struct ib_device *device)
 {
-       return !!(device->attrs.device_cap_flags &
-                 IB_DEVICE_RDMA_NETDEV_OPA);
+       return !!(device->attrs.kernel_cap_flags & IBK_RDMA_NETDEV_OPA);
 }
 
 #endif /* _OPA_VNIC_H */
 
 
 #define IB_DEVICE_NAME_MAX 64
 
+/*
+ * bits 9, 15, 16, 19, 22, 27, 30, 31, 32, 33, 35 and 37 may be set by old
+ * kernels and should not be used.
+ */
 enum ib_uverbs_device_cap_flags {
        IB_UVERBS_DEVICE_RESIZE_MAX_WR = 1 << 0,
        IB_UVERBS_DEVICE_BAD_PKEY_CNTR = 1 << 1,
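
Since old kernels may still set the retired bits, userspace should treat
them as undefined rather than reject them. A hedged sketch of defensive
parsing, assuming resp is the extended query_device response copied from
the kernel (application code, not part of this patch):

	/* Hypothetical userspace consumer: keep only the bits this
	 * application interprets; anything an old kernel sets in the
	 * retired positions is ignored.
	 */
	uint64_t known = IB_UVERBS_DEVICE_MEM_WINDOW |
			 IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS;
	uint64_t caps = resp.device_cap_flags_ex & known;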
 
        rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
 
        rds_ibdev->odp_capable =
-               !!(device->attrs.device_cap_flags &
-                  IB_DEVICE_ON_DEMAND_PAGING) &&
+               !!(device->attrs.kernel_cap_flags &
+                  IBK_ON_DEMAND_PAGING) &&
                !!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps &
                   IB_ODP_SUPPORT_WRITE) &&
                !!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps &
 
        ep->re_attr.cap.max_recv_sge = 1;
 
        ep->re_mrtype = IB_MR_TYPE_MEM_REG;
-       if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+       if (attrs->kernel_cap_flags & IBK_SG_GAPS_REG)
                ep->re_mrtype = IB_MR_TYPE_SG_GAPS;
 
        /* Quirk: Some devices advertise a large max_fast_reg_page_list_len