ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
                                   NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
                                   locked);
+       /* Also set the interrupt number for the NAPI */
+       netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
 }
 
 /**
 
        /* control-path-only fields follow */
        struct list_head        dev_list;
        struct hlist_node       napi_hash_node;
+       int                     irq;
 };
 
 enum {
                          enum netdev_queue_type type,
                          struct napi_struct *napi);
 
+/**
+ * netif_napi_set_irq - record the IRQ number backing a NAPI instance
+ * @napi: NAPI context to annotate
+ * @irq: interrupt number, or -1 to mark the NAPI as having no associated IRQ
+ *
+ * Plain store, no locking; callers elsewhere in this patch pass the queue
+ * vector's virq (driver side) or -1 (default in netif_napi_add_weight), and
+ * the netdev genl dump only reports the value when it is >= 0.
+ */
+static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
+{
+       napi->irq = irq;
+}
+
 /* Default NAPI poll() weight
  * Device drivers are strongly advised to not use bigger value
  */
 
         */
        if (dev->threaded && napi_kthread_create(napi))
                dev->threaded = 0;
+       netif_napi_set_irq(napi, -1);
 }
 EXPORT_SYMBOL(netif_napi_add_weight);
 
 
        if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
                goto nla_put_failure;
 
+       if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
+               goto nla_put_failure;
+
        genlmsg_end(rsp, hdr);
+
        return 0;
 
 nla_put_failure: