r->size = 0;
 }
 
-static __maybe_unused int irq_setup(unsigned int *irqs, unsigned int len, int node)
+static int irq_setup(unsigned int *irqs, unsigned int len, int node)
 {
        const struct cpumask *next, *prev = cpu_none_mask;
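+       /* Scope-based cleanup: __free(free_cpumask_var) releases the mask
+        * automatically on every return path.
+        */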
        cpumask_var_t cpus __free(free_cpumask_var);
 
 static int mana_gd_setup_irqs(struct pci_dev *pdev)
 {
-       unsigned int max_queues_per_port = num_online_cpus();
        struct gdma_context *gc = pci_get_drvdata(pdev);
+       unsigned int max_queues_per_port;
        struct gdma_irq_context *gic;
        unsigned int max_irqs, cpu;
-       int nvec, irq;
+       int start_irq_index = 1;
+       int nvec, *irqs, irq;
        int err, i = 0, j;
 
+       cpus_read_lock();
+       max_queues_per_port = num_online_cpus();
        if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
                max_queues_per_port = MANA_MAX_NUM_QUEUES;
 
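+       /* Need one extra vector for the hardware communication channel (HWC) */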
        max_irqs = max_queues_per_port + 1;
 
        nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
-       if (nvec < 0)
+       if (nvec < 0) {
+               cpus_read_unlock();
                return nvec;
+       }
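+       /* If every vector can get its own online CPU, IRQ0 (the HWC IRQ)
+        * joins the spread below and irqs[] tracks all vectors; otherwise
+        * IRQ0 is pinned separately and irqs[] tracks vectors 1..nvec-1.
+        */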
+       if (nvec <= num_online_cpus())
+               start_irq_index = 0;
+
+       irqs = kmalloc_array(nvec - start_irq_index, sizeof(int), GFP_KERNEL);
+       if (!irqs) {
+               err = -ENOMEM;
+               goto free_irq_vector;
+       }
 
        gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
                                   GFP_KERNEL);
                        goto free_irq;
                }
 
-               err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
-               if (err)
-                       goto free_irq;
-
-               cpu = cpumask_local_spread(i, gc->numa_node);
-               irq_set_affinity_and_hint(irq, cpumask_of(cpu));
+               if (!i) {
+                       err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
+                       if (err)
+                               goto free_irq;
+
+                       /* If the number of IRQs is one more than the number
+                        * of online CPUs, assign IRQ0 (the HWC IRQ) and IRQ1
+                        * to the same CPU; otherwise use different CPUs for
+                        * IRQ0 and IRQ1.
+                        * Use cpumask_local_spread() instead of
+                        * cpumask_first() for the node, because the node can
+                        * be memory-only.
+                        */
+                       if (start_irq_index) {
+                               cpu = cpumask_local_spread(i, gc->numa_node);
+                               irq_set_affinity_and_hint(irq, cpumask_of(cpu));
+                       } else {
+                               irqs[start_irq_index] = irq;
+                       }
+               } else {
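+                       /* Queue IRQs are requested here, but their affinity
+                        * is assigned in one pass by irq_setup() after the
+                        * loop.
+                        */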
+                       irqs[i - start_irq_index] = irq;
+                       err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
+                                         gic->name, gic);
+                       if (err)
+                               goto free_irq;
+               }
        }
 
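+       /* Spread the collected IRQs across the online CPUs, starting from
+        * the device's NUMA node.
+        */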
+       err = irq_setup(irqs, nvec - start_irq_index, gc->numa_node);
+       if (err)
+               goto free_irq;
+
        gc->max_num_msix = nvec;
        gc->num_msix_usable = nvec;
-
+       kfree(irqs);
+       cpus_read_unlock();
        return 0;
 
 free_irq:
        }
 
        kfree(gc->irq_contexts);
+       kfree(irqs);
        gc->irq_contexts = NULL;
 free_irq_vector:
+       cpus_read_unlock();
        pci_free_irq_vectors(pdev);
        return err;
 }