        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
 }
 
-/* If we're running with multiple MSI-X vectors then we enable on the fly.
- * Otherwise, we may have multiple outstanding workers and don't want to
- * enable until the last one finishes. In this case, the irq_cnt gets
- * incremented every time we queue a worker and decremented every time
- * a worker finishes.  Once it hits zero we enable the interrupt.
- */
-u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 {
-       u32 var = 0;
-       unsigned long hw_flags = 0;
-       struct intr_context *ctx = qdev->intr_context + intr;
-
-       if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
-               /* Always enable if we're MSIX multi interrupts and
-                * it's not the default (zeroeth) interrupt.
-                */
-               ql_write32(qdev, INTR_EN,
-                          ctx->intr_en_mask);
-               var = ql_read32(qdev, STS);
-               return var;
-       }
+       struct intr_context *ctx = &qdev->intr_context[intr];
 
-       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-       if (atomic_dec_and_test(&ctx->irq_cnt)) {
-               ql_write32(qdev, INTR_EN,
-                          ctx->intr_en_mask);
-               var = ql_read32(qdev, STS);
-       }
-       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
-       return var;
+       ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
 }
 
-static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 {
-       u32 var = 0;
-       struct intr_context *ctx;
+       struct intr_context *ctx = &qdev->intr_context[intr];
 
-       /* HW disables for us if we're MSIX multi interrupts and
-        * it's not the default (zeroeth) interrupt.
-        */
-       if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
-               return 0;
-
-       ctx = qdev->intr_context + intr;
-       spin_lock(&qdev->hw_lock);
-       if (!atomic_read(&ctx->irq_cnt)) {
-               ql_write32(qdev, INTR_EN,
-               ctx->intr_dis_mask);
-               var = ql_read32(qdev, STS);
-       }
-       atomic_inc(&ctx->irq_cnt);
-       spin_unlock(&qdev->hw_lock);
-       return var;
+       ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
 }
 
 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
 {
        int i;
-       for (i = 0; i < qdev->intr_count; i++) {
-               /* The enable call does a atomic_dec_and_test
-                * and enables only if the result is zero.
-                * So we precharge it here.
-                */
-               if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
-                       i == 0))
-                       atomic_set(&qdev->intr_context[i].irq_cnt, 1);
-               ql_enable_completion_interrupt(qdev, i);
-       }
 
+       for (i = 0; i < qdev->intr_count; i++)
+               ql_enable_completion_interrupt(qdev, i);
 }
 
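With the refcount gone, every mask of a completion interrupt is paired with exactly one unmask. The other half of that pairing lives in the napi poll routine; a minimal sketch of it follows (loosely modeled on the driver's MSI-X poll path, with helper and field names that are assumptions here rather than part of this patch):

/* Sketch of the napi side of the mask/unmask pairing: when the poll
 * consumes less than its budget, napi is completed and the vector is
 * re-enabled with a single register write. No irq_cnt is needed
 * because this is the only re-enable matching the auto-mask (or the
 * explicit mask in the ISR) that preceded it.
 */
static int napi_poll_sketch(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}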
@@ ... @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
        u32 var;
        int work_done = 0;
 
-       spin_lock(&qdev->hw_lock);
-       if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
-               netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
-                            "Shared Interrupt, Not ours!\n");
-               spin_unlock(&qdev->hw_lock);
-               return IRQ_NONE;
-       }
-       spin_unlock(&qdev->hw_lock);
+       /* Experience shows that when using INTx interrupts, interrupts must
+        * be masked manually.
+        * When using MSI mode, INTR_EN_EN must be explicitly disabled
+        * (even though it is auto-masked), otherwise a later command to
+        * enable it is not effective.
+        */
+       if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
+               ql_disable_completion_interrupt(qdev, 0);
 
-       var = ql_disable_completion_interrupt(qdev, intr_context->intr);
+       var = ql_read32(qdev, STS);
 
        /*
         * Check for fatal error.
         */
        if (var & STS_FE) {
+               ql_disable_completion_interrupt(qdev, 0);
                ql_queue_asic_error(qdev);
                netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
                var = ql_read32(qdev, ERR_STS);
@@ ... @@
                 */
                netif_err(qdev, intr, qdev->ndev,
                          "Got MPI processor interrupt.\n");
-               ql_disable_completion_interrupt(qdev, intr_context->intr);
                ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
                queue_delayed_work_on(smp_processor_id(),
                                qdev->workqueue, &qdev->mpi_work, 0);
@@ ... @@
        if (var & intr_context->irq_mask) {
                netif_info(qdev, intr, qdev->ndev,
                           "Waking handler for rx_ring[0].\n");
-               ql_disable_completion_interrupt(qdev, intr_context->intr);
                napi_schedule(&rx_ring->napi);
                work_done++;
+       } else {
+               /* Experience shows that the device sometimes signals an
+                * interrupt even though no work ends up being scheduled
+                * from this function. The interrupt is nevertheless
+                * auto-masked, so we must re-enable it ourselves whenever
+                * we do not schedule napi.
+                */
+               ql_enable_completion_interrupt(qdev, 0);
        }
-       ql_enable_completion_interrupt(qdev, intr_context->intr);
+
        return work_done ? IRQ_HANDLED : IRQ_NONE;
 }
 
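Taken together, the handler hunks above collapse to the following control flow (a condensed sketch of the post-patch interrupt handler; the fatal-error and MPI branches are elided for brevity):

/* Condensed sketch: mask at most once on entry (INTx/MSI only), then
 * either hand off to napi, which re-enables the interrupt from its
 * poll routine, or re-enable immediately when the auto-masked
 * interrupt turns out to be spurious.
 */
static irqreturn_t isr_sketch(struct ql_adapter *qdev,
			      struct intr_context *intr_context,
			      struct rx_ring *rx_ring)
{
	u32 var;

	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
		ql_disable_completion_interrupt(qdev, 0);

	var = ql_read32(qdev, STS);

	if (var & intr_context->irq_mask) {
		napi_schedule(&rx_ring->napi);
		return IRQ_HANDLED;
	}

	ql_enable_completion_interrupt(qdev, 0);
	return IRQ_NONE;
}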
@@ ... @@
        ql_resolve_queues_to_irqs(qdev);
 
        for (i = 0; i < qdev->intr_count; i++, intr_context++) {
-               atomic_set(&intr_context->irq_cnt, 0);
                if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
                        status = request_irq(qdev->msi_x_entry[i].vector,
                                             intr_context->handler,
@@ ... @@
                goto err_out2;
        }
        qdev->msg_enable = netif_msg_init(debug, default_msg);
-       spin_lock_init(&qdev->hw_lock);
        spin_lock_init(&qdev->stats_lock);
 
        if (qlge_mpi_coredump) {
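The last two hunks are follow-up cleanups enabled by the removal: the per-vector irq_cnt no longer needs to be primed before the vectors are requested, and hw_lock, whose only users in the paths touched by this patch were the old refcounted enable/disable helpers, no longer needs to be initialized in the probe path (the corresponding struct fields are presumably dropped in the header portion of the patch, which is not shown here).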