        ap->qc_active = preempted_qc_active;
        ap->nr_active_links = preempted_nr_active_links;
 
-       /* XXX - Some LLDDs (sata_mv) disable port on command failure.
-        * Until those drivers are fixed, we detect the condition
-        * here, fail the command with AC_ERR_SYSTEM and reenable the
-        * port.
-        *
-        * Note that this doesn't change any behavior as internal
-        * command failure results in disabling the device in the
-        * higher layer for LLDDs without new reset/EH callbacks.
-        *
-        * Kill the following code as soon as those drivers are fixed.
-        */
-       if (ap->flags & ATA_FLAG_DISABLED) {
-               err_mask |= AC_ERR_SYSTEM;
-               ata_port_probe(ap);
-       }
-
        spin_unlock_irqrestore(ap->lock, flags);
 
        if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
        int rc;
        struct ata_device *dev;
 
-       ata_port_probe(ap);
-
        ata_for_each_dev(dev, &ap->link, ALL)
                tries[dev->devno] = ATA_PROBE_MAX_TRIES;
 
        ap->ops->phy_reset(ap);
 
        ata_for_each_dev(dev, &ap->link, ALL) {
-               if (!(ap->flags & ATA_FLAG_DISABLED) &&
-                   dev->class != ATA_DEV_UNKNOWN)
+               if (dev->class != ATA_DEV_UNKNOWN)
                        classes[dev->devno] = dev->class;
                else
                        classes[dev->devno] = ATA_DEV_NONE;
                dev->class = ATA_DEV_UNKNOWN;
        }
 
-       ata_port_probe(ap);
-
        /* read IDENTIFY page and configure devices. We have to do the identify
           specific sequence bass-ackwards so that PDIAG- is released by
           the slave device */
        ata_for_each_dev(dev, &ap->link, ENABLED)
                return 0;
 
-       /* no device present, disable port */
-       ata_port_disable(ap);
        return -ENODEV;
 
  fail:
        goto retry;
 }
 
-/**
- *     ata_port_probe - Mark port as enabled
- *     @ap: Port for which we indicate enablement
- *
- *     Modify @ap data structure such that the system
- *     thinks that the entire port is enabled.
- *
- *     LOCKING: host lock, or some other form of
- *     serialization.
- */
-
-void ata_port_probe(struct ata_port *ap)
-{
-       ap->flags &= ~ATA_FLAG_DISABLED;
-}
-
 /**
  *     sata_print_link_status - Print SATA link status
  *     @link: SATA link to printk link status about
        return pair;
 }
 
-/**
- *     ata_port_disable - Disable port.
- *     @ap: Port to be disabled.
- *
- *     Modify @ap data structure such that the system
- *     thinks that the entire port is disabled, and should
- *     never attempt to probe or communicate with devices
- *     on this port.
- *
- *     LOCKING: host lock, or some other form of
- *     serialization.
- */
-
-void ata_port_disable(struct ata_port *ap)
-{
-       ap->link.device[0].class = ATA_DEV_NONE;
-       ap->link.device[1].class = ATA_DEV_NONE;
-       ap->flags |= ATA_FLAG_DISABLED;
-}
-
 /**
  *     sata_down_spd_limit - adjust SATA spd limit downward
  *     @link: Link to adjust SATA spd limit for
 
        ap->pflags |= ATA_PFLAG_INITIALIZING;
        ap->lock = &host->lock;
-       ap->flags = ATA_FLAG_DISABLED;
        ap->print_id = -1;
        ap->ctl = ATA_DEVCTL_OBS;
        ap->host = host;
                struct ata_eh_info *ehi = &ap->link.eh_info;
                unsigned long flags;
 
-               ata_port_probe(ap);
-
                /* kick EH for boot probing */
                spin_lock_irqsave(ap->lock, flags);
 
 EXPORT_SYMBOL_GPL(ata_do_set_mode);
 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
-EXPORT_SYMBOL_GPL(ata_port_probe);
 EXPORT_SYMBOL_GPL(ata_dev_disable);
 EXPORT_SYMBOL_GPL(sata_set_spd);
 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
 EXPORT_SYMBOL_GPL(ata_std_postreset);
 EXPORT_SYMBOL_GPL(ata_dev_classify);
 EXPORT_SYMBOL_GPL(ata_dev_pair);
-EXPORT_SYMBOL_GPL(ata_port_disable);
 EXPORT_SYMBOL_GPL(ata_ratelimit);
 EXPORT_SYMBOL_GPL(ata_wait_register);
 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
 
        struct ata_link *link;
        struct ata_device *dev;
 
-       if (ap->flags & ATA_FLAG_DISABLED)
-               return;
-
  repeat:
        ata_for_each_link(link, ap, EDGE) {
                ata_for_each_dev(dev, link, ENABLED) {
 
                struct ata_port *ap = host->ports[i];
                struct ata_queued_cmd *qc;
 
-               if (unlikely(ap->flags & ATA_FLAG_DISABLED))
-                       continue;
-
                qc = ata_qc_from_tag(ap, ap->link.active_tag);
                if (qc) {
                        if (!(qc->tf.flags & ATA_TFLAG_POLLING))
 
        /* Only one outstanding command per SFF channel */
        qc = ata_qc_from_tag(ap, ap->link.active_tag);
-       /* Check we have a live one.. */
-       if (qc == NULL ||  !(qc->flags & ATA_QCFLAG_ACTIVE))
-               return;
-       /* We cannot lose an interrupt on a polled command */
-       if (qc->tf.flags & ATA_TFLAG_POLLING)
+       /* We cannot lose an interrupt on a non-existent or polled command */
+       if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
                return;
        /* See if the controller thinks it is still busy - if so the command
           isn't a lost IRQ but is still in progress */
 
        spin_lock_irqsave(&host->lock, flags);
 
        for (i = 0; i < host->n_ports; i++) {
-               struct ata_port *ap;
+               struct ata_port *ap = host->ports[i];
+               struct ata_queued_cmd *qc;
 
-               ap = host->ports[i];
-               if (ap &&
-                   !(ap->flags & ATA_FLAG_DISABLED)) {
-                       struct ata_queued_cmd *qc;
-
-                       qc = ata_qc_from_tag(ap, ap->link.active_tag);
-                       if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
-                           (qc->flags & ATA_QCFLAG_ACTIVE))
-                               handled |= bfin_ata_host_intr(ap, qc);
-               }
+               qc = ata_qc_from_tag(ap, ap->link.active_tag);
+               if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
+                       handled |= bfin_ata_host_intr(ap, qc);
        }
 
        spin_unlock_irqrestore(&host->lock, flags);
 
                ap = host->ports[i];
                ocd = ap->dev->platform_data;
 
-               if (ap->flags & ATA_FLAG_DISABLED)
-                       continue;
-
-               ocd = ap->dev->platform_data;
                cf_port = ap->private_data;
                dma_int.u64 =
 
                qc = ata_qc_from_tag(ap, ap->link.active_tag);
 
-               if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
-                   (qc->flags & ATA_QCFLAG_ACTIVE)) {
+               if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
                        if (dma_int.s.done && !dma_cfg.s.en) {
                                if (!sg_is_last(qc->cursg)) {
                                        qc->cursg = sg_next(qc->cursg);
                goto out;
        }
        qc = ata_qc_from_tag(ap, ap->link.active_tag);
-       if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
-           (qc->flags & ATA_QCFLAG_ACTIVE))
+       if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
                octeon_cf_dma_finished(ap, qc);
 out:
        spin_unlock_irqrestore(&host->lock, flags);
 
                        continue;
                handled = 1;
                adma_enter_reg_mode(ap);
-               if (ap->flags & ATA_FLAG_DISABLED)
-                       continue;
                pp = ap->private_data;
                if (!pp || pp->state != adma_state_pkt)
                        continue;
        unsigned int handled = 0, port_no;
 
        for (port_no = 0; port_no < host->n_ports; ++port_no) {
-               struct ata_port *ap;
-               ap = host->ports[port_no];
-               if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
-                       struct ata_queued_cmd *qc;
-                       struct adma_port_priv *pp = ap->private_data;
-                       if (!pp || pp->state != adma_state_mmio)
+               struct ata_port *ap = host->ports[port_no];
+               struct adma_port_priv *pp = ap->private_data;
+               struct ata_queued_cmd *qc;
+
+               if (!pp || pp->state != adma_state_mmio)
+                       continue;
+               qc = ata_qc_from_tag(ap, ap->link.active_tag);
+               if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+
+                       /* check main status, clearing INTRQ */
+                       u8 status = ata_sff_check_status(ap);
+                       if ((status & ATA_BUSY))
                                continue;
-                       qc = ata_qc_from_tag(ap, ap->link.active_tag);
-                       if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
-
-                               /* check main status, clearing INTRQ */
-                               u8 status = ata_sff_check_status(ap);
-                               if ((status & ATA_BUSY))
-                                       continue;
-                               DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
-                                       ap->print_id, qc->tf.protocol, status);
-
-                               /* complete taskfile transaction */
-                               pp->state = adma_state_idle;
-                               qc->err_mask |= ac_err_mask(status);
-                               if (!qc->err_mask)
-                                       ata_qc_complete(qc);
-                               else {
-                                       struct ata_eh_info *ehi =
-                                               &ap->link.eh_info;
-                                       ata_ehi_clear_desc(ehi);
-                                       ata_ehi_push_desc(ehi,
-                                               "status 0x%02X", status);
-
-                                       if (qc->err_mask == AC_ERR_DEV)
-                                               ata_port_abort(ap);
-                                       else
-                                               ata_port_freeze(ap);
-                               }
-                               handled = 1;
+                       DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
+                               ap->print_id, qc->tf.protocol, status);
+
+                       /* complete taskfile transaction */
+                       pp->state = adma_state_idle;
+                       qc->err_mask |= ac_err_mask(status);
+                       if (!qc->err_mask)
+                               ata_qc_complete(qc);
+                       else {
+                               struct ata_eh_info *ehi = &ap->link.eh_info;
+                               ata_ehi_clear_desc(ehi);
+                               ata_ehi_push_desc(ehi, "status 0x%02X", status);
+
+                               if (qc->err_mask == AC_ERR_DEV)
+                                       ata_port_abort(ap);
+                               else
+                                       ata_port_freeze(ap);
                        }
+                       handled = 1;
                }
        }
        return handled;
 
 
        spin_lock(&host->lock);
 
-       for (i = 0; i < NR_PORTS; i++) {
-               struct ata_port *ap = host->ports[i];
-
-               if (!(host_irq_stat & (HIRQ_PORT0 << i)))
-                       continue;
-
-               if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
-                       inic_host_intr(ap);
+       for (i = 0; i < NR_PORTS; i++)
+               if (host_irq_stat & (HIRQ_PORT0 << i)) {
+                       inic_host_intr(host->ports[i]);
                        handled++;
-               } else {
-                       if (ata_ratelimit())
-                               dev_printk(KERN_ERR, host->dev, "interrupt "
-                                          "from disabled port %d (0x%x)\n",
-                                          i, host_irq_stat);
                }
-       }
 
        spin_unlock(&host->lock);
 
 
        if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
                return NULL;
        qc = ata_qc_from_tag(ap, ap->link.active_tag);
-       if (qc) {
-               if (qc->tf.flags & ATA_TFLAG_POLLING)
-                       qc = NULL;
-               else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
-                       qc = NULL;
-       }
-       return qc;
+       if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
+               return qc;
+       return NULL;
 }
 
 static void mv_pmp_error_handler(struct ata_port *ap)
        char *when = "idle";
 
        ata_ehi_clear_desc(ehi);
-       if (ap->flags & ATA_FLAG_DISABLED) {
-               when = "disabled";
-       } else if (edma_was_enabled) {
+       if (edma_was_enabled) {
                when = "EDMA enabled";
        } else {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
        struct mv_port_priv *pp;
        int edma_was_enabled;
 
-       if (ap->flags & ATA_FLAG_DISABLED) {
-               mv_unexpected_intr(ap, 0);
-               return;
-       }
        /*
         * Grab a snapshot of the EDMA_EN flag setting,
         * so that we have a consistent view for this port,
 
 
        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
+               struct nv_adma_port_priv *pp = ap->private_data;
+               void __iomem *mmio = pp->ctl_block;
+               u16 status;
+               u32 gen_ctl;
+               u32 notifier, notifier_error;
+
                notifier_clears[i] = 0;
 
-               if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-                       struct nv_adma_port_priv *pp = ap->private_data;
-                       void __iomem *mmio = pp->ctl_block;
-                       u16 status;
-                       u32 gen_ctl;
-                       u32 notifier, notifier_error;
-
-                       /* if ADMA is disabled, use standard ata interrupt handler */
-                       if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
-                               u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
-                                       >> (NV_INT_PORT_SHIFT * i);
-                               handled += nv_host_intr(ap, irq_stat);
-                               continue;
-                       }
+               /* if ADMA is disabled, use standard ata interrupt handler */
+               if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
+                       u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+                               >> (NV_INT_PORT_SHIFT * i);
+                       handled += nv_host_intr(ap, irq_stat);
+                       continue;
+               }
 
-                       /* if in ATA register mode, check for standard interrupts */
-                       if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
-                               u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
-                                       >> (NV_INT_PORT_SHIFT * i);
-                               if (ata_tag_valid(ap->link.active_tag))
-                                       /** NV_INT_DEV indication seems unreliable at times
-                                           at least in ADMA mode. Force it on always when a
-                                           command is active, to prevent losing interrupts. */
-                                       irq_stat |= NV_INT_DEV;
-                               handled += nv_host_intr(ap, irq_stat);
-                       }
+               /* if in ATA register mode, check for standard interrupts */
+               if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
+                       u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+                               >> (NV_INT_PORT_SHIFT * i);
+                       if (ata_tag_valid(ap->link.active_tag))
+                               /*
+                                * NV_INT_DEV indication seems unreliable
+                                * at times at least in ADMA mode. Force it
+                                * on always when a command is active, to
+                                * prevent losing interrupts.
+                                */
+                               irq_stat |= NV_INT_DEV;
+                       handled += nv_host_intr(ap, irq_stat);
+               }
+
+               notifier = readl(mmio + NV_ADMA_NOTIFIER);
+               notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+               notifier_clears[i] = notifier | notifier_error;
+
+               gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
+
+               if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
+                   !notifier_error)
+                       /* Nothing to do */
+                       continue;
+
+               status = readw(mmio + NV_ADMA_STAT);
+
+               /*
+                * Clear status. Ensure the controller sees the
+                * clearing before we start looking at any of the CPB
+                * statuses, so that any CPB completions after this
+                * point in the handler will raise another interrupt.
+                */
+               writew(status, mmio + NV_ADMA_STAT);
+               readw(mmio + NV_ADMA_STAT); /* flush posted write */
+               rmb();
+
+               handled++; /* irq handled if we got here */
 
-                       notifier = readl(mmio + NV_ADMA_NOTIFIER);
-                       notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
-                       notifier_clears[i] = notifier | notifier_error;
-
-                       gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
-
-                       if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
-                           !notifier_error)
-                               /* Nothing to do */
-                               continue;
-
-                       status = readw(mmio + NV_ADMA_STAT);
-
-                       /* Clear status. Ensure the controller sees the clearing before we start
-                          looking at any of the CPB statuses, so that any CPB completions after
-                          this point in the handler will raise another interrupt. */
-                       writew(status, mmio + NV_ADMA_STAT);
-                       readw(mmio + NV_ADMA_STAT); /* flush posted write */
-                       rmb();
-
-                       handled++; /* irq handled if we got here */
-
-                       /* freeze if hotplugged or controller error */
-                       if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
-                                              NV_ADMA_STAT_HOTUNPLUG |
-                                              NV_ADMA_STAT_TIMEOUT |
-                                              NV_ADMA_STAT_SERROR))) {
-                               struct ata_eh_info *ehi = &ap->link.eh_info;
-
-                               ata_ehi_clear_desc(ehi);
-                               __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
-                               if (status & NV_ADMA_STAT_TIMEOUT) {
-                                       ehi->err_mask |= AC_ERR_SYSTEM;
-                                       ata_ehi_push_desc(ehi, "timeout");
-                               } else if (status & NV_ADMA_STAT_HOTPLUG) {
-                                       ata_ehi_hotplugged(ehi);
-                                       ata_ehi_push_desc(ehi, "hotplug");
-                               } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
-                                       ata_ehi_hotplugged(ehi);
-                                       ata_ehi_push_desc(ehi, "hot unplug");
-                               } else if (status & NV_ADMA_STAT_SERROR) {
-                                       /* let libata analyze SError and figure out the cause */
-                                       ata_ehi_push_desc(ehi, "SError");
-                               } else
-                                       ata_ehi_push_desc(ehi, "unknown");
-                               ata_port_freeze(ap);
-                               continue;
+               /* freeze if hotplugged or controller error */
+               if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
+                                      NV_ADMA_STAT_HOTUNPLUG |
+                                      NV_ADMA_STAT_TIMEOUT |
+                                      NV_ADMA_STAT_SERROR))) {
+                       struct ata_eh_info *ehi = &ap->link.eh_info;
+
+                       ata_ehi_clear_desc(ehi);
+                       __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
+                       if (status & NV_ADMA_STAT_TIMEOUT) {
+                               ehi->err_mask |= AC_ERR_SYSTEM;
+                               ata_ehi_push_desc(ehi, "timeout");
+                       } else if (status & NV_ADMA_STAT_HOTPLUG) {
+                               ata_ehi_hotplugged(ehi);
+                               ata_ehi_push_desc(ehi, "hotplug");
+                       } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
+                               ata_ehi_hotplugged(ehi);
+                               ata_ehi_push_desc(ehi, "hot unplug");
+                       } else if (status & NV_ADMA_STAT_SERROR) {
+                               /* let EH analyze SError and figure out cause */
+                               ata_ehi_push_desc(ehi, "SError");
+                       } else
+                               ata_ehi_push_desc(ehi, "unknown");
+                       ata_port_freeze(ap);
+                       continue;
+               }
+
+               if (status & (NV_ADMA_STAT_DONE |
+                             NV_ADMA_STAT_CPBERR |
+                             NV_ADMA_STAT_CMD_COMPLETE)) {
+                       u32 check_commands = notifier_clears[i];
+                       int pos, error = 0;
+
+                       if (status & NV_ADMA_STAT_CPBERR) {
+                               /* check all active commands */
+                               if (ata_tag_valid(ap->link.active_tag))
+                                       check_commands = 1 <<
+                                               ap->link.active_tag;
+                               else
+                                       check_commands = ap->link.sactive;
                        }
 
-                       if (status & (NV_ADMA_STAT_DONE |
-                                     NV_ADMA_STAT_CPBERR |
-                                     NV_ADMA_STAT_CMD_COMPLETE)) {
-                               u32 check_commands = notifier_clears[i];
-                               int pos, error = 0;
-
-                               if (status & NV_ADMA_STAT_CPBERR) {
-                                       /* Check all active commands */
-                                       if (ata_tag_valid(ap->link.active_tag))
-                                               check_commands = 1 <<
-                                                       ap->link.active_tag;
-                                       else
-                                               check_commands = ap->
-                                                       link.sactive;
-                               }
-
-                               /** Check CPBs for completed commands */
-                               while ((pos = ffs(check_commands)) && !error) {
-                                       pos--;
-                                       error = nv_adma_check_cpb(ap, pos,
+                       /* check CPBs for completed commands */
+                       while ((pos = ffs(check_commands)) && !error) {
+                               pos--;
+                               error = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
-                                       check_commands &= ~(1 << pos);
-                               }
+                               check_commands &= ~(1 << pos);
                        }
                }
        }
        spin_lock_irqsave(&host->lock, flags);
 
        for (i = 0; i < host->n_ports; i++) {
-               struct ata_port *ap;
-
-               ap = host->ports[i];
-               if (ap &&
-                   !(ap->flags & ATA_FLAG_DISABLED)) {
-                       struct ata_queued_cmd *qc;
+               struct ata_port *ap = host->ports[i];
+               struct ata_queued_cmd *qc;
 
-                       qc = ata_qc_from_tag(ap, ap->link.active_tag);
-                       if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
-                               handled += ata_sff_host_intr(ap, qc);
-                       else
-                               // No request pending?  Clear interrupt status
-                               // anyway, in case there's one pending.
-                               ap->ops->sff_check_status(ap);
+               qc = ata_qc_from_tag(ap, ap->link.active_tag);
+               if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+                       handled += ata_sff_host_intr(ap, qc);
+               } else {
+                       /*
+                        * No request pending?  Clear interrupt status
+                        * anyway, in case there's one pending.
+                        */
+                       ap->ops->sff_check_status(ap);
                }
-
        }
 
        spin_unlock_irqrestore(&host->lock, flags);
        int i, handled = 0;
 
        for (i = 0; i < host->n_ports; i++) {
-               struct ata_port *ap = host->ports[i];
-
-               if (ap && !(ap->flags & ATA_FLAG_DISABLED))
-                       handled += nv_host_intr(ap, irq_stat);
-
+               handled += nv_host_intr(host->ports[i], irq_stat);
                irq_stat >>= NV_INT_PORT_SHIFT;
        }
 
        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
 
-               if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-                       if (ap->link.sactive) {
-                               nv_swncq_host_interrupt(ap, (u16)irq_stat);
-                               handled = 1;
-                       } else {
-                               if (irq_stat)   /* reserve Hotplug */
-                                       nv_swncq_irq_clear(ap, 0xfff0);
+               if (ap->link.sactive) {
+                       nv_swncq_host_interrupt(ap, (u16)irq_stat);
+                       handled = 1;
+               } else {
+                       if (irq_stat)   /* reserve Hotplug */
+                               nv_swncq_irq_clear(ap, 0xfff0);
 
-                               handled += nv_host_intr(ap, (u8)irq_stat);
-                       }
+                       handled += nv_host_intr(ap, (u8)irq_stat);
                }
                irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
        }
 
                /* check for a plug or unplug event */
                ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
                tmp = hotplug_status & (0x11 << ata_no);
-               if (tmp && ap &&
-                   !(ap->flags & ATA_FLAG_DISABLED)) {
+               if (tmp) {
                        struct ata_eh_info *ehi = &ap->link.eh_info;
                        ata_ehi_clear_desc(ehi);
                        ata_ehi_hotplugged(ehi);
 
                /* check for a packet interrupt */
                tmp = mask & (1 << (i + 1));
-               if (tmp && ap &&
-                   !(ap->flags & ATA_FLAG_DISABLED)) {
+               if (tmp) {
                        struct ata_queued_cmd *qc;
 
                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
 
                        u8 sHST = sff1 & 0x3f;  /* host status */
                        unsigned int port_no = (sff1 >> 8) & 0x03;
                        struct ata_port *ap = host->ports[port_no];
+                       struct qs_port_priv *pp = ap->private_data;
+                       struct ata_queued_cmd *qc;
 
                        DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
                                        sff1, sff0, port_no, sHST, sDST);
                        handled = 1;
-                       if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-                               struct ata_queued_cmd *qc;
-                               struct qs_port_priv *pp = ap->private_data;
-                               if (!pp || pp->state != qs_state_pkt)
-                                       continue;
-                               qc = ata_qc_from_tag(ap, ap->link.active_tag);
-                               if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
-                                       switch (sHST) {
-                                       case 0: /* successful CPB */
-                                       case 3: /* device error */
-                                               qs_enter_reg_mode(qc->ap);
-                                               qs_do_or_die(qc, sDST);
-                                               break;
-                                       default:
-                                               break;
-                                       }
+                       if (!pp || pp->state != qs_state_pkt)
+                               continue;
+                       qc = ata_qc_from_tag(ap, ap->link.active_tag);
+                       if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+                               switch (sHST) {
+                               case 0: /* successful CPB */
+                               case 3: /* device error */
+                                       qs_enter_reg_mode(qc->ap);
+                                       qs_do_or_die(qc, sDST);
+                                       break;
+                               default:
+                                       break;
                                }
                        }
                }
        unsigned int handled = 0, port_no;
 
        for (port_no = 0; port_no < host->n_ports; ++port_no) {
-               struct ata_port *ap;
-               ap = host->ports[port_no];
-               if (ap &&
-                   !(ap->flags & ATA_FLAG_DISABLED)) {
-                       struct ata_queued_cmd *qc;
-                       struct qs_port_priv *pp;
-                       qc = ata_qc_from_tag(ap, ap->link.active_tag);
-                       if (!qc || !(qc->flags & ATA_QCFLAG_ACTIVE)) {
-                               /*
-                                * The qstor hardware generates spurious
-                                * interrupts from time to time when switching
-                                * in and out of packet mode.
-                                * There's no obvious way to know if we're
-                                * here now due to that, so just ack the irq
-                                * and pretend we knew it was ours.. (ugh).
-                                * This does not affect packet mode.
-                                */
-                               ata_sff_check_status(ap);
-                               handled = 1;
-                               continue;
-                       }
-                       pp = ap->private_data;
-                       if (!pp || pp->state != qs_state_mmio)
-                               continue;
-                       if (!(qc->tf.flags & ATA_TFLAG_POLLING))
-                               handled |= ata_sff_host_intr(ap, qc);
+               struct ata_port *ap = host->ports[port_no];
+               struct qs_port_priv *pp = ap->private_data;
+               struct ata_queued_cmd *qc;
+
+               qc = ata_qc_from_tag(ap, ap->link.active_tag);
+               if (!qc) {
+                       /*
+                        * The qstor hardware generates spurious
+                        * interrupts from time to time when switching
+                        * in and out of packet mode.  There's no
+                        * obvious way to know if we're here now due
+                        * to that, so just ack the irq and pretend we
+                        * knew it was ours.. (ugh).  This does not
+                        * affect packet mode.
+                        */
+                       ata_sff_check_status(ap);
+                       handled = 1;
+                       continue;
                }
+
+               if (!pp || pp->state != qs_state_mmio)
+                       continue;
+               if (!(qc->tf.flags & ATA_TFLAG_POLLING))
+                       handled |= ata_sff_host_intr(ap, qc);
        }
        return handled;
 }
 
                struct ata_port *ap = host->ports[i];
                u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
 
-               if (unlikely(ap->flags & ATA_FLAG_DISABLED))
-                       continue;
-
                /* turn off SATA_IRQ if not supported */
                if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
                        bmdma2 &= ~SIL_DMA_SATA_IRQ;
 
 
        for (i = 0; i < host->n_ports; i++)
                if (status & (1 << i)) {
-                       struct ata_port *ap = host->ports[i];
-                       if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-                               sil24_host_intr(ap);
-                               handled++;
-                       } else
-                               printk(KERN_ERR DRV_NAME
-                                      ": interrupt from disabled port %d\n", i);
+                       sil24_host_intr(host->ports[i]);
+                       handled++;
                }
 
        spin_unlock(&host->lock);
 
                        ap = host->ports[port_no];
                tmp = mask & (1 << i);
                VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
-               if (tmp && ap &&
-                   !(ap->flags & ATA_FLAG_DISABLED)) {
+               if (tmp && ap) {
                        struct ata_queued_cmd *qc;
 
                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
 
        for (i = 0; i < host->n_ports; i++) {
                u8 port_status = (status >> (8 * i)) & 0xff;
                if (port_status) {
-                       struct ata_port *ap = host->ports[i];
-
-                       if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-                               vsc_port_intr(port_status, ap);
-                               handled++;
-                       } else
-                               dev_printk(KERN_ERR, host->dev,
-                                       "interrupt from disabled port %d\n", i);
+                       vsc_port_intr(port_status, host->ports[i]);
+                       handled++;
                }
        }
 
 
        res = (struct ipr_resource_entry *) sdev->hostdata;
        if (res) {
                if (res->sata_port)
-                       ata_port_disable(res->sata_port->ap);
+                       res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
                sdev->hostdata = NULL;
                res->sdev = NULL;
                res->sata_port = NULL;
        rc = ipr_device_reset(ioa_cfg, res);
 
        if (rc) {
-               ata_port_disable(ap);
+               ap->link.device[0].class = ATA_DEV_NONE;
                goto out_unlock;
        }
 
        ap->link.device[0].class = res->ata_class;
        if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
-               ata_port_disable(ap);
+               ap->link.device[0].class = ATA_DEV_NONE;
 
 out_unlock:
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 
        struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
 
        if (dev_is_sata(dev))
-               ata_port_disable(dev->sata_dev.ap);
+               dev->sata_dev.ap->link.device[0].class = ATA_DEV_NONE;
 }
 
 int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth,
 
        ATA_FLAG_SW_ACTIVITY    = (1 << 22), /* driver supports sw activity
                                              * led */
 
-       /* The following flag belongs to ap->pflags but is kept in
-        * ap->flags because it's referenced in many LLDs and will be
-        * removed in not-too-distant future.
-        */
-       ATA_FLAG_DISABLED       = (1 << 23), /* port is disabled, ignore it */
-
        /* bits 24:31 of ap->flags are reserved for LLD specific flags */
 
 
        return ap->ops == &ata_dummy_port_ops;
 }
 
-extern void ata_port_probe(struct ata_port *);
 extern int sata_set_spd(struct ata_link *link);
 extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
 extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
 extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
                              unsigned long deadline);
 extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
-extern void ata_port_disable(struct ata_port *);
 
 extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
 extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,