memory you intend to sync partially.
 
 void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 
 Do a partial sync of memory that was allocated by
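
For drivers on noncoherent platforms the call now carries the device,
so the architecture code can pick the right cache operation for that
bus. A minimal sketch of a caller under the new signature (the driver,
buffer and names below are hypothetical, not part of this patch):

	/* CPU is done writing the descriptor; write it back so the
	 * device sees current data.  dev is the struct device the
	 * buffer was allocated for. */
	dma_cache_sync(dev, desc, sizeof(*desc), DMA_TO_DEVICE);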
 
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 
-void dma_cache_sync(void *vaddr, size_t size, int direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
 {
        /*
         * No need to sync an uncached area
 
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        BUG_ON(direction == DMA_NONE);
 
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        BUG_ON(direction == DMA_NONE);
 
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+       enum dma_data_direction direction)
 {
        if (direction == DMA_NONE)
                return;
 
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+       enum dma_data_direction direction)
 {
        if (direction == DMA_NONE)
                return;
 
 #define DEB(x,y)       if (i596_debug & (x)) { y; }
 
 
-#define  CHECK_WBACK(addr,len) \
-       do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)
+#define  CHECK_WBACK(priv, addr, len) \
+       do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_TO_DEVICE); } while (0)
 
-#define  CHECK_INV(addr,len) \
-       do { dma_cache_sync((void *)addr, len, DMA_FROM_DEVICE); } while(0)
+#define  CHECK_INV(priv, addr, len) \
+       do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_FROM_DEVICE); } while (0)
 
-#define  CHECK_WBACK_INV(addr,len) \
-       do { dma_cache_sync((void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
+#define  CHECK_WBACK_INV(priv, addr, len) \
+       do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
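
The new priv parameter exists only to reach the struct device stored
in the driver's private data; each wrapper simply forwards (priv)->dev
to dma_cache_sync(). As a hypothetical illustration, with lp being the
i596_private used throughout this driver:

	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
	/* expands (modulo the do/while wrapper) to:
	 * dma_cache_sync(lp->dev, (void *)&(lp->scb),
	 *                sizeof(struct i596_scb), DMA_TO_DEVICE);
	 */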
 
 
 #define PA_I82596_RESET                0       /* Offsets relative to LASI-LAN-Addr.*/
 
 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
 {
-       CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+       CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
        while (--delcnt && lp->iscp.stat) {
                udelay(10);
-               CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+               CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
        }
        if (!delcnt) {
                printk("%s: %s, iscp.stat %04x, didn't clear\n",
 
 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
 {
-       CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
        while (--delcnt && lp->scb.command) {
                udelay(10);
-               CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+               CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
        }
        if (!delcnt) {
                printk("%s: %s, status %4.4x, cmd %4.4x.\n",
                        rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
                rbd = rbd->v_next;
        } while (rbd != lp->rbd_head);
-       CHECK_INV(lp, sizeof(struct i596_private));
+       CHECK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 
        rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
        rfd->cmd = CMD_EOL|CMD_FLEX;
 
-       CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+       CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 static inline void remove_rx_bufs(struct net_device *dev)
        lp->rbd_head = lp->rbds;
        lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
 
-       CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+       CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 
 
        DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));
 
-       CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
-       CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
+       CHECK_WBACK(lp, &(lp->scp), sizeof(struct i596_scp));
+       CHECK_WBACK(lp, &(lp->iscp), sizeof(struct i596_iscp));
 
        MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
 
        rebuild_rx_bufs(dev);
 
        lp->scb.command = 0;
-       CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 
        enable_irq(dev->irq);   /* enable IRQs from LAN */
 
        DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
        memcpy(lp->cf_cmd.i596_config, init_setup, 14);
        lp->cf_cmd.cmd.command = CmdConfigure;
-       CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
+       CHECK_WBACK(lp, &(lp->cf_cmd), sizeof(struct cf_cmd));
        i596_add_cmd(dev, &lp->cf_cmd.cmd);
 
        DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
        memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
        lp->sa_cmd.cmd.command = CmdSASetup;
-       CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
+       CHECK_WBACK(lp, &(lp->sa_cmd), sizeof(struct sa_cmd));
        i596_add_cmd(dev, &lp->sa_cmd.cmd);
 
        DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
        lp->tdr_cmd.cmd.command = CmdTDR;
-       CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
+       CHECK_WBACK(lp, &(lp->tdr_cmd), sizeof(struct tdr_cmd));
        i596_add_cmd(dev, &lp->tdr_cmd.cmd);
 
        spin_lock_irqsave (&lp->lock, flags);
        DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
        lp->scb.command = RX_START;
        lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
-       CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 
        CA(dev);
 
 
        rfd = lp->rfd_head;             /* Ref next frame to check */
 
-       CHECK_INV(rfd, sizeof(struct i596_rfd));
+       CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
        while ((rfd->stat) & STAT_C) {  /* Loop while complete frames */
                if (rfd->rbd == I596_NULL)
                        rbd = NULL;
                else if (rfd->rbd == lp->rbd_head->b_addr) {
                        rbd = lp->rbd_head;
-                       CHECK_INV(rbd, sizeof(struct i596_rbd));
+                       CHECK_INV(lp, rbd, sizeof(struct i596_rbd));
                }
                else {
                        printk("%s: rbd chain broken!\n", dev->name);
                                dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
                                rbd->v_data = newskb->data;
                                rbd->b_data = WSWAPchar(dma_addr);
-                               CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+                               CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
                        }
                        else
                                skb = dev_alloc_skb(pkt_len + 2);
                if (rbd != NULL && (rbd->count & 0x4000)) {
                        rbd->count = 0;
                        lp->rbd_head = rbd->v_next;
-                       CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+                       CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
                }
 
                /* Tidy the frame descriptor, marking it as end of list */
 
                lp->scb.rfd = rfd->b_next;
                lp->rfd_head = rfd->v_next;
-               CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
-               CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
+               CHECK_WBACK_INV(lp, rfd->v_prev, sizeof(struct i596_rfd));
+               CHECK_WBACK_INV(lp, rfd, sizeof(struct i596_rfd));
                rfd = lp->rfd_head;
-               CHECK_INV(rfd, sizeof(struct i596_rfd));
+               CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
        }
 
        DEB(DEB_RXFRAME, printk("frames %d\n", frames));
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
                }
-               CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
+               CHECK_WBACK_INV(lp, ptr, sizeof(struct i596_cmd));
        }
 
        wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
        lp->scb.cmd = I596_NULL;
-       CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 }
 
 
 
        /* FIXME: this command might cause an lpmc */
        lp->scb.command = CUC_ABORT | RX_ABORT;
-       CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
        CA(dev);
 
        /* wait for shutdown */
        cmd->command |= (CMD_EOL | CMD_INTR);
        cmd->v_next = NULL;
        cmd->b_next = I596_NULL;
-       CHECK_WBACK(cmd, sizeof(struct i596_cmd));
+       CHECK_WBACK(lp, cmd, sizeof(struct i596_cmd));
 
        spin_lock_irqsave (&lp->lock, flags);
 
        if (lp->cmd_head != NULL) {
                lp->cmd_tail->v_next = cmd;
                lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
-               CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
+               CHECK_WBACK(lp, lp->cmd_tail, sizeof(struct i596_cmd));
        } else {
                lp->cmd_head = cmd;
                wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
                lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
                lp->scb.command = CUC_START;
-               CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+               CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
                CA(dev);
        }
        lp->cmd_tail = cmd;
        data = virt_to_dma(lp,tint);
 
        tint[1] = -1;
-       CHECK_WBACK(tint,PAGE_SIZE);
+       CHECK_WBACK(lp, tint, PAGE_SIZE);
 
        MPU_PORT(dev, 1, data);
 
        for(data = 1000000; data; data--) {
-               CHECK_INV(tint,PAGE_SIZE);
+               CHECK_INV(lp, tint, PAGE_SIZE);
                if(tint[1] != -1)
                        break;
 
                /* Issue a channel attention signal */
                DEB(DEB_ERRORS, printk("Kicking board.\n"));
                lp->scb.command = CUC_START | RX_START;
-               CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
+               CHECK_WBACK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
                CA (dev);
                lp->last_restart = lp->stats.tx_packets;
        }
                tbd->data = WSWAPchar(tx_cmd->dma_addr);
 
                DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
-               CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
-               CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
+               CHECK_WBACK_INV(lp, tx_cmd, sizeof(struct tx_cmd));
+               CHECK_WBACK_INV(lp, tbd, sizeof(struct i596_tbd));
                i596_add_cmd(dev, &tx_cmd->cmd);
 
                lp->stats.tx_packets++;
        lp->dma_addr = dma_addr;
        lp->dev = gen_dev;
 
-       CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
+       CHECK_WBACK_INV(lp, dev->mem_start, sizeof(struct i596_private));
 
        i = register_netdev(dev);
        if (i) {
                        DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
 
                while (lp->cmd_head != NULL) {
-                       CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
+                       CHECK_INV(lp, lp->cmd_head, sizeof(struct i596_cmd));
                        if (!(lp->cmd_head->status & STAT_C))
                                break;
 
                        }
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
-                       CHECK_WBACK(ptr, sizeof(struct i596_cmd));
+                       CHECK_WBACK(lp, ptr, sizeof(struct i596_cmd));
                        lp->last_cmd = jiffies;
                }
 
 
                        ptr->command &= 0x1fff;
                        ptr = ptr->v_next;
-                       CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));
+                       CHECK_WBACK_INV(lp, prev, sizeof(struct i596_cmd));
                }
 
                if ((lp->cmd_head != NULL))
                        ack_cmd |= CUC_START;
                lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
-               CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));
+               CHECK_WBACK_INV(lp, &lp->scb, sizeof(struct i596_scb));
        }
        if ((status & 0x1000) || (status & 0x4000)) {
                if ((status & 0x4000))
        }
        wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
        lp->scb.command = ack_cmd;
-       CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
 
        /* DANGER: I suspect that some kind of interrupt
         acknowledgement aside from acking the 82596 might be needed
 
        wait_cmd(dev, lp, 100, "close1 timed out");
        lp->scb.command = CUC_ABORT | RX_ABORT;
-       CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
 
        CA(dev);
 
                               dev->name);
                else {
                        lp->cf_cmd.cmd.command = CmdConfigure;
-                       CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
+                       CHECK_WBACK_INV(lp, &lp->cf_cmd, sizeof(struct cf_cmd));
                        i596_add_cmd(dev, &lp->cf_cmd.cmd);
                }
        }
                                DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
                                                dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
                }
-               CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
+               CHECK_WBACK_INV(lp, &lp->mc_cmd, sizeof(struct mc_cmd));
                i596_add_cmd(dev, &cmd->cmd);
        }
 }
 
        for (j = 0; j < PATCHES; j++)
                script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
        /* now patch up fixed addresses. */
-       script_patch_32(script, MessageLocation,
+       script_patch_32(hostdata->dev, script, MessageLocation,
                        pScript + MSGOUT_OFFSET);
-       script_patch_32(script, StatusAddress,
+       script_patch_32(hostdata->dev, script, StatusAddress,
                        pScript + STATUS_OFFSET);
-       script_patch_32(script, ReceiveMsgAddress,
+       script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
                        pScript + MSGIN_OFFSET);
 
        hostdata->script = script;
                        shost_printk(KERN_WARNING, host,
                                "Unexpected SDTR msg\n");
                        hostdata->msgout[0] = A_REJECT_MSG;
-                       dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-                       script_patch_16(hostdata->script, MessageCount, 1);
+                       dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+                       script_patch_16(hostdata->dev, hostdata->script,
+                                       MessageCount, 1);
                        /* SendMsgOut returns, so set up the return
                         * address */
                        resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
                printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
                       host->host_no, pun, lun);
                hostdata->msgout[0] = A_REJECT_MSG;
-               dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-               script_patch_16(hostdata->script, MessageCount, 1);
+               dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+               script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+                               1);
                resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 
                break;
                printk("\n");
                /* just reject it */
                hostdata->msgout[0] = A_REJECT_MSG;
-               dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-               script_patch_16(hostdata->script, MessageCount, 1);
+               dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+               script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+                               1);
                /* SendMsgOut returns, so set up the return
                 * address */
                resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
                printk("\n");
                /* just reject it */
                hostdata->msgout[0] = A_REJECT_MSG;
-               dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-               script_patch_16(hostdata->script, MessageCount, 1);
+               dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+               script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+                               1);
                /* SendMsgOut returns, so set up the return
                 * address */
                resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
        }
        NCR_700_writel(temp, host, TEMP_REG);
        /* set us up to receive another message */
-       dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
+       dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
        return resume_offset;
 }
 
                                slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
                                slot->SG[1].pAddr = 0;
                                slot->resume_offset = hostdata->pScript;
-                               dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
-                               dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
-                               
+                               dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
+                               dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
+
                                /* queue the command for reissue */
                                slot->state = NCR_700_SLOT_QUEUED;
                                slot->flags = NCR_700_FLAG_AUTOSENSE;
                        hostdata->cmd = slot->cmnd;
 
                        /* re-patch for this command */
-                       script_patch_32_abs(hostdata->script, CommandAddress, 
-                                           slot->pCmd);
-                       script_patch_16(hostdata->script,
+                       script_patch_32_abs(hostdata->dev, hostdata->script,
+                                           CommandAddress, slot->pCmd);
+                       script_patch_16(hostdata->dev, hostdata->script,
                                        CommandCount, slot->cmnd->cmd_len);
-                       script_patch_32_abs(hostdata->script, SGScriptStartAddress,
+                       script_patch_32_abs(hostdata->dev, hostdata->script,
+                                           SGScriptStartAddress,
                                            to32bit(&slot->pSG[0].ins));
 
                        /* Note: setting SXFER only works if we're
                         * should therefore always clear ACK */
                        NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
                                       host, SXFER_REG);
-                       dma_cache_sync(hostdata->msgin,
+                       dma_cache_sync(hostdata->dev, hostdata->msgin,
                                       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
-                       dma_cache_sync(hostdata->msgout,
+                       dma_cache_sync(hostdata->dev, hostdata->msgout,
                                       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
                        /* I'm just being paranoid here, the command should
                         * already have been flushed from the cache */
-                       dma_cache_sync(slot->cmnd->cmnd,
+                       dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
                                       slot->cmnd->cmd_len, DMA_TO_DEVICE);
 
 
                hostdata->reselection_id = reselection_id;
                /* just in case we have a stale simple tag message, clear it */
                hostdata->msgin[1] = 0;
-               dma_cache_sync(hostdata->msgin,
+               dma_cache_sync(hostdata->dev, hostdata->msgin,
                               MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
                if(hostdata->tag_negotiated & (1<<reselection_id)) {
                        resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
        hostdata->cmd = NULL;
        /* clear any stale simple tag message */
        hostdata->msgin[1] = 0;
-       dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+       dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
                       DMA_BIDIRECTIONAL);
 
        if(id == 0xff) {
                NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
        }
 
-       script_patch_16(hostdata->script, MessageCount, count);
+       script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
 
 
-       script_patch_ID(hostdata->script,
+       script_patch_ID(hostdata->dev, hostdata->script,
                        Device_ID, 1<<scmd_id(SCp));
 
-       script_patch_32_abs(hostdata->script, CommandAddress, 
+       script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
                            slot->pCmd);
-       script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
+       script_patch_16(hostdata->dev, hostdata->script, CommandCount,
+                       SCp->cmd_len);
        /* finally plumb the beginning of the SG list into the script
         * */
-       script_patch_32_abs(hostdata->script, SGScriptStartAddress,
-                           to32bit(&slot->pSG[0].ins));
+       script_patch_32_abs(hostdata->dev, hostdata->script,
+                           SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
        NCR_700_clear_fifo(SCp->device->host);
 
        if(slot->resume_offset == 0)
                slot->resume_offset = hostdata->pScript;
        /* now perform all the writebacks and invalidates */
-       dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
-       dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+       dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
+       dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
                       DMA_FROM_DEVICE);
-       dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
-       dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
+       dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
+       dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
 
        /* set the synchronous period/offset */
        NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
                                        slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
                                        slot->SG[i].pAddr = 0;
                                }
-                               dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+                               dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
                                /* and pretend we disconnected after
                                 * the command phase */
                                resume_offset = hostdata->pScript + Ent_MsgInDuringData;
                }
                slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
                slot->SG[i].pAddr = 0;
-               dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+               dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
                DEBUG((" SETTING %08lx to %x\n",
-                      (&slot->pSG[i].ins), 
+                      (&slot->pSG[i].ins),
                       slot->SG[i].ins));
        }
        slot->resume_offset = 0;
 
 #define NCR_710_MIN_XFERP      0
 #define NCR_700_MIN_PERIOD     25 /* for SDTR message, 100ns */
 
-#define script_patch_32(script, symbol, value) \
+#define script_patch_32(dev, script, symbol, value) \
 { \
        int i; \
        for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
                __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + value; \
                (script)[A_##symbol##_used[i]] = bS_to_host(val); \
-               dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+               dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
                DEBUG((" script, patching %s at %d to 0x%lx\n", \
                       #symbol, A_##symbol##_used[i], (value))); \
        } \
 }
 
-#define script_patch_32_abs(script, symbol, value) \
+#define script_patch_32_abs(dev, script, symbol, value) \
 { \
        int i; \
        for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
                (script)[A_##symbol##_used[i]] = bS_to_host(value); \
-               dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+               dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
                DEBUG((" script, patching %s at %d to 0x%lx\n", \
                       #symbol, A_##symbol##_used[i], (value))); \
        } \
 }
 
 /* Used for patching the SCSI ID in the SELECT instruction */
-#define script_patch_ID(script, symbol, value) \
+#define script_patch_ID(dev, script, symbol, value) \
 { \
        int i; \
        for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
                val &= 0xff00ffff; \
                val |= ((value) & 0xff) << 16; \
                (script)[A_##symbol##_used[i]] = bS_to_host(val); \
-               dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+               dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
                DEBUG((" script, patching ID field %s at %d to 0x%x\n", \
                       #symbol, A_##symbol##_used[i], val)); \
        } \
 }
 
-#define script_patch_16(script, symbol, value) \
+#define script_patch_16(dev, script, symbol, value) \
 { \
        int i; \
        for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
                val &= 0xffff0000; \
                val |= ((value) & 0xffff); \
                (script)[A_##symbol##_used[i]] = bS_to_host(val); \
-               dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+               dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
                DEBUG((" script, patching short field %s at %d to 0x%x\n", \
                       #symbol, A_##symbol##_used[i], val)); \
        } \
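
Each script_patch_* macro now finishes every patched word with a
dma_cache_sync() against the passed-in device, which is why they all
grow the dev parameter. A sketch of what one script_patch_16(dev,
script, symbol, 1) invocation does for a single hypothetical use site
at word 42 (real indices come from the generated A_<symbol>_used
tables):

	__u32 val = bS_to_cpu(script[42]);
	val &= 0xffff0000;
	val |= (1) & 0xffff;
	script[42] = bS_to_host(val);
	dma_cache_sync(dev, &script[42], 4, DMA_TO_DEVICE);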
 
        if (!mpsc_sdma_tx_active(pi)) {
                txre = (struct mpsc_tx_desc *)(pi->txr +
                        (pi->txr_tail * MPSC_TXRE_SIZE));
-               dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+               dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        invalidate_dcache_range((ulong)txre,
        }
        txre->link = cpu_to_be32(pi->txr_p);    /* Wrap last back to first */
 
-       dma_cache_sync((void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
+       dma_cache_sync(pi->port.dev, (void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
                DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 
        rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));
 
-       dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+       dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                invalidate_dcache_range((ulong)rxre,
                }
 
                bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
-               dma_cache_sync((void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
+               dma_cache_sync(pi->port.dev, (void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        invalidate_dcache_range((ulong)bp,
                                            SDMA_DESC_CMDSTAT_F |
                                            SDMA_DESC_CMDSTAT_L);
                wmb();
-               dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
+               dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        flush_dcache_range((ulong)rxre,
                pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
                rxre = (struct mpsc_rx_desc *)(pi->rxr +
                        (pi->rxr_posn * MPSC_RXRE_SIZE));
-               dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+               dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        invalidate_dcache_range((ulong)rxre,
                                                           SDMA_DESC_CMDSTAT_EI
                                                           : 0));
        wmb();
-       dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
+       dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                flush_dcache_range((ulong)txre,
                else /* All tx data copied into ring bufs */
                        return;
 
-               dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+               dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        flush_dcache_range((ulong)bp,
                txre = (struct mpsc_tx_desc *)(pi->txr +
                        (pi->txr_tail * MPSC_TXRE_SIZE));
 
-               dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+               dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        invalidate_dcache_range((ulong)txre,
 
                        txre = (struct mpsc_tx_desc *)(pi->txr +
                                (pi->txr_tail * MPSC_TXRE_SIZE));
-                       dma_cache_sync((void *) txre, MPSC_TXRE_SIZE,
+                       dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE,
                                DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        count--;
                }
 
-               dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+               dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        flush_dcache_range((ulong)bp,
 
 #define dma_sync_single_range(dev, addr, off, size, dir)  do { } while (0)
 #define dma_sync_sg_for_cpu(dev, sg, nents, dir)         do { } while (0)
 #define dma_sync_sg_for_device(dev, sg, nents, dir)      do { } while (0)
-#define dma_cache_sync(va, size, dir)                    do { } while (0)
+#define dma_cache_sync(dev, va, size, dir)               do { } while (0)
 
 #define dma_get_cache_alignment()                        L1_CACHE_BYTES
 
 
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-extern void dma_cache_sync(void *vaddr, size_t size, int direction);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+       int direction);
 
 /*
  * Return whether the given device DMA address mask can be supported
 
 #define dma_is_consistent(d, h)        (1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
 }
 
 #define dma_is_consistent(d, h)        (1)
 
 static inline
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction direction)
 {
        flush_write_buffers();
 
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        /* could define this in terms of the dma_cache ... operations,
 
 #define dma_is_consistent(d, h)        (1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        flush_write_buffers();
 
 extern int dma_get_cache_alignment(void);
 
 static inline void
-dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync (struct device *dev, void *vaddr, size_t size,
+       enum dma_data_direction dir)
 {
        /*
         * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
 
 {
        dma_free_coherent(dev, size, addr, handle);
 }
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
 {
        /* we use coherent allocation, so not much to do here. */
 
 
 extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
 
-extern void dma_cache_sync(void *vaddr, size_t size,
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction);
 
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
 
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        if(hppa_dma_ops->dma_sync_single_for_cpu)
 
        dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction)
 {
        BUG_ON(direction == DMA_NONE);
 
        consistent_free(vaddr, size);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
 {
        consistent_sync(vaddr, size, (int)dir);
 
        consistent_free(NULL, size, vaddr, dma_handle);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
 {
        dma_cache_wback_inv((unsigned long)vaddr, size);
 
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        /* could define this in terms of the dma_cache ... operations,
 
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        BUG();
 
 extern int dma_set_mask(struct device *dev, u64 mask);
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+       enum dma_data_direction dir)
 {
        flush_write_buffers();
 }
 
 #define dma_is_consistent(d, h)        (1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        consistent_sync(vaddr, size, direction);