 #include <asm/x86_init.h>
 #include <asm/iommu_table.h>
 
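+/* Sentinel DMA address returned for failed mappings. */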
+#define CALGARY_MAPPING_ERROR  0
+
 #ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
 int use_calgary __read_mostly = 1;
 #else
                        if (panic_on_overflow)
                                panic("Calgary: fix the allocator.\n");
                        else
-                               return DMA_ERROR_CODE;
+                               return CALGARY_MAPPING_ERROR;
                }
        }
 
 
        entry = iommu_range_alloc(dev, tbl, npages);
 
-       if (unlikely(entry == DMA_ERROR_CODE)) {
+       if (unlikely(entry == CALGARY_MAPPING_ERROR)) {
                pr_warn("failed to allocate %u pages in iommu %p\n",
                        npages, tbl);
-               return DMA_ERROR_CODE;
+               return CALGARY_MAPPING_ERROR;
        }
 
        /* set the return dma address */
        unsigned long flags;
 
        /* were we called with bad_dma_address? */
-       badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
+       badend = CALGARY_MAPPING_ERROR + (EMERGENCY_PAGES * PAGE_SIZE);
        if (unlikely(dma_addr < badend)) {
                WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
                       "address 0x%Lx\n", dma_addr);
                npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
 
                entry = iommu_range_alloc(dev, tbl, npages);
-               if (entry == DMA_ERROR_CODE) {
+               if (entry == CALGARY_MAPPING_ERROR) {
                        /* makes sure unmap knows to stop */
                        s->dma_length = 0;
                        goto error;
 error:
        calgary_unmap_sg(dev, sg, nelems, dir, 0);
        for_each_sg(sg, s, nelems, i) {
-               sg->dma_address = DMA_ERROR_CODE;
+               sg->dma_address = CALGARY_MAPPING_ERROR;
                sg->dma_length = 0;
        }
        return 0;
 
        /* set up tces to cover the allocated range */
        mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
-       if (mapping == DMA_ERROR_CODE)
+       if (mapping == CALGARY_MAPPING_ERROR)
                goto free;
        *dma_handle = mapping;
        return ret;
        free_pages((unsigned long)vaddr, get_order(size));
 }
 
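+/* Tell the DMA API whether a returned handle marks a failed mapping. */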
+static int calgary_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return dma_addr == CALGARY_MAPPING_ERROR;
+}
+
 static const struct dma_map_ops calgary_dma_ops = {
        .alloc = calgary_alloc_coherent,
        .free = calgary_free_coherent,
        .unmap_sg = calgary_unmap_sg,
        .map_page = calgary_map_page,
        .unmap_page = calgary_unmap_page,
+       .mapping_error = calgary_mapping_error,
 };
 
 static inline void __iomem * busno_to_bbar(unsigned char num)
        struct iommu_table *tbl = pci_iommu(dev->bus);
 
        /* reserve EMERGENCY_PAGES from bad_dma_address and up */
-       iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);
+       iommu_range_reserve(tbl, CALGARY_MAPPING_ERROR, EMERGENCY_PAGES);
 
        /* avoid the BIOS/VGA first 640KB-1MB region */
        /* for CalIOC2 - avoid the entire first MB */