const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
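
/*
 * When swiotlb is in use, the allocation hooks below simply forward to the
 * generic dma-direct code; the only ia64-specific work is the i-cache
 * "clean page" bookkeeping done in arch_sync_dma_for_cpu().
 */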
#ifdef CONFIG_SWIOTLB
void *arch_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return page_to_pfn(virt_to_page(cpu_addr));
}
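
/*
 * Matching free hook for arch_dma_alloc(). In the mainline ia64 code this
 * hands the buffer straight back to the generic dma-direct allocator.
 */
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}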

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PHYS_PFN(paddr);

	do {
		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags); /* mark page as clean */
	} while (++pfn <= PHYS_PFN(paddr + size - 1));
}
#endif /* CONFIG_SWIOTLB */
inline void
ia64_set_rbs_bot (void)