#ifndef ASM_ARM_DMA_DIRECT_H
#define ASM_ARM_DMA_DIRECT_H 1
-static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
unsigned int offset = paddr & ~PAGE_MASK;
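	/* pfn_to_dma() translates the page frame number; add back the
	 * offset within the page. */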
return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
#define FLUSH_RAC 0x100
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t pa)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t pa)
{
struct bmips_dma_range *r;
}
#endif /* CONFIG_PCI */
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
#ifdef CONFIG_PCI
if (dev && dev_is_pci(dev))
#ifndef _MIPS_DMA_DIRECT_H
#define _MIPS_DMA_DIRECT_H 1
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
#endif /* _MIPS_DMA_DIRECT_H */
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
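	/* Devices on this board see system RAM at a fixed 2 GB offset,
	 * so bit 31 is set to form the bus address. */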
return paddr | 0x80000000;
}
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return paddr | 0x80000000;
}
#include <linux/swiotlb.h>
#include <boot_param.h>
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	/* We extract the 2-bit node id (bits 44~47; only bits 44~45 are used
	 * now) from Loongson-3's 48-bit address space and embed it into
	 * 40 bits. */
	long nid = (paddr >> 44) & 0x3;

	return ((nid << 44) ^ paddr) | (nid << node_id_offset);
}
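/*
 * Example for the mapping above, assuming a hypothetical node_id_offset
 * of 37: paddr 0x1000_0010_0000 on node 1 (bit 44 set) yields nid = 1;
 * the XOR clears bit 44 and the OR re-inserts the node id at bit 37,
 * giving the 40-bit bus address 0x20_0010_0000.
 */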
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
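	/* ar2315_dev_offset() returns the device-specific offset between
	 * physical and bus addresses. */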
return paddr + ar2315_dev_offset(dev);
}
/*
 * Common phys<->dma mapping for platforms using the PCI xtalk bridge
 */
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
#define RAM_OFFSET_MASK 0x3fffffffUL
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
dma_addr_t dma_addr = paddr & RAM_OFFSET_MASK;
#ifndef ASM_POWERPC_DMA_DIRECT_H
#define ASM_POWERPC_DMA_DIRECT_H 1
-static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return paddr + dev->archdata.dma_offset;
}
*/
if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
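		/*
		 * Bounce through swiotlb: the swiotlb pool is unencrypted
		 * memory, hence the phys_to_dma_unencrypted() translation of
		 * its base address below.
		 */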
tlb_addr = swiotlb_tbl_map_single(dev,
- __phys_to_dma(dev, io_tlb_start),
+ phys_to_dma_unencrypted(dev, io_tlb_start),
paddr, size, aligned_size, dir, attrs);
if (tlb_addr == DMA_MAPPING_ERROR) {
goto swiotlb_error;
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
+#ifndef phys_to_dma_unencrypted
+#define phys_to_dma_unencrypted phys_to_dma
+#endif
#else
-static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
+ phys_addr_t paddr)
{
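	/* Generic fallback: adjust by the device's PFN offset, e.g. as set
	 * up from a devicetree "dma-ranges" property. */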
dma_addr_t dev_addr = (dma_addr_t)paddr;
return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
+/*
+ * If memory encryption is supported, phys_to_dma will set the memory encryption
+ * bit in the DMA address, and dma_to_phys will clear it.
+ * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
+ * buffers.
+ */
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return __sme_set(phys_to_dma_unencrypted(dev, paddr));
+}
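+/*
+ * Illustrative example (assumed values, not real constants): with an SME
+ * C-bit at position 47 and no device DMA offset,
+ *
+ *	phys_to_dma_unencrypted(dev, 0x1000)	== 0x1000
+ *	phys_to_dma(dev, 0x1000)		== (1ULL << 47) | 0x1000
+ *	dma_to_phys(dev, (1ULL << 47) | 0x1000)	== 0x1000
+ *
+ * i.e. only phys_to_dma() tags the bus address as encrypted.
+ */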
+
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
phys_addr_t paddr = (phys_addr_t)dev_addr +
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
-/*
- * If memory encryption is supported, phys_to_dma will set the memory encryption
- * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
- * version should only be used on non-encrypted memory for
- * special occasions like DMA coherent buffers.
- */
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return __sme_set(__phys_to_dma(dev, paddr));
-}
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
bool is_ram)
{
phys_addr_t phys)
{
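	/*
	 * Devices forced to DMA to unencrypted memory (e.g. in a SEV
	 * guest) must not get the encryption bit set in the bus address.
	 */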
if (force_dma_unencrypted(dev))
- return __phys_to_dma(dev, phys);
+ return phys_to_dma_unencrypted(dev, phys);
return phys_to_dma(dev, phys);
}
return 1;
/*
- * This check needs to be against the actual bit mask value, so
- * use __phys_to_dma() here so that the SME encryption mask isn't
+ * This check needs to be against the actual bit mask value, so use
+ * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
* part of the check.
*/
if (IS_ENABLED(CONFIG_ZONE_DMA))
min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
- return mask >= __phys_to_dma(dev, min_mask);
+ return mask >= phys_to_dma_unencrypted(dev, min_mask);
}
size_t dma_direct_max_mapping_size(struct device *dev)
swiotlb_force);
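	/* Copy into the bounce buffer, then hand the device the bounce
	 * buffer's (unencrypted) bus address. */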
swiotlb_addr = swiotlb_tbl_map_single(dev,
- __phys_to_dma(dev, io_tlb_start),
+ phys_to_dma_unencrypted(dev, io_tlb_start),
paddr, size, size, dir, attrs);
if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
/* Ensure that the address returned is DMA'ble */
- dma_addr = __phys_to_dma(dev, swiotlb_addr);
+ dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);