default y
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
+ select ARCH_HAS_DEBUG_VIRTUAL if !64BIT
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
* almost all conceivable cases a device driver should not be using
* this function
*/
-static inline unsigned long virt_to_phys(volatile const void *address)
+static inline unsigned long __virt_to_phys_nodebug(volatile const void *address)
{
return __pa(address);
}
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern phys_addr_t __virt_to_phys(volatile const void *x);
+#else
+#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
+#endif
+
+#define virt_to_phys virt_to_phys
+static inline phys_addr_t virt_to_phys(const volatile void *x)
+{
+ return __virt_to_phys(x);
+}
+
/*
* phys_to_virt - map physical address to virtual
* @address: address to remap
* also affect MIPS so we keep this one until GCC 3.x has been retired
* before we can apply https://patchwork.linux-mips.org/patch/1541/
*/
+#define __pa_symbol_nodebug(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern phys_addr_t __phys_addr_symbol(unsigned long x);
+#else
+#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
+#endif
#ifndef __pa_symbol
-#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+#define __pa_symbol(x) __phys_addr_symbol((unsigned long)(x))
#endif
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
{
struct mips_vdso_image *image = current->thread.abi->vdso;
struct mm_struct *mm = current->mm;
- unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn;
+ unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base;
struct vm_area_struct *vma;
int ret;
/* Map GIC user page. */
if (gic_size) {
- gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT;
+ gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS;
+ gic_pfn = virt_to_phys((void *)gic_base) >> PAGE_SHIFT;
ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
pgprot_noncached(vma->vm_page_prot));
obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
obj-$(CONFIG_SCACHE_DEBUGFS) += sc-debugfs.o
+
+obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
--- /dev/null
+++ b/arch/mips/mm/physaddr.c
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/mmdebug.h>
+#include <linux/mm.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/dma.h>
+
+static inline bool __debug_virt_addr_valid(unsigned long x)
+{
+ /*
+ * high_memory does not get immediately defined, and there
+ * are early callers of __pa() against PAGE_OFFSET.
+ */
+ if (!high_memory && x >= PAGE_OFFSET)
+ return true;
+
+ if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory)
+ return true;
+
+ /*
+ * MAX_DMA_ADDRESS is a virtual address that may not correspond to an
+ * actual physical address. Enough code relies on
+ * virt_to_phys(MAX_DMA_ADDRESS) that we just need to work around it
+ * and always return true.
+ */
+ if (x == MAX_DMA_ADDRESS)
+ return true;
+
+ return false;
+}
+
+phys_addr_t __virt_to_phys(volatile const void *x)
+{
+ WARN(!__debug_virt_addr_valid((unsigned long)x),
+ "virt_to_phys used for non-linear address: %pK (%pS)\n",
+ x, x);
+
+ return __virt_to_phys_nodebug(x);
+}
+EXPORT_SYMBOL(__virt_to_phys);
+
+phys_addr_t __phys_addr_symbol(unsigned long x)
+{
+ /*
+ * This is bounds checking against the kernel image only.
+ * __pa_symbol() should only be used on kernel symbol addresses.
+ */
+ VIRTUAL_BUG_ON(x < (unsigned long)_text ||
+ x > (unsigned long)_end);
+
+ return __pa_symbol_nodebug(x);
+}
+EXPORT_SYMBOL(__phys_addr_symbol);
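
For reviewers, a minimal sketch of the kind of misuse the new checks are meant to catch; it is not part of the patch, and the ioremap() range and helper name are hypothetical:

/*
 * Illustrative only, not from this series. With CONFIG_DEBUG_VIRTUAL=y,
 * virt_to_phys() now goes through __virt_to_phys(), which WARNs when the
 * argument is not a linear-map (direct-mapped) kernel address.
 */
#include <linux/io.h>
#include <linux/slab.h>

static void __maybe_unused debug_virtual_example(void)
{
	void *buf = kmalloc(64, GFP_KERNEL);			/* linear-map address */
	void __iomem *regs = ioremap(0x1fd00000, 0x100);	/* hypothetical MMIO base */

	if (buf)
		(void)virt_to_phys(buf);			/* fine: inside the linear map */

	if (regs) {
		(void)virt_to_phys((void __force *)regs);	/* WARNs: ioremap() cookie */
		iounmap(regs);
	}

	kfree(buf);
}

This mirrors the vdso.c hunk above: mips_gic_base is an __iomem pointer, so it is first converted to an unsigned long (gic_base) and only then cast to void * for virt_to_phys().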