/*
  * __pa()/__va() should be used only during mem init.
  */
-#ifdef CONFIG_64BIT
-#define __pa(x)                                                                \
-({                                                                     \
-    unsigned long __x = (unsigned long)(x);                            \
-    __x < CKSEG0 ? XPHYSADDR(__x) : CPHYSADDR(__x);                    \
-})
-#else
-#define __pa(x)                                                                \
-    ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
-#endif
+static inline unsigned long ___pa(unsigned long x)
+{
+       if (config_enabled(CONFIG_64BIT)) {
+               /*
+                * For MIPS64 the virtual address may either be in one of
+                * the compatibility segments ckseg0 or ckseg1, or it may
+                * be in xkphys.
+                */
+               return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
+       }
+
+       if (!config_enabled(CONFIG_EVA)) {
+               /*
+                * We're using the standard MIPS32 legacy memory map, i.e.
+                * the address x is going to be in kseg0 or kseg1. We can
+                * the address x is going to be in kseg0 or kseg1. We can
+                * handle either case by masking out the desired bits using
+                * CPHYSADDR.
+                */
+               return CPHYSADDR(x);
+       }
+
+       /*
+        * EVA is in use so the memory map could be anything, making it not
+        * safe to just mask out bits.
+        */
+       return x - PAGE_OFFSET + PHYS_OFFSET;
+}
+#define __pa(x)                ___pa((unsigned long)(x))
 #define __va(x)                ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
 #include <asm/io.h>