KVM: arm64: selftests: Add MMIO readl/writel support
author		Raghavendra Rao Ananta <rananta@google.com>
		Thu, 7 Oct 2021 23:34:25 +0000 (23:34 +0000)
committer	Marc Zyngier <maz@kernel.org>
		Sun, 17 Oct 2021 10:15:11 +0000 (11:15 +0100)
Define the readl() and writel() functions for guests to perform
4-byte accesses to the MMIO region.

The routines, and the barrier macros they depend on, are inspired by
the kernel's arch/arm64/include/asm/io.h and arch/arm64/include/asm/barrier.h.
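
As a minimal sketch of the intended guest-side usage (MMIO_GPA, the
written value and guest_code() below are illustrative only, not part
of this patch; the host side of a test would map the address to an
emulated device or leave it unmapped so the access exits to userspace):

	/* Hypothetical MMIO guest physical address used by the test */
	#define MMIO_GPA	((volatile void *)0x0a000000)

	static void guest_code(void)
	{
		u32 val;

		/* 4-byte MMIO write; __iowmb() orders prior writes before the store */
		writel(0xdeadbeef, MMIO_GPA);

		/* 4-byte MMIO read; __iormb() orders the load before later accesses */
		val = readl(MMIO_GPA);

		/* What the read returns depends on the emulated device */
		GUEST_ASSERT(val == 0xdeadbeef);
	}

Both accessors are little-endian; the _relaxed variants perform the
same access but without the I/O barriers.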

Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211007233439.1826892-2-rananta@google.com
tools/testing/selftests/kvm/include/aarch64/processor.h

index c0273aefa63de11d4a245130f6c5d3e50eae922c..96578bd46a851ee594f942f3da73cd7da239a80b 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "kvm_util.h"
 #include <linux/stringify.h>
+#include <linux/types.h>
 
 
 #define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
@@ -130,6 +131,49 @@ void vm_install_sync_handler(struct kvm_vm *vm,
        val;                                                              \
 })
 
-#define isb()  asm volatile("isb" : : : "memory")
+#define isb()          asm volatile("isb" : : : "memory")
+#define dsb(opt)       asm volatile("dsb " #opt : : : "memory")
+#define dmb(opt)       asm volatile("dmb " #opt : : : "memory")
+
+#define dma_wmb()      dmb(oshst)
+#define __iowmb()      dma_wmb()
+
+#define dma_rmb()      dmb(oshld)
+
+#define __iormb(v)                                                     \
+({                                                                     \
+       unsigned long tmp;                                              \
+                                                                       \
+       dma_rmb();                                                      \
+                                                                       \
+       /*                                                              \
+        * Courtesy of arch/arm64/include/asm/io.h:                     \
+        * Create a dummy control dependency from the IO read to any    \
+        * later instructions. This ensures that a subsequent call      \
+        * to udelay() will be ordered due to the ISB in __delay().     \
+        */                                                             \
+       asm volatile("eor       %0, %1, %1\n"                           \
+                    "cbnz      %0, ."                                  \
+                    : "=r" (tmp) : "r" ((unsigned long)(v))            \
+                    : "memory");                                       \
+})
+
+static __always_inline void __raw_writel(u32 val, volatile void *addr)
+{
+       asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
+}
+
+static __always_inline u32 __raw_readl(const volatile void *addr)
+{
+       u32 val;
+       asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
+       return val;
+}
+
+#define writel_relaxed(v,c)    ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
+#define readl_relaxed(c)       ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+
+#define writel(v,c)            ({ __iowmb(); writel_relaxed((v),(c));})
+#define readl(c)               ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
 
 #endif /* SELFTEST_KVM_PROCESSOR_H */