KVM: arm64: Provide the host_stage2_try() helper macro
author Quentin Perret <qperret@google.com>
Mon, 9 Aug 2021 15:24:30 +0000 (16:24 +0100)
committer Marc Zyngier <maz@kernel.org>
Wed, 11 Aug 2021 10:39:36 +0000 (11:39 +0100)
We currently unmap all MMIO mappings from the host stage-2 to recycle
the pages whenever we run out. In order to make this pattern easy to
re-use from other places, factor the logic out into a dedicated macro.
While at it, apply the macro to the kvm_pgtable_stage2_set_owner()
calls. They're currently only called early on and are guaranteed to
succeed, but making them robust to the -ENOMEM case doesn't hurt and
will avoid painful debugging sessions later on.

Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210809152448.1810400-4-qperret@google.com
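
For illustration only (not part of the patch): a caller is expected to hold host_kvm.lock and hand host_stage2_try() the fallible stage-2 operation together with its arguments; if that operation returns -ENOMEM, the macro recycles the MMIO mappings once and retries it. A minimal sketch, wrapping the existing __host_stage2_idmap() helper in a hypothetical function:

static int host_stage2_idmap_example(u64 start, u64 end,
				     enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&host_kvm.lock);
	/* Retries __host_stage2_idmap() once after recycling MMIO pages. */
	ret = host_stage2_try(__host_stage2_idmap, start, end, prot);
	hyp_spin_unlock(&host_kvm.lock);

	return ret;
}

The real call sites introduced by this patch are visible in the diff below.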
arch/arm64/kvm/hyp/nvhe/mem_protect.c

index d938ce95d3bdf255251cc0c69722c89275832749..74280a753efb3a1a4ff76318602326a311c42b17 100644 (file)
@@ -208,6 +208,25 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
                                      prot, &host_s2_pool);
 }
 
+/*
+ * The pool has been provided with enough pages to cover all of memory with
+ * page granularity, but it is difficult to know how much of the MMIO range
+ * we will need to cover upfront, so we may need to 'recycle' the pages if we
+ * run out.
+ */
+#define host_stage2_try(fn, ...)                                       \
+       ({                                                              \
+               int __ret;                                              \
+               hyp_assert_lock_held(&host_kvm.lock);                   \
+               __ret = fn(__VA_ARGS__);                                \
+               if (__ret == -ENOMEM) {                                 \
+                       __ret = host_stage2_unmap_dev_all();            \
+                       if (!__ret)                                     \
+                               __ret = fn(__VA_ARGS__);                \
+               }                                                       \
+               __ret;                                                  \
+        })
+
 static int host_stage2_idmap(u64 addr)
 {
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
@@ -223,22 +242,7 @@ static int host_stage2_idmap(u64 addr)
        if (ret)
                goto unlock;
 
-       ret = __host_stage2_idmap(range.start, range.end, prot);
-       if (ret != -ENOMEM)
-               goto unlock;
-
-       /*
-        * The pool has been provided with enough pages to cover all of memory
-        * with page granularity, but it is difficult to know how much of the
-        * MMIO range we will need to cover upfront, so we may need to 'recycle'
-        * the pages if we run out.
-        */
-       ret = host_stage2_unmap_dev_all();
-       if (ret)
-               goto unlock;
-
-       ret = __host_stage2_idmap(range.start, range.end, prot);
-
+       ret = host_stage2_try(__host_stage2_idmap, range.start, range.end, prot);
 unlock:
        hyp_spin_unlock(&host_kvm.lock);
 
@@ -257,8 +261,8 @@ int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
                return -EINVAL;
 
        hyp_spin_lock(&host_kvm.lock);
-       ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
-                                          &host_s2_pool, pkvm_hyp_id);
+       ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
+                             start, end - start, &host_s2_pool, pkvm_hyp_id);
        hyp_spin_unlock(&host_kvm.lock);
 
        return ret != -EAGAIN ? ret : 0;