KVM: arm64: Add helpers to pin memory shared with the hypervisor at EL2
authorQuentin Perret <qperret@google.com>
Thu, 10 Nov 2022 19:02:41 +0000 (19:02 +0000)
committerMarc Zyngier <maz@kernel.org>
Fri, 11 Nov 2022 16:40:54 +0000 (16:40 +0000)
Add helpers allowing the hypervisor to check whether a range of pages
is currently shared by the host, and 'pin' them if so by blocking host
unshare operations until the memory has been unpinned.

This will allow the hypervisor to take references on host-provided
data-structures (e.g. 'struct kvm') with the guarantee that these pages
will remain in a stable state until the hypervisor decides to release
them, for example during guest teardown.

Tested-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-9-will@kernel.org
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
arch/arm64/kvm/hyp/include/nvhe/memory.h
arch/arm64/kvm/hyp/nvhe/mem_protect.c

index c87b19b2d468085b4e262ca77454e0cf00c558a3..998bf165af711d106118bdd4b3cccbcd7da2c75b 100644 (file)
@@ -69,6 +69,9 @@ int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id);
 int kvm_host_prepare_stage2(void *pgt_pool_base);
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 
+int hyp_pin_shared_mem(void *from, void *to);
+void hyp_unpin_shared_mem(void *from, void *to);
+
 static __always_inline void __load_host_stage2(void)
 {
        if (static_branch_likely(&kvm_protected_mode_initialized))
index 9422900e5c6a0ca24caff3950f63ee774cb76983..ab205c4d67748f688c95ed63d00c18b1eca1c7ef 100644 (file)
@@ -55,10 +55,15 @@ static inline void hyp_page_ref_inc(struct hyp_page *p)
        p->refcount++;
 }
 
-static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
+/* Drop one reference on @p; BUG()s if the refcount is already zero. */
+static inline void hyp_page_ref_dec(struct hyp_page *p)
 {
        BUG_ON(!p->refcount);
        p->refcount--;
+}
+
+/* Drop one reference on @p and return true iff it dropped to zero. */
+static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
+{
+       hyp_page_ref_dec(p);
        return (p->refcount == 0);
 }
 
index f7e3afaf9f1192eb876e9a2d6a562f969062d46b..83c2f67e1b5827ffc76b514e60e050524725994c 100644 (file)
@@ -625,6 +625,9 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
 {
        u64 size = tx->nr_pages * PAGE_SIZE;
 
+       /* Refuse host-initiated unshares while the pages are pinned at EL2. */
+       if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
+               return -EBUSY;
+
        if (__hyp_ack_skip_pgtable_check(tx))
                return 0;
 
@@ -1038,3 +1041,48 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
 
        return ret;
 }
+
+/*
+ * hyp_pin_shared_mem() - Pin a range of memory shared by the host with EL2.
+ * @from:	hyp VA of the first byte of the range to pin.
+ * @to:	hyp VA one past the last byte of the range to pin.
+ *
+ * With the host and hyp component locks held, check that every page in the
+ * page-aligned range covering [@from, @to) is SHARED_OWNED on the host side
+ * and SHARED_BORROWED on the hyp side, then take one reference on each page.
+ * A non-zero refcount causes hyp_ack_unshare() to reject host unshare
+ * requests with -EBUSY, so the range stays stable until a matching
+ * hyp_unpin_shared_mem() call drops the references.
+ *
+ * Return: 0 on success, a negative error code if the state checks fail
+ * (in which case no references are taken).
+ */
+int hyp_pin_shared_mem(void *from, void *to)
+{
+       u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+       u64 end = PAGE_ALIGN((u64)to);
+       u64 size = end - start;
+       int ret;
+
+       host_lock_component();
+       hyp_lock_component();
+
+       ret = __host_check_page_state_range(__hyp_pa(start), size,
+                                           PKVM_PAGE_SHARED_OWNED);
+       if (ret)
+               goto unlock;
+
+       ret = __hyp_check_page_state_range(start, size,
+                                          PKVM_PAGE_SHARED_BORROWED);
+       if (ret)
+               goto unlock;
+
+       for (cur = start; cur < end; cur += PAGE_SIZE)
+               hyp_page_ref_inc(hyp_virt_to_page(cur));
+
+unlock:
+       hyp_unlock_component();
+       host_unlock_component();
+
+       return ret;
+}
+
+/*
+ * hyp_unpin_shared_mem() - Release references taken by hyp_pin_shared_mem().
+ * @from:	hyp VA of the first byte of the previously pinned range.
+ * @to:	hyp VA one past the last byte of the previously pinned range.
+ *
+ * Drop one reference per page in the page-aligned range covering
+ * [@from, @to), allowing the host to unshare the memory again once the
+ * last pin is gone. Must balance a successful hyp_pin_shared_mem() call
+ * on the same range.
+ */
+void hyp_unpin_shared_mem(void *from, void *to)
+{
+       u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+       u64 end = PAGE_ALIGN((u64)to);
+
+       host_lock_component();
+       hyp_lock_component();
+
+       for (cur = start; cur < end; cur += PAGE_SIZE)
+               hyp_page_ref_dec(hyp_virt_to_page(cur));
+
+       hyp_unlock_component();
+       host_unlock_component();
+}