KVM: arm64: Add helper for creating unlinked stage2 subtrees
author		Ricardo Koller <ricarkol@google.com>
		Wed, 26 Apr 2023 17:23:21 +0000 (17:23 +0000)
committer	Oliver Upton <oliver.upton@linux.dev>
		Tue, 16 May 2023 17:39:18 +0000 (17:39 +0000)
Add a stage2 helper, kvm_pgtable_stage2_create_unlinked(), for
creating unlinked tables (the counterpart of
kvm_pgtable_stage2_free_unlinked()).  Creating an unlinked table is
useful for splitting level 1 and 2 entries into subtrees of PAGE_SIZE
PTEs.  For example, a level 1 entry can be split into PAGE_SIZE PTEs
by first creating a fully populated tree and then using it to replace
the level 1 entry in a single step.  This will be used in a subsequent
commit for eager huge-page splitting (a dirty-logging optimization).
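
As an illustration only (not part of this patch), a caller-side sketch
of the intended split flow.  The wrapper stage2_split_block() and its
surrounding locking context are hypothetical; kvm_init_table_pte() is
the existing static helper in pgtable.c, and only the *_unlinked()
helpers come from this series:

	static int stage2_split_block(struct kvm_pgtable *pgt, kvm_pte_t *ptep,
				      u64 phys, u32 level,
				      enum kvm_pgtable_prot prot, void *mc)
	{
		kvm_pte_t *childp;

		/* Build a fully populated subtree, invisible to the walker. */
		childp = kvm_pgtable_stage2_create_unlinked(pgt, phys, level,
							    prot, mc, false);
		if (IS_ERR(childp))
			return PTR_ERR(childp);

		/*
		 * Swap the block entry for a table entry pointing at the
		 * new subtree in one step (break-before-make, barriers and
		 * TLB maintenance omitted for brevity).
		 */
		WRITE_ONCE(*ptep, kvm_init_table_pte(childp, pgt->mm_ops));

		return 0;
	}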

Signed-off-by: Ricardo Koller <ricarkol@google.com>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Link: https://lore.kernel.org/r/20230426172330.1439644-4-ricarkol@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/kvm/hyp/pgtable.c

index 3f2d43ba2b6286c03625a4e71aa46e0a0d895b59..c8e0e7d9303b26833ccc5df12767906964956d08 100644 (file)
@@ -458,6 +458,32 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
  */
 void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
 
+/**
+ * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
+ * @pgt:       Page-table structure initialised by kvm_pgtable_stage2_init*().
+ * @phys:      Physical address of the memory to map.
+ * @level:     Starting level of the stage-2 paging structure to be created.
+ * @prot:      Permissions and attributes for the mapping.
+ * @mc:                Cache of pre-allocated and zeroed memory from which to allocate
+ *             page-table pages.
+ * @force_pte:  Force mappings to PAGE_SIZE granularity.
+ *
+ * Returns an unlinked page-table tree.  This new page-table tree is
+ * not reachable (i.e., it is unlinked) from the root pgd and it's
+ * therefore unreachable by the hardware page-table walker. No TLB
+ * invalidation or CMOs are performed.
+ *
+ * If device attributes are not explicitly requested in @prot, then the
+ * mapping will be normal, cacheable.
+ *
+ * Return: The fully populated (unlinked) stage-2 paging structure, or
+ * an ERR_PTR(error) on failure.
+ */
+kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
+                                             u64 phys, u32 level,
+                                             enum kvm_pgtable_prot prot,
+                                             void *mc, bool force_pte);
+
 /**
  * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
  * @pgt:       Page-table structure initialised by kvm_pgtable_stage2_init*().
index 633679ee3c49a35de4f7e926ac60c66965fc7df0..56edffc02bc64f31888d8909d31095e044abcc7f 100644 (file)
@@ -1222,6 +1222,59 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
        return kvm_pgtable_walk(pgt, addr, size, &walker);
 }
 
+kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
+                                             u64 phys, u32 level,
+                                             enum kvm_pgtable_prot prot,
+                                             void *mc, bool force_pte)
+{
+       struct stage2_map_data map_data = {
+               .phys           = phys,
+               .mmu            = pgt->mmu,
+               .memcache       = mc,
+               .force_pte      = force_pte,
+       };
+       struct kvm_pgtable_walker walker = {
+               .cb             = stage2_map_walker,
+               .flags          = KVM_PGTABLE_WALK_LEAF |
+                                 KVM_PGTABLE_WALK_SKIP_BBM_TLBI |
+                                 KVM_PGTABLE_WALK_SKIP_CMO,
+               .arg            = &map_data,
+       };
+       /*
+        * The input address (.addr) is irrelevant for walking an
+        * unlinked table. Construct an ambiguous IA range to map
+        * kvm_granule_size(level) worth of memory.
+        */
+       struct kvm_pgtable_walk_data data = {
+               .walker = &walker,
+               .addr   = 0,
+               .end    = kvm_granule_size(level),
+       };
+       struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
+       kvm_pte_t *pgtable;
+       int ret;
+
+       if (!IS_ALIGNED(phys, kvm_granule_size(level)))
+               return ERR_PTR(-EINVAL);
+
+       ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
+       if (ret)
+               return ERR_PTR(ret);
+
+       pgtable = mm_ops->zalloc_page(mc);
+       if (!pgtable)
+               return ERR_PTR(-ENOMEM);
+
+       ret = __kvm_pgtable_walk(&data, mm_ops, (kvm_pteref_t)pgtable,
+                                level + 1);
+       if (ret) {
+               kvm_pgtable_stage2_free_unlinked(mm_ops, pgtable, level);
+               mm_ops->put_page(pgtable);
+               return ERR_PTR(ret);
+       }
+
+       return pgtable;
+}
 
 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
                              struct kvm_pgtable_mm_ops *mm_ops,
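
A quick sanity check on the walk range used in the hunk above
(assuming 4KiB pages, i.e. PAGE_SHIFT = 12, and the
KVM_PGTABLE_MAX_LEVELS = 4 layout the helper already assumes):

	/*
	 * kvm_granule_shift(level) = (PAGE_SHIFT - 3) * (4 - level) + 3
	 * With 4KiB pages:
	 *   level 1: shift 30 -> kvm_granule_size(1) = 1 GiB
	 *   level 2: shift 21 -> kvm_granule_size(2) = 2 MiB
	 * Walking [0, kvm_granule_size(level)) therefore visits exactly
	 * the PTEs needed to back one entry at @level, which is why the
	 * input address itself is irrelevant for an unlinked table.
	 */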