KVM: arm64: Add KVM_PGTABLE_WALK flags for skipping CMOs and BBM TLBIs
author    Ricardo Koller <ricarkol@google.com>
          Wed, 26 Apr 2023 17:23:20 +0000 (17:23 +0000)
committer Oliver Upton <oliver.upton@linux.dev>
          Tue, 16 May 2023 17:39:17 +0000 (17:39 +0000)
Add two flags to kvm_pgtable_visit_ctx, KVM_PGTABLE_WALK_SKIP_BBM_TLBI
and KVM_PGTABLE_WALK_SKIP_CMO, to indicate that the walk should not
perform break-before-make (BBM) TLB invalidations (TLBIs) or cache
maintenance operations (CMOs). This will be used by a future commit to
create unlinked tables that are not accessible to the hardware
page-table walker.
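For illustration, roughly how a caller might compose the new flags when
setting up a walker (a sketch only: the callback and argument below are
placeholders, not part of this patch):

	struct kvm_pgtable_walker walker = {
		.cb	= visitor_cb,		/* placeholder callback */
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_SKIP_BBM_TLBI |
			  KVM_PGTABLE_WALK_SKIP_CMO,
		.arg	= &data,		/* placeholder argument */
	};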

Signed-off-by: Ricardo Koller <ricarkol@google.com>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Link: https://lore.kernel.org/r/20230426172330.1439644-3-ricarkol@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/kvm/hyp/pgtable.c

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 26a4293726c14c9b9355adfdd748c15ac47ba9be..3f2d43ba2b6286c03625a4e71aa46e0a0d895b59 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -195,6 +195,12 @@ typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
  *                                     with other software walkers.
  * @KVM_PGTABLE_WALK_HANDLE_FAULT:     Indicates the page-table walk was
  *                                     invoked from a fault handler.
+ * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:    Visit and update table entries
+ *                                     without break-before-make's
+ *                                     TLB invalidation.
+ * @KVM_PGTABLE_WALK_SKIP_CMO:         Visit and update table entries
+ *                                     without the required cache
+ *                                     maintenance operations.
  */
 enum kvm_pgtable_walk_flags {
        KVM_PGTABLE_WALK_LEAF                   = BIT(0),
@@ -202,6 +208,8 @@ enum kvm_pgtable_walk_flags {
        KVM_PGTABLE_WALK_TABLE_POST             = BIT(2),
        KVM_PGTABLE_WALK_SHARED                 = BIT(3),
        KVM_PGTABLE_WALK_HANDLE_FAULT           = BIT(4),
+       KVM_PGTABLE_WALK_SKIP_BBM_TLBI          = BIT(5),
+       KVM_PGTABLE_WALK_SKIP_CMO               = BIT(6),
 };
 
 struct kvm_pgtable_visit_ctx {
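For context, break-before-make requires transitioning a live entry
through an invalid state, with a TLB invalidation in between, before a
replacement may be installed. A minimal sketch of the generic sequence
whose TLBI step the new flag elides (illustrative only; not a function
in this patch):

	static void bbm_replace_pte(kvm_pte_t *ptep, kvm_pte_t new)
	{
		WRITE_ONCE(*ptep, 0);	/* break: invalidate the live entry */
		dsb(ishst);		/* order the break before the TLBI */
		__tlbi(vmalls12e1is);	/* flush stale stage-2 translations */
		dsb(ish);		/* complete the TLBI before the make */
		WRITE_ONCE(*ptep, new);	/* make: publish the new entry */
	}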
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index a3246d6cddec7e9d1e167ca50f3779265141bfe0..633679ee3c49a35de4f7e926ac60c66965fc7df0 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -62,6 +62,16 @@ struct kvm_pgtable_walk_data {
        u64                             end;
 };
 
+static bool kvm_pgtable_walk_skip_bbm_tlbi(const struct kvm_pgtable_visit_ctx *ctx)
+{
+       return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI);
+}
+
+static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
+{
+       return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
+}
+
 static bool kvm_phys_is_valid(u64 phys)
 {
        return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
@@ -741,14 +751,17 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
        if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
                return false;
 
-       /*
-        * Perform the appropriate TLB invalidation based on the evicted pte
-        * value (if any).
-        */
-       if (kvm_pte_table(ctx->old, ctx->level))
-               kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
-       else if (kvm_pte_valid(ctx->old))
-               kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
+       if (!kvm_pgtable_walk_skip_bbm_tlbi(ctx)) {
+               /*
+                * Perform the appropriate TLB invalidation based on the
+                * evicted pte value (if any).
+                */
+               if (kvm_pte_table(ctx->old, ctx->level))
+                       kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+               else if (kvm_pte_valid(ctx->old))
+                       kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
+                                    ctx->addr, ctx->level);
+       }
 
        if (stage2_pte_is_counted(ctx->old))
                mm_ops->put_page(ctx->ptep);
@@ -832,11 +845,13 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
                return -EAGAIN;
 
        /* Perform CMOs before installation of the guest stage-2 PTE */
-       if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
+       if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->dcache_clean_inval_poc &&
+           stage2_pte_cacheable(pgt, new))
                mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
-                                               granule);
+                                              granule);
 
-       if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
+       if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->icache_inval_pou &&
+           stage2_pte_executable(new))
                mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
 
        stage2_make_pte(ctx, new);
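Both predicates use unlikely(), hinting that the skip flags are
expected to be clear on the common fault-handling paths. As a usage
sketch, a visitor callback can consult them through the visit context
(hypothetical fragment; only the helpers and ctx fields come from this
file):

	static int example_visitor(const struct kvm_pgtable_visit_ctx *ctx,
				   enum kvm_pgtable_walk_flags visit)
	{
		struct kvm_s2_mmu *mmu = ctx->arg;	/* assumed walker arg */

		kvm_clear_pte(ctx->ptep);
		if (!kvm_pgtable_walk_skip_bbm_tlbi(ctx))
			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
				     ctx->addr, ctx->level);
		return 0;
	}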