static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
+       vcpu_set_flag(vcpu, INCREMENT_PC);
 }
 
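+/*
+ * Pend an exception on the vcpu: raises PENDING_EXCEPTION along with
+ * the flag encoding the target EL/mode, e.g.
+ * kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC).
+ */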
+#define kvm_pend_exception(v, e)                                       \
+       do {                                                            \
+               vcpu_set_flag((v), PENDING_EXCEPTION);                  \
+               vcpu_set_flag((v), e);                                  \
+       } while (0)
+
 static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
 {
        return test_bit(feature, vcpu->arch.features);
 
 /* PTRAUTH exposed to guest */
 #define GUEST_HAS_PTRAUTH      __vcpu_single_flag(cflags, BIT(2))
 
+/* Exception pending */
+#define PENDING_EXCEPTION      __vcpu_single_flag(iflags, BIT(0))
+/*
+ * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
+ * be set together with an exception...
+ */
+#define INCREMENT_PC           __vcpu_single_flag(iflags, BIT(1))
+/* Target EL/MODE (not a single flag, but let's abuse the macro) */
+#define EXCEPT_MASK            __vcpu_single_flag(iflags, GENMASK(3, 1))
+
+/* Helpers to encode exceptions with minimum fuss */
+#define __EXCEPT_MASK_VAL      unpack_vcpu_flag(EXCEPT_MASK)
+#define __EXCEPT_SHIFT         __builtin_ctzl(__EXCEPT_MASK_VAL)
+#define __vcpu_except_flags(_f)        iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL
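+/*
+ * e.g. with EXCEPT_MASK covering GENMASK(3, 1), __vcpu_except_flags(3)
+ * expands to "iflags, (3 << 1), GENMASK(3, 1)": the encoded value
+ * shifted into the field, plus the mask it is accessed through.
+ */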
+
+/*
+ * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
+ * values:
+ *
+ * For AArch32 EL1:
+ */
+#define EXCEPT_AA32_UND        __vcpu_except_flags(0)
+#define EXCEPT_AA32_IABT       __vcpu_except_flags(1)
+#define EXCEPT_AA32_DABT       __vcpu_except_flags(2)
+/* For AArch64: */
+#define EXCEPT_AA64_EL1_SYNC   __vcpu_except_flags(0)
+#define EXCEPT_AA64_EL1_IRQ    __vcpu_except_flags(1)
+#define EXCEPT_AA64_EL1_FIQ    __vcpu_except_flags(2)
+#define EXCEPT_AA64_EL1_SERR   __vcpu_except_flags(3)
+/* For AArch64 with NV (one day): */
+#define EXCEPT_AA64_EL2_SYNC   __vcpu_except_flags(4)
+#define EXCEPT_AA64_EL2_IRQ    __vcpu_except_flags(5)
+#define EXCEPT_AA64_EL2_FIQ    __vcpu_except_flags(6)
+#define EXCEPT_AA64_EL2_SERR   __vcpu_except_flags(7)
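+/*
+ * These encodings are set with kvm_pend_exception() and matched by
+ * switching on vcpu_get_flag(v, EXCEPT_MASK) against the
+ * corresponding unpack_vcpu_flag() values.
+ */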
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +     \
 /* vcpu_arch flags field values: */
 #define KVM_ARM64_DEBUG_DIRTY          (1 << 0)
 #define KVM_ARM64_HOST_SVE_ENABLED     (1 << 4) /* SVE enabled for EL0 */
-#define KVM_ARM64_PENDING_EXCEPTION    (1 << 8) /* Exception pending */
-/*
- * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
- * set together with an exception...
- */
-#define KVM_ARM64_INCREMENT_PC         (1 << 9) /* Increment PC */
-#define KVM_ARM64_EXCEPT_MASK          (7 << 9) /* Target EL/MODE */
-/*
- * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
- * take the following values:
- *
- * For AArch32 EL1:
- */
-#define KVM_ARM64_EXCEPT_AA32_UND      (0 << 9)
-#define KVM_ARM64_EXCEPT_AA32_IABT     (1 << 9)
-#define KVM_ARM64_EXCEPT_AA32_DABT     (2 << 9)
-/* For AArch64: */
-#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC (0 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ  (1 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ  (2 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_SERR (3 << 9)
-#define KVM_ARM64_EXCEPT_AA64_EL1      (0 << 11)
-#define KVM_ARM64_EXCEPT_AA64_EL2      (1 << 11)
-
 #define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active  */
 #define KVM_ARM64_DEBUG_STATE_SAVE_TRBE        (1 << 13) /* Save TRBE context if active  */
 #define KVM_ARM64_ON_UNSUPPORTED_CPU   (1 << 15) /* Physical CPU not in supported_cpus */
 
         * the vcpu state. Note that this relies on __kvm_adjust_pc()
         * being preempt-safe on VHE.
         */
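+       /*
+        * PENDING_EXCEPTION and INCREMENT_PC are single-bit flags now,
+        * so they are tested individually rather than through a
+        * combined mask.
+        */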
-       if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
-                                        KVM_ARM64_INCREMENT_PC)))
+       if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
+                    vcpu_get_flag(vcpu, INCREMENT_PC)))
                kvm_call_hyp(__kvm_adjust_pc, vcpu);
 
        vcpu_put(vcpu);
 
 static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 {
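+       /*
+        * vcpu_get_flag() on the multi-bit EXCEPT_MASK returns the whole
+        * masked field, so the case labels compare it against the
+        * unpack_vcpu_flag() encodings of the EXCEPT_* values.
+        */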
        if (vcpu_el1_is_32bit(vcpu)) {
-               switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
-               case KVM_ARM64_EXCEPT_AA32_UND:
+               switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
+               case unpack_vcpu_flag(EXCEPT_AA32_UND):
                        enter_exception32(vcpu, PSR_AA32_MODE_UND, 4);
                        break;
-               case KVM_ARM64_EXCEPT_AA32_IABT:
+               case unpack_vcpu_flag(EXCEPT_AA32_IABT):
                        enter_exception32(vcpu, PSR_AA32_MODE_ABT, 12);
                        break;
-               case KVM_ARM64_EXCEPT_AA32_DABT:
+               case unpack_vcpu_flag(EXCEPT_AA32_DABT):
                        enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16);
                        break;
                default:
                        break;
                }
        } else {
-               switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
-               case (KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
-                     KVM_ARM64_EXCEPT_AA64_EL1):
+               switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
+               case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
                        enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
                        break;
                default:
  */
 void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
+       if (vcpu_get_flag(vcpu, PENDING_EXCEPTION)) {
                kvm_inject_exception(vcpu);
-               vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
-                                     KVM_ARM64_EXCEPT_MASK);
-       } else  if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
+               vcpu_clear_flag(vcpu, PENDING_EXCEPTION);
+               vcpu_clear_flag(vcpu, EXCEPT_MASK);
+       } else if (vcpu_get_flag(vcpu, INCREMENT_PC)) {
                kvm_skip_instr(vcpu);
-               vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
+               vcpu_clear_flag(vcpu, INCREMENT_PC);
        }
 }
 
        *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
        *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
 
-       vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
-                            KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
-                            KVM_ARM64_PENDING_EXCEPTION);
+       kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
 
        __kvm_adjust_pc(vcpu);
 
 
        bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
        u64 esr = 0;
 
-       vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1          |
-                            KVM_ARM64_EXCEPT_AA64_ELx_SYNC     |
-                            KVM_ARM64_PENDING_EXCEPTION);
+       kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
 
        vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
 
 {
        u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
 
-       vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1          |
-                            KVM_ARM64_EXCEPT_AA64_ELx_SYNC     |
-                            KVM_ARM64_PENDING_EXCEPTION);
+       kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
 
        /*
         * Build an unknown exception, depending on the instruction
 
 static void inject_undef32(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_UND |
-                            KVM_ARM64_PENDING_EXCEPTION);
+       kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
 }
 
 /*
        far = vcpu_read_sys_reg(vcpu, FAR_EL1);
 
        if (is_pabt) {
-               vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IABT |
-                                    KVM_ARM64_PENDING_EXCEPTION);
+               kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
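+               /* The AArch32 IFAR maps onto FAR_EL1[63:32] */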
                far &= GENMASK(31, 0);
                far |= (u64)addr << 32;
                vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
        } else { /* !iabt */
-               vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_DABT |
-                                    KVM_ARM64_PENDING_EXCEPTION);
+               kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
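+               /* The AArch32 DFAR maps onto FAR_EL1[31:0] */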
                far &= GENMASK(63, 32);
                far |= addr;
                vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);