return kvm_hv_get_msr(vcpu, msr, pdata);
 }
 
+static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
+                           u16 rep_cnt)
+{
+       struct kvm *kvm = current_vcpu->kvm;
+       struct kvm_vcpu_hv *hv_current = &current_vcpu->arch.hyperv;
+       struct hv_tlb_flush flush;
+       struct kvm_vcpu *vcpu;
+       unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0};
+       int i;
+
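+       /* Read the flush parameter block from guest memory at 'ingpa'. */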
+       if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
+               return HV_STATUS_INVALID_HYPERCALL_INPUT;
+
+       trace_kvm_hv_flush_tlb(flush.processor_mask, flush.address_space,
+                              flush.flags);
+
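+       /*
+        * Reset the scratch cpumask passed to kvm_make_vcpus_request_mask()
+        * below; it collects the host CPUs which need to be kicked.
+        */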
+       cpumask_clear(&hv_current->tlb_lush);
+
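+       /* Collect the vCPUs targeted by the flush request into vcpu_bitmap. */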
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+
+               if (!(flush.flags & HV_FLUSH_ALL_PROCESSORS) &&
+                   (hv->vp_index >= 64 ||
+                   !(flush.processor_mask & BIT_ULL(hv->vp_index))))
+                       continue;
+
+               /*
+                * vcpu->arch.cr3 may not be up-to-date for running vCPUs, so
+                * we can't analyze it here; flush the TLB regardless of the
+                * specified address space.
+                */
+               __set_bit(i, vcpu_bitmap);
+       }
+
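+       /*
+        * Request a TLB flush on all targeted vCPUs; KVM_REQUEST_NO_WAKEUP
+        * avoids waking up vCPUs which are blocked.
+        */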
+       kvm_make_vcpus_request_mask(kvm,
+                                   KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
+                                   vcpu_bitmap, &hv_current->tlb_lush);
+
+       /* We always do a full TLB flush, so report all reps as completed (rep_done = rep_cnt). */
+       return (u64)HV_STATUS_SUCCESS |
+               ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
+}
+
 bool kvm_hv_hypercall_enabled(struct kvm *kvm)
 {
        return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
                vcpu->arch.complete_userspace_io =
                                kvm_hv_hypercall_complete_userspace;
                return 0;
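+       /*
+        * Both flush hypercalls read their parameters from guest memory, so
+        * the fast (register-based) calling convention is not handled here;
+        * the ADDRESS_LIST variant is a rep hypercall while the ADDRESS_SPACE
+        * variant is not.
+        */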
+       case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
+               if (unlikely(fast || !rep_cnt || rep_idx)) {
+                       ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
+                       break;
+               }
+               ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt);
+               break;
+       case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
+               if (unlikely(fast || rep)) {
+                       ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
+                       break;
+               }
+               ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt);
+               break;
        default:
                ret = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }
 
-set_result:
        kvm_hv_hypercall_set_result(vcpu, ret);
        return 1;
 }
 
                        __entry->vcpu_id,
                        __entry->hv_timer_in_use)
 );
+
+/*
+ * Tracepoint for kvm_hv_flush_tlb.
+ */
+TRACE_EVENT(kvm_hv_flush_tlb,
+       TP_PROTO(u64 processor_mask, u64 address_space, u64 flags),
+       TP_ARGS(processor_mask, address_space, flags),
+
+       TP_STRUCT__entry(
+               __field(u64, processor_mask)
+               __field(u64, address_space)
+               __field(u64, flags)
+       ),
+
+       TP_fast_assign(
+               __entry->processor_mask = processor_mask;
+               __entry->address_space = address_space;
+               __entry->flags = flags;
+       ),
+
+       TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx",
+                 __entry->processor_mask, __entry->address_space,
+                 __entry->flags)
+);
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH