--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ ... @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
enum kvm_arch_timer_regs treg);
+static bool has_cntpoff(void)
+{
+ return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
+}
+
u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
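The new has_cntpoff() gate pairs has_vhe() with the ARM64_HAS_ECV_CNTPOFF capability: CNTPOFF_EL2 only exists with FEAT_ECV, and this series only drives it from VHE. With CNTHCTL_EL2.ECV set, EL0/EL1 reads of the physical counter return the host count minus CNTPOFF_EL2. A minimal sketch of the resulting guest view (illustration only; guest_cntpct() is not part of the patch):

#include <linux/types.h>

/* Guest-visible CNTPCT_EL0 once CNTHCTL_EL2.ECV is set and
 * CNTPOFF_EL2 holds the VM's physical counter offset. */
static inline u64 guest_cntpct(u64 host_cntpct, u64 cntpoff)
{
	return host_cntpct - cntpoff;
}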
@@ ... @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
- if (ctxt->offset.vm_offset)
+ if (ctxt && ctxt->offset.vm_offset)
		return *ctxt->offset.vm_offset;

	return 0;
}
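As I read the hunk above, the extra ctxt check makes timer_get_offset() safe to call with a NULL context, returning a zero offset instead of dereferencing NULL, e.g.:

	u64 off = timer_get_offset(NULL);	/* now yields 0, no NULL deref */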
@@ ... @@
static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

+static void set_cntpoff(u64 cntpoff)
+{
+ if (has_cntpoff())
+ write_sysreg_s(cntpoff, SYS_CNTPOFF_EL2);
+}
+
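Using write_sysreg_s() with an explicit encoding avoids depending on assembler support for the FEAT_ECV register name. For reference, my reading of the Arm ARM puts CNTPOFF_EL2 at S3_4_C14_C0_6 (worth double-checking before relying on it):

	#define SYS_CNTPOFF_EL2		sys_reg(3, 4, 14, 0, 6)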
@@ ... @@
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
@@ ... @@ static void timer_save_state(struct arch_timer_context *ctx)
		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

+ set_cntpoff(0);
break;
case NR_KVM_TIMERS:
BUG();
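Clearing CNTPOFF_EL2 when the guest's timer state is saved matters on VHE: if the ECV offsetting were left live, host EL0/EL1 reads of the physical counter would be skewed by the guest's offset. A hypothetical sketch of the pairing (vcpu_ptimer_switch() is invented for illustration):

/* Illustration only: the offset is live only while the guest's
 * timer state is loaded on this CPU. */
static void vcpu_ptimer_switch(struct arch_timer_context *ctx,
			       bool loading_guest)
{
	if (loading_guest)
		set_cntpoff(timer_get_offset(ctx));	/* guest view */
	else
		set_cntpoff(0);				/* host view */
}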
@@ ... @@ static void timer_restore_state(struct arch_timer_context *ctx)
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
break;
case TIMER_PTIMER:
+ set_cntpoff(timer_get_offset(ctx));
write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
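Note the ordering on the restore side: the offset is programmed before CVAL is written and the ISB completes, so re-enabling CTL arms the comparator against the guest's view of the count. Architecturally, the EL1 physical timer fires on a signed comparison of the offset count against CVAL; as a sketch (not patch code):

/* Timer condition per the architecture: fires when the signed
 * difference ((CNTPCT - CNTPOFF) - CVAL) is >= 0. */
static inline bool ptimer_fires(u64 host_cntpct, u64 cntpoff, u64 cval)
{
	return (s64)(host_cntpct - cntpoff - cval) >= 0;
}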
@@ ... @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
	vtimer->vcpu = vcpu;
vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset;
ptimer->vcpu = vcpu;
+ ptimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.poffset;
/* Synchronize cntvoff across all vtimers of a VM. */
timer_set_offset(vtimer, kvm_phys_timer_read());
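With this hunk, both timer contexts point into per-VM storage, so an offset written through any vcpu's context applies VM-wide. The shape this assumes, sketched from the field names above (the real struct lives in the arch timer headers):

/* Hedged sketch of the per-VM data the vm_offset pointers target. */
struct arch_timer_vm_data {
	u64	voffset;	/* CNTVOFF_EL2, shared by all vtimers */
	u64	poffset;	/* CNTPOFF_EL2, shared by all ptimers */
};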
@@ ... @@ void kvm_timer_init_vhe(void)
	val = read_sysreg(cnthctl_el2);
val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
+ if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
+ val |= CNTHCTL_ECV;
write_sysreg(val, cnthctl_el2);
}
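CNTHCTL_EL2.ECV is bit 12 in both the E2H=0 and E2H=1 register layouts, which is why it does not need the cnthctl_shift applied to EL1PCEN/EL1PCTEN above; setting it is what actually enables CNTPOFF_EL2 offsetting for EL1/EL0 physical counter reads. For reference, a define in the style of the existing CNTHCTL bits:

	#define CNTHCTL_ECV	(1 << 12)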