 {
 	uint64_t gpa;
-	for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
-		*((volatile uint64_t *)gpa) = gpa;
-
-	GUEST_DONE();
+	for (;;) {
+		for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
+			*((volatile uint64_t *)gpa) = gpa;
+		GUEST_SYNC(0);
+	}
 }
struct vcpu_info {
 static void run_vcpu(struct kvm_vcpu *vcpu)
 {
 	vcpu_run(vcpu);
-	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
 }
static void *vcpu_worker(void *data)
 	struct kvm_vcpu *vcpu = info->vcpu;
 	struct kvm_vm *vm = vcpu->vm;
 	struct kvm_sregs sregs;
-	struct kvm_regs regs;
 	vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);
-	/* Snapshot regs before the first run. */
-	vcpu_regs_get(vcpu, &regs);
 	rendezvous_with_boss();
 	run_vcpu(vcpu);
 	rendezvous_with_boss();
-	vcpu_regs_set(vcpu, &regs);
 	vcpu_sregs_get(vcpu, &sregs);
 #ifdef __x86_64__
 	/* Toggle CR0.WP to trigger a MMU context reset. */