struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
- return -EINVAL;
+ return -ENOENT;
*out_val = 0;
if (kvm_riscv_aia_available())
*out_val = ((unsigned long *)csr)[reg_num];
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
- return -EINVAL;
+ return -ENOENT;
if (kvm_riscv_aia_available()) {
((unsigned long *)csr)[reg_num] = val;
reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
reg_val = &cntx->fp.f.f[reg_num];
else
- return -EINVAL;
+ return -ENOENT;
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
riscv_isa_extension_available(vcpu->arch.isa, d)) {
if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
reg_val = &cntx->fp.d.fcsr;
} else if ((KVM_REG_SIZE(reg->id) == sizeof(u64)) &&
reg_num <= KVM_REG_RISCV_FP_D_REG(f[31]))
reg_val = &cntx->fp.d.f[reg_num];
else
- return -EINVAL;
+ return -ENOENT;
} else
- return -EINVAL;
+ return -ENOENT;
if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
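
(Illustration, not part of the patch: with the FP hunks above, a VMM can probe whether the guest vCPU actually has the F extension by reading any F register and treating ENOENT as "not present", instead of conflating it with a malformed request. A minimal sketch, assuming the register-ID macros from the kernel's uapi <asm/kvm.h> and an illustrative vcpu_fd; guest_has_f() is a hypothetical helper:)

  #include <errno.h>
  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Hypothetical helper; F registers are 32-bit in the ONE_REG ABI. */
  static int guest_has_f(int vcpu_fd)
  {
  	uint32_t f0;
  	struct kvm_one_reg reg = {
  		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
  			KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
  		.addr = (uint64_t)(unsigned long)&f0,
  	};

  	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
  		return 1;			/* F register exists */
  	return (errno == ENOENT) ? 0 : -1;	/* 0: no F; -1: caller bug */
  }
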
reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
reg_val = &cntx->fp.f.f[reg_num];
else
- return -EINVAL;
+ return -ENOENT;
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
riscv_isa_extension_available(vcpu->arch.isa, d)) {
if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
reg_val = &cntx->fp.d.fcsr;
} else if ((KVM_REG_SIZE(reg->id) == sizeof(u64)) &&
reg_num <= KVM_REG_RISCV_FP_D_REG(f[31]))
reg_val = &cntx->fp.d.f[reg_num];
else
- return -EINVAL;
+ return -ENOENT;
} else
- return -EINVAL;
+ return -ENOENT;
if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
reg_val = satp_mode >> SATP_MODE_SHIFT;
break;
default:
- return -EINVAL;
+ return -ENOENT;
}
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
break;
default:
- return -EINVAL;
+ return -ENOENT;
}
return 0;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
- return -EINVAL;
+ return -ENOENT;
if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
reg_val = cntx->sepc;
else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
reg_val = (cntx->sstatus & SR_SPP) ?
KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
else
- return -EINVAL;
+ return -ENOENT;
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
- return -EINVAL;
+ return -ENOENT;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
if (reg_val == KVM_RISCV_MODE_S)
cntx->sstatus |= SR_SPP;
else
cntx->sstatus &= ~SR_SPP;
} else
- return -EINVAL;
+ return -ENOENT;
return 0;
}
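
(As a worked example; assumptions: rv64 guest, so ULONG core registers are 64-bit, vcpu_fd and set_guest_pc() are illustrative names, same includes as the sketch above. The comment spells out the error contract the core-register hunks establish:)

  static int set_guest_pc(int vcpu_fd, uint64_t pc)
  {
  	struct kvm_one_reg reg = {
  		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
  			KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
  		.addr = (uint64_t)(unsigned long)&pc,
  	};

  	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) == 0)
  		return 0;
  	/*
  	 * ENOENT: reg_num is outside struct kvm_riscv_core (no such reg).
  	 * EINVAL: malformed access, e.g. KVM_REG_SIZE(reg->id) mismatch.
  	 * EFAULT: bad userspace pointer in reg.addr.
  	 */
  	return -errno;
  }
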
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
- return -EINVAL;
+ return -ENOENT;
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
kvm_riscv_vcpu_flush_interrupts(vcpu);
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
- return -EINVAL;
+ return -ENOENT;
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
reg_val &= VSIP_VALID_MASK;
rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
break;
default:
- rc = -EINVAL;
+ rc = -ENOENT;
break;
}
if (rc)
rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
break;
default:
- rc = -EINVAL;
+ rc = -ENOENT;
break;
}
if (rc)
if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
- return -EINVAL;
+ return -ENOENT;
*reg_val = 0;
host_isa_ext = kvm_isa_ext_arr[reg_num];
if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
- return -EINVAL;
+ return -ENOENT;
host_isa_ext = kvm_isa_ext_arr[reg_num];
if (!__riscv_isa_extension_available(NULL, host_isa_ext))
unsigned long i, ext_id, ext_val;
if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
- return -EINVAL;
+ return -ENOENT;
for (i = 0; i < BITS_PER_LONG; i++) {
ext_id = i + reg_num * BITS_PER_LONG;
unsigned long i, ext_id;
if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
- return -EINVAL;
+ return -ENOENT;
for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
ext_id = i + reg_num * BITS_PER_LONG;
reg_val = ~reg_val;
break;
default:
- rc = -EINVAL;
+ rc = -ENOENT;
}
if (rc)
return rc;
case KVM_REG_RISCV_SBI_MULTI_DIS:
return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
default:
- return -EINVAL;
+ return -ENOENT;
}
return 0;
break;
}
- return -EINVAL;
+ return -ENOENT;
}
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
break;
}
- return -EINVAL;
+ return -ENOENT;
}
const struct kvm_riscv_sbi_extension_entry *sext = NULL;
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
- if (reg_num >= KVM_RISCV_SBI_EXT_MAX ||
- (reg_val != 1 && reg_val != 0))
+ if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
+ return -ENOENT;
+
+ if (reg_val != 1 && reg_val != 0)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
- return -EINVAL;
+ return -ENOENT;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
if (sbi_ext[i].ext_idx == reg_num) {
unsigned long i, ext_id;
if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
- return -EINVAL;
+ return -ENOENT;
for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
ext_id = i + reg_num * BITS_PER_LONG;
unsigned long i, ext_id, ext_val;
if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
- return -EINVAL;
+ return -ENOENT;
for (i = 0; i < BITS_PER_LONG; i++) {
ext_id = i + reg_num * BITS_PER_LONG;
case KVM_REG_RISCV_SBI_MULTI_DIS:
return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
default:
- return -EINVAL;
+ return -ENOENT;
}
return 0;
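
(The SBI_EXT single-register path above is where the two error codes now coexist in one function: an extension index at or beyond KVM_RISCV_SBI_EXT_MAX is ENOENT, meaning no such register, while a value other than 0 or 1 stays EINVAL, meaning a real register given a bad value. A hedged sketch of how that reads from userspace; same includes and illustrative vcpu_fd as earlier, rv64 assumed for the ULONG register size, set_sbi_ext() is a hypothetical helper:)

  static int set_sbi_ext(int vcpu_fd, uint64_t ext_idx, uint64_t enable)
  {
  	struct kvm_one_reg reg = {
  		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
  			KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
  			ext_idx,
  		.addr = (uint64_t)(unsigned long)&enable,
  	};

  	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) ? -errno : 0;
  }

  /*
   * set_sbi_ext(fd, KVM_RISCV_SBI_EXT_PMU, 1) -> 0, or -ENOENT when the
   *   index is out of range for this kernel;
   * set_sbi_ext(fd, KVM_RISCV_SBI_EXT_PMU, 2) -> -EINVAL (bad value).
   */
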
reg_val = ~reg_val;
break;
default:
- rc = -EINVAL;
+ rc = -ENOENT;
}
if (rc)
return rc;
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
- return -EINVAL;
+ return -ENOENT;
switch (reg_num) {
case KVM_REG_RISCV_TIMER_REG(frequency):
KVM_RISCV_TIMER_STATE_OFF;
break;
default:
- return -EINVAL;
+ return -ENOENT;
}
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
- return -EINVAL;
+ return -ENOENT;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
ret = kvm_riscv_vcpu_timer_cancel(t);
break;
default:
- ret = -EINVAL;
+ ret = -ENOENT;
break;
}
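
(Taken together, every *_one_reg() path in this series now follows one contract: EFAULT for a bad user pointer, EINVAL for a malformed request, ENOENT for a register that does not exist. A minimal end-to-end probing sketch under the same assumptions as above; vcpu_fd, probe_reg() and show_timebase() are illustrative names, and timer registers are u64 on both rv32 and rv64:)

  #include <errno.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Hypothetical helper: 0 = present (*val filled), 1 = absent, -1 = caller bug. */
  static int probe_reg(int vcpu_fd, uint64_t id, uint64_t *val)
  {
  	struct kvm_one_reg reg = {
  		.id = id, .addr = (uint64_t)(unsigned long)val,
  	};

  	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
  		return 0;
  	return (errno == ENOENT) ? 1 : -1;
  }

  static void show_timebase(int vcpu_fd)
  {
  	uint64_t freq;
  	uint64_t id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
  		      KVM_REG_RISCV_TIMER |
  		      KVM_REG_RISCV_TIMER_REG(frequency);

  	if (probe_reg(vcpu_fd, id, &freq) == 0)
  		printf("timebase: %llu Hz\n", (unsigned long long)freq);
  }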