#include "qemu/atomic.h"
#include "qemu/osdep.h"
+#include "qemu/bswap.h"
#include "qemu/memfd.h"
#include "libvhost-user.h"
DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
dev->features = vmsg->payload.u64;
+ if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) {
+ /*
+ * We only support devices conforming to VIRTIO 1.0 or later:
+ * legacy devices use the guest's native endianness in the rings,
+ * whereas 1.0 mandates little-endian, which is what the
+ * ld*_le_p()/st*_le_p() accessors used below assume.
+ */
+ vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user");
+ return false;
+ }
if (!(dev->features & VHOST_USER_F_PROTOCOL_FEATURES)) {
vu_set_enable_all_rings(dev, true);
return false;
}
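/*
 * Both tests above check single bits of the 64-bit feature word; what
 * vu_has_feature() is assumed to compute, illustratively:
 *
 *     return !!(dev->features & (1ULL << fbit));
 *
 * where VIRTIO feature constants such as VIRTIO_F_VERSION_1 are plain
 * bit numbers.
 */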
- vq->used_idx = vq->vring.used->idx;
+ vq->used_idx = lduw_le_p(&vq->vring.used->idx);
if (vq->last_avail_idx != vq->used_idx) {
bool resume = dev->iface->queue_is_processed_in_order &&
return 0;
}
- vq->used_idx = vq->vring.used->idx;
+ vq->used_idx = lduw_le_p(&vq->vring.used->idx);
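/* Reset the in-flight resubmit bookkeeping; it is rebuilt from the
 * inflight region if pending requests need to be replayed. */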
vq->resubmit_num = 0;
vq->resubmit_list = NULL;
vq->counter = 0;
static inline uint16_t
vring_avail_flags(VuVirtq *vq)
{
- return vq->vring.avail->flags;
+ return lduw_le_p(&vq->vring.avail->flags);
}
static inline uint16_t
vring_avail_idx(VuVirtq *vq)
{
- vq->shadow_avail_idx = vq->vring.avail->idx;
+ vq->shadow_avail_idx = lduw_le_p(&vq->vring.avail->idx);
return vq->shadow_avail_idx;
}
static inline uint16_t
vring_avail_ring(VuVirtq *vq, int i)
{
- return vq->vring.avail->ring[i];
+ return lduw_le_p(&vq->vring.avail->ring[i]);
}
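/*
 * Taken together these helpers give endian-safe access to the available
 * ring. A hypothetical caller sketch in the style of a
 * virtqueue_get_head() helper (illustrative, not part of this patch):
 */
static bool sketch_next_head(VuVirtq *vq, uint16_t last_avail,
                             unsigned int *head)
{
    /* vring_avail_idx() refreshes shadow_avail_idx from the LE ring */
    if (last_avail == vring_avail_idx(vq)) {
        return false;                  /* no new descriptors available */
    }
    /* slots wrap at vring.num; vring_avail_ring() converts from LE */
    *head = vring_avail_ring(vq, last_avail % vq->vring.num);
    return true;
}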
static inline uint16_t
int i, unsigned int max, unsigned int *next)
{
/* If this descriptor says it doesn't chain, we're done. */
- if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
+ if (!(lduw_le_p(&desc[i].flags) & VRING_DESC_F_NEXT)) {
return VIRTQUEUE_READ_DESC_DONE;
}
/* Check they're not leading us off end of descriptors. */
- *next = desc[i].next;
+ *next = lduw_le_p(&desc[i].next);
/* Make sure compiler knows to grab that: we don't want it changing! */
smp_wmb();
}
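/*
 * For orientation, the return codes above drive descriptor-chain walks
 * of roughly this shape elsewhere in the file (paraphrased sketch, not
 * verbatim patch context):
 *
 *     int rc;
 *     unsigned int i = head, next;    // head taken from the avail ring
 *     do {
 *         ... consume desc[i] through the ld*_le_p() accessors ...
 *         rc = virtqueue_read_next_desc(dev, desc, i, max, &next);
 *         i = next;
 *     } while (rc == VIRTQUEUE_READ_DESC_NEXT);
 *     // rc is VIRTQUEUE_READ_DESC_DONE or VIRTQUEUE_READ_DESC_ERROR
 */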
desc = vq->vring.desc;
- if (desc[i].flags & VRING_DESC_F_INDIRECT) {
- if (desc[i].len % sizeof(struct vring_desc)) {
+ if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) {
+ if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) {
vu_panic(dev, "Invalid size for indirect buffer table");
goto err;
}
/* loop over the indirect descriptor table */
indirect = 1;
- desc_addr = desc[i].addr;
- desc_len = desc[i].len;
+ desc_addr = ldq_le_p(&desc[i].addr);
+ desc_len = ldl_le_p(&desc[i].len);
max = desc_len / sizeof(struct vring_desc);
read_len = desc_len;
desc = vu_gpa_to_va(dev, &read_len, desc_addr);
goto err;
}
- if (desc[i].flags & VRING_DESC_F_WRITE) {
- in_total += desc[i].len;
+ if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) {
+ in_total += ldl_le_p(&desc[i].len);
} else {
- out_total += desc[i].len;
+ out_total += ldl_le_p(&desc[i].len);
}
if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
goto done;
flags = (uint16_t *)((char *)vq->vring.used +
offsetof(struct vring_used, flags));
- *flags |= mask;
+ stw_le_p(flags, lduw_le_p(flags) | mask);
}
static inline void
flags = (uint16_t *)((char *)vq->vring.used +
offsetof(struct vring_used, flags));
- *flags &= ~mask;
+ stw_le_p(flags, lduw_le_p(flags) & ~mask);
}
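/*
 * The load/modify/store round trip, rather than a direct |= or &=,
 * matters on big-endian hosts: the flags word is stored little-endian
 * in guest memory, so a host-order mask applied in place would land on
 * the wrong byte. Illustration, assuming a big-endian host and
 * VRING_USED_F_NO_NOTIFY == 0x1:
 *
 *     uint16_t le_flags;
 *     stw_le_p(&le_flags, VRING_USED_F_NO_NOTIFY);  // bytes: 01 00
 *     // A raw read now yields 0x0100, so a direct
 *     // "le_flags |= VRING_USED_F_NO_NOTIFY" would give 0x0101, i.e.
 *     // two bits set in the little-endian view; the lduw_le_p()/
 *     // stw_le_p() pair keeps the modification in the right byte.
 */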
static inline void
return;
}
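/* The avail-event word sits immediately after used->ring[vring.num];
 * the device stores the avail index at which it next wants a kick, and
 * it is only consulted when VIRTIO_RING_F_EVENT_IDX was negotiated. */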
- *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val;
+ stw_le_p(&vq->vring.used->ring[vq->vring.num], val);
}
void
struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
int rc;
- if (desc[i].flags & VRING_DESC_F_INDIRECT) {
- if (desc[i].len % sizeof(struct vring_desc)) {
+ if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) {
+ if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) {
vu_panic(dev, "Invalid size for indirect buffer table");
}
/* loop over the indirect descriptor table */
- desc_addr = desc[i].addr;
- desc_len = desc[i].len;
+ desc_addr = ldq_le_p(&desc[i].addr);
+ desc_len = ldl_le_p(&desc[i].len);
max = desc_len / sizeof(struct vring_desc);
read_len = desc_len;
desc = vu_gpa_to_va(dev, &read_len, desc_addr);
/* Collect all the descriptors */
do {
- if (desc[i].flags & VRING_DESC_F_WRITE) {
+ if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) {
virtqueue_map_desc(dev, &in_num, iov + out_num,
VIRTQUEUE_MAX_SIZE - out_num, true,
- desc[i].addr, desc[i].len);
+ ldq_le_p(&desc[i].addr), ldl_le_p(&desc[i].len));
} else {
if (in_num) {
vu_panic(dev, "Incorrect order for descriptors");
}
virtqueue_map_desc(dev, &out_num, iov,
VIRTQUEUE_MAX_SIZE, false,
- desc[i].addr, desc[i].len);
+ ldq_le_p(&desc[i].addr), ldl_le_p(&desc[i].len));
}
/* If we've got too many, that implies a descriptor loop. */
max = vq->vring.num;
i = elem->index;
- if (desc[i].flags & VRING_DESC_F_INDIRECT) {
- if (desc[i].len % sizeof(struct vring_desc)) {
+ if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) {
+ if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) {
vu_panic(dev, "Invalid size for indirect buffer table");
}
/* loop over the indirect descriptor table */
- desc_addr = desc[i].addr;
- desc_len = desc[i].len;
+ desc_addr = ldq_le_p(&desc[i].addr);
+ desc_len = ldl_le_p(&desc[i].len);
max = desc_len / sizeof(struct vring_desc);
read_len = desc_len;
desc = vu_gpa_to_va(dev, &read_len, desc_addr);
return;
}
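/* Mark only the buffers the device actually wrote as dirty, so that a
 * migrating front-end can track every page the backend touched. */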
- if (desc[i].flags & VRING_DESC_F_WRITE) {
- min = MIN(desc[i].len, len);
- vu_log_write(dev, desc[i].addr, min);
+ if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) {
+ min = MIN(ldl_le_p(&desc[i].len), len);
+ vu_log_write(dev, ldq_le_p(&desc[i].addr), min);
len -= min;
}
idx = (idx + vq->used_idx) % vq->vring.num;
- uelem.id = elem->index;
- uelem.len = len;
+ stl_le_p(&uelem.id, elem->index);
+ stl_le_p(&uelem.len, len);
vring_used_write(dev, vq, &uelem, idx);
}
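/*
 * For context, callers normally pair the fill with a flush that
 * publishes the new used index; vu_queue_push() in the public
 * libvhost-user API wraps exactly that sequence. Illustrative usage,
 * with written_len as a placeholder:
 *
 *     vu_queue_fill(dev, vq, elem, written_len, 0);  // stage slot 0
 *     vu_queue_flush(dev, vq, 1);                    // publish 1 entry
 */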
static inline
void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
{
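/* Callers must order their used-ring entry stores before this index
 * update; vu_queue_flush() does so with smp_wmb() before calling us. */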
- vq->vring.used->idx = val;
+ stw_le_p(&vq->vring.used->idx, val);
vu_log_write(dev,
vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
sizeof(vq->vring.used->idx));