return ret;
}
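+/*
+ * Forward the vring base (avail index) straight to the vdpa device via
+ * VHOST_SET_VRING_BASE, regardless of whether SVQ is enabled.
+ */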
+static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
+{
+ trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
+ return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
+}
+
static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
struct vhost_vring_file *file)
{
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}
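+/*
+ * Program the descriptor, avail and used ring addresses directly on the
+ * vdpa device via VHOST_SET_VRING_ADDR, regardless of whether SVQ is enabled.
+ */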
+static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
+ struct vhost_vring_addr *addr)
+{
+ trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
+ addr->desc_user_addr, addr->used_user_addr,
+ addr->avail_user_addr,
+ addr->log_guest_addr);
+
+ return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
+
+}
+
static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
struct vhost_vring_addr *addr)
{
- trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
- addr->desc_user_addr, addr->used_user_addr,
- addr->avail_user_addr,
- addr->log_guest_addr);
- return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
+ struct vhost_vdpa *v = dev->opaque;
+
+ if (v->shadow_vqs_enabled) {
+ /*
+ * Device vring addr was set at device start. SVQ base is handled by
+ * VirtQueue code.
+ */
+ return 0;
+ }
+
+ return vhost_vdpa_set_vring_dev_addr(dev, addr);
}
static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
struct vhost_vring_state *ring)
{
- trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
- return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
+ struct vhost_vdpa *v = dev->opaque;
+
+ if (v->shadow_vqs_enabled) {
+ /*
+ * Device vring base was set at device start. SVQ base is handled by
+ * VirtQueue code.
+ */
+ return 0;
+ }
+
+ return vhost_vdpa_set_dev_vring_base(dev, ring);
}
static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,