 	virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

 	INIT_LIST_HEAD(&dev->vqs);
+	spin_lock_init(&dev->vqs_list_lock);

 	/*
 	 * device_add() causes the bus infrastructure to look for a matching
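The hunk above is from register_virtio_device() in drivers/virtio/virtio.c: the new vqs_list_lock is initialized right next to the vqs list it protects, before device_add() can make the device visible and let a probing driver start creating virtqueues. A minimal kernel-style sketch of that init-then-publish ordering; virtio_register_sketch() is a hypothetical illustration, not the function from the patch:

#include <linux/device.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

/* Hypothetical sketch, not the patched function: set up the VQ list and
 * the lock that guards it before the device can be probed. */
static int virtio_register_sketch(struct virtio_device *dev)
{
	INIT_LIST_HEAD(&dev->vqs);		/* per-device list of virtqueues */
	spin_lock_init(&dev->vqs_list_lock);	/* guards every access to dev->vqs */

	/* Only now publish the device; device_add() may trigger a driver
	 * probe that begins adding virtqueues to dev->vqs. */
	return device_add(&dev->dev);
}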
 			cpu_to_le16(vq->packed.event_flags_shadow);
 	}

+	spin_lock(&vdev->vqs_list_lock);
 	list_add_tail(&vq->vq.list, &vdev->vqs);
+	spin_unlock(&vdev->vqs_list_lock);
 	return &vq->vq;

 err_desc_extra:
 	memset(vq->split.desc_state, 0, vring.num *
 			sizeof(struct vring_desc_state_split));

+	spin_lock(&vdev->vqs_list_lock);
 	list_add_tail(&vq->vq.list, &vdev->vqs);
+	spin_unlock(&vdev->vqs_list_lock);
 	return &vq->vq;

 err_extra:
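The two hunks above are the packed-ring and split-ring creation paths (vring_create_virtqueue_packed() and __vring_new_virtqueue() in drivers/virtio/virtio_ring.c, going by the surrounding context). In both, the fully initialized virtqueue is linked onto vdev->vqs only while vqs_list_lock is held, so a concurrent walker never observes a half-updated list. The patch open-codes the lock/unlock at each call site; a hypothetical shared helper expressing the same pattern could look like this (vring_publish_vq() is illustrative, not part of the patch):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

/* Hypothetical helper: publish a fully set-up virtqueue on its device's
 * vqs list. The critical section covers only the list pointer updates. */
static void vring_publish_vq(struct virtio_device *vdev, struct virtqueue *vq)
{
	spin_lock(&vdev->vqs_list_lock);
	list_add_tail(&vq->list, &vdev->vqs);
	spin_unlock(&vdev->vqs_list_lock);
}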
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);

+	spin_lock(&vq->vq.vdev->vqs_list_lock);
 	list_del(&_vq->list);
+	spin_unlock(&vq->vq.vdev->vqs_list_lock);

 	if (vq->we_own_ring) {
 		if (vq->packed_ring) {
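The hunk above is from vring_del_virtqueue(): the unlink now takes the same lock, reached through the queue's owning device (vq->vq.vdev), so tearing a queue down can no longer race with the list walk in virtio_break_device() or with a queue being added on another CPU. A hypothetical counterpart to the publish helper sketched earlier:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

/* Hypothetical helper: unlink a virtqueue from its device's vqs list
 * before its ring memory is freed. */
static void vring_unpublish_vq(struct virtqueue *vq)
{
	struct virtio_device *vdev = vq->vdev;

	spin_lock(&vdev->vqs_list_lock);
	list_del(&vq->list);
	spin_unlock(&vdev->vqs_list_lock);
}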
 {
 	struct virtqueue *_vq;

+	spin_lock(&dev->vqs_list_lock);
 	list_for_each_entry(_vq, &dev->vqs, list) {
 		struct vring_virtqueue *vq = to_vvq(_vq);

 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
 		WRITE_ONCE(vq->broken, true);
 	}
+	spin_unlock(&dev->vqs_list_lock);
 }
 EXPORT_SYMBOL_GPL(virtio_break_device);
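The hunk above is virtio_break_device(): the walk over dev->vqs now runs entirely under vqs_list_lock, so no entry can be added or unlinked mid-iteration, while the WRITE_ONCE() store pairs with a lockless READ_ONCE() load on the reader side. A sketch of how a driver consumes that flag through the exported virtqueue_is_broken() helper (my_driver_can_submit() is a hypothetical caller):

#include <linux/virtio.h>

/* Hypothetical driver-side check: once virtio_break_device() has marked
 * every queue broken, submission paths back off. virtqueue_is_broken()
 * reads vq->broken without taking vqs_list_lock, which is why the
 * WRITE_ONCE()/READ_ONCE() pairing matters. */
static bool my_driver_can_submit(struct virtqueue *vq)
{
	return !virtqueue_is_broken(vq);
}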
 	bool config_enabled;
 	bool config_change_pending;
 	spinlock_t config_lock;
+	spinlock_t vqs_list_lock; /* Protects VQs list access */
 	struct device dev;
 	struct virtio_device_id id;
 	const struct virtio_config_ops *config;
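The final hunk adds the field itself to struct virtio_device in include/linux/virtio.h, beside the existing config_lock. The comment states the rule the rest of the patch implements: every access to the vqs list, whether adding, deleting, or iterating, must hold this lock. A hypothetical lockdep annotation (not part of the patch) that would express the same contract:

#include <linux/lockdep.h>
#include <linux/virtio.h>

/* Hypothetical debug helper: assert that the caller holds the lock that
 * protects dev->vqs. Could be dropped into any function that walks or
 * modifies the list. */
static inline void virtio_assert_vqs_locked(struct virtio_device *dev)
{
	lockdep_assert_held(&dev->vqs_list_lock);
}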