#include <linux/spinlock.h>
 #include <xen/xen.h>
 
+/*
+ * When a driver sets suppress_used_validation, used-length checking is
+ * skipped; this knob forces validation on regardless (read-only at runtime).
+ * Note: statics are zero-initialized, so no "= false" (checkpatch).
+ */
+static bool force_used_validation;
+module_param(force_used_validation, bool, 0444);
+MODULE_PARM_DESC(force_used_validation,
+		 "Force validation of used buffer lengths even when the driver suppresses it");
+
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
 #define BAD_RING(_vq, fmt, args...)                            \
                } packed;
        };
 
+       /* Per-descriptor in buffer length */
+       u32 *buflen;
+
        /* How to notify other side. FIXME: commonalize hcalls! */
        bool (*notify)(struct virtqueue *vq);
 
        unsigned int i, n, avail, descs_used, prev, err_idx;
        int head;
        bool indirect;
+       u32 buflen = 0;
 
        START_USE(vq);
 
                                                     VRING_DESC_F_NEXT |
                                                     VRING_DESC_F_WRITE,
                                                     indirect);
+                       buflen += sg->length;
                }
        }
        /* Last one doesn't continue. */
        else
                vq->split.desc_state[head].indir_desc = ctx;
 
+       /* Store in buffer length if necessary */
+       if (vq->buflen)
+               vq->buflen[head] = buflen;
+
        /* Put entry in available array (but don't update avail->idx until they
         * do sync). */
        avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
                BAD_RING(vq, "id %u is not a head!\n", i);
                return NULL;
        }
+       if (vq->buflen && unlikely(*len > vq->buflen[i])) {
+               BAD_RING(vq, "used len %d is larger than in buflen %u\n",
+                       *len, vq->buflen[i]);
+               return NULL;
+       }
 
        /* detach_buf_split clears data, so grab it now. */
        ret = vq->split.desc_state[i].data;
        unsigned int i, n, err_idx;
        u16 head, id;
        dma_addr_t addr;
+       u32 buflen = 0;
 
        head = vq->packed.next_avail_idx;
        desc = alloc_indirect_packed(total_sg, gfp);
                        desc[i].addr = cpu_to_le64(addr);
                        desc[i].len = cpu_to_le32(sg->length);
                        i++;
+                       if (n >= out_sgs)
+                               buflen += sg->length;
                }
        }
 
        vq->packed.desc_state[id].indir_desc = desc;
        vq->packed.desc_state[id].last = id;
 
+       /* Store in buffer length if necessary */
+       if (vq->buflen)
+               vq->buflen[id] = buflen;
+
        vq->num_added += 1;
 
        pr_debug("Added buffer head %i to %p\n", head, vq);
        __le16 head_flags, flags;
        u16 head, id, prev, curr, avail_used_flags;
        int err;
+       u32 buflen = 0;
 
        START_USE(vq);
 
                                        1 << VRING_PACKED_DESC_F_AVAIL |
                                        1 << VRING_PACKED_DESC_F_USED;
                        }
+                       if (n >= out_sgs)
+                               buflen += sg->length;
                }
        }
 
        vq->packed.desc_state[id].indir_desc = ctx;
        vq->packed.desc_state[id].last = prev;
 
+       /* Store in buffer length if necessary */
+       if (vq->buflen)
+               vq->buflen[id] = buflen;
+
        /*
         * A driver MUST NOT make the first descriptor in the list
         * available before all subsequent descriptors comprising
                BAD_RING(vq, "id %u is not a head!\n", id);
                return NULL;
        }
+       if (vq->buflen && unlikely(*len > vq->buflen[id])) {
+               BAD_RING(vq, "used len %d is larger than in buflen %u\n",
+                       *len, vq->buflen[id]);
+               return NULL;
+       }
 
        /* detach_buf_packed clears data, so grab it now. */
        ret = vq->packed.desc_state[id].data;
        struct vring_virtqueue *vq;
        struct vring_packed_desc *ring;
        struct vring_packed_desc_event *driver, *device;
+       struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
        dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
        size_t ring_size_in_bytes, event_size_in_bytes;
 
        if (!vq->packed.desc_extra)
                goto err_desc_extra;
 
+       if (!drv->suppress_used_validation || force_used_validation) {
+               vq->buflen = kmalloc_array(num, sizeof(*vq->buflen),
+                                          GFP_KERNEL);
+               if (!vq->buflen)
+                       goto err_buflen;
+       } else {
+               vq->buflen = NULL;
+       }
+
        /* No callback?  Tell other side not to bother us. */
        if (!callback) {
                vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
        spin_unlock(&vdev->vqs_list_lock);
        return &vq->vq;
 
+err_buflen:
+       kfree(vq->packed.desc_extra);
 err_desc_extra:
        kfree(vq->packed.desc_state);
 err_desc_state:
                                        void (*callback)(struct virtqueue *),
                                        const char *name)
 {
+       struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
        struct vring_virtqueue *vq;
 
        if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
        if (!vq->split.desc_extra)
                goto err_extra;
 
+       if (!drv->suppress_used_validation || force_used_validation) {
+               vq->buflen = kmalloc_array(vring.num, sizeof(*vq->buflen),
+                                          GFP_KERNEL);
+               if (!vq->buflen)
+                       goto err_buflen;
+       } else {
+               vq->buflen = NULL;
+       }
+
        /* Put everything in free lists. */
        vq->free_head = 0;
        memset(vq->split.desc_state, 0, vring.num *
        spin_unlock(&vdev->vqs_list_lock);
        return &vq->vq;
 
+err_buflen:
+       kfree(vq->split.desc_extra);
 err_extra:
        kfree(vq->split.desc_state);
 err_state: