Remove the v4l2-specific parts - v4l2_buf, v4l2_plane - from
struct vb2_buffer. Add new members - bytesused, length, offset, userptr,
fd and data_offset - to struct vb2_plane so that it carries all the
information of struct v4l2_plane:

struct vb2_plane {
        <snip>
        unsigned int            bytesused;
        unsigned int            length;
        union {
                unsigned int    offset;
                unsigned long   userptr;
                int             fd;
        } m;
        unsigned int            data_offset;
};
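
As an illustration only, the v4l2 glue can now fill a struct v4l2_plane
purely from a struct vb2_plane; a minimal sketch (hypothetical helper
name, memory-type handling omitted):

static void fill_v4l2_plane(const struct vb2_plane *src,
                            struct v4l2_plane *dst)
{
        /* copy the plane bookkeeping that used to live in v4l2_plane */
        dst->bytesused    = src->bytesused;
        dst->length       = src->length;
        dst->data_offset  = src->data_offset;
        dst->m.mem_offset = src->m.offset;  /* V4L2_MEMORY_MMAP case */
}
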
Replace v4l2_buf with new members - index, type and memory - which are
the fields common to all buffer management:

struct vb2_buffer {
        <snip>
        unsigned int            index;
        unsigned int            type;
        unsigned int            memory;
        unsigned int            num_planes;
        struct vb2_plane        planes[VIDEO_MAX_PLANES];
        <snip>
};
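
Driver code that only needs these common fields now reads them straight
from struct vb2_buffer, as the cobalt hunks below show:

        /* before: the index lived in the embedded struct v4l2_buffer */
        desc = &s->dma_desc_info[vb->v4l2_buf.index];

        /* after: index is a plain member of struct vb2_buffer */
        desc = &s->dma_desc_info[vb->index];
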
The v4l2-specific fields - flags, field, timestamp, timecode and
sequence - move to the new struct vb2_v4l2_buffer, declared in
videobuf2-v4l2.h:

struct vb2_v4l2_buffer {
        struct vb2_buffer       vb2_buf;
        __u32                   flags;
        __u32                   field;
        struct timeval          timestamp;
        struct v4l2_timecode    timecode;
        __u32                   sequence;
};
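
Every driver converted below follows the same pattern: its buffer
structure embeds struct vb2_v4l2_buffer as the first member, and the
vb2 queue ops climb back from the struct vb2_buffer they receive via
to_vb2_v4l2_buffer() and container_of(). A minimal sketch (the foo_*
names and fields are placeholders, not part of this patch):

struct foo_buffer {
        struct vb2_v4l2_buffer vb;      /* must be first */
        struct list_head list;
};

static void foo_buf_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct foo_buffer *buf = container_of(vbuf, struct foo_buffer, vb);
        struct foo_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

        /* v4l2-only state lives in vbuf, common state in vbuf->vb2_buf */
        vbuf->field = V4L2_FIELD_NONE;

        spin_lock(&dev->slock);
        list_add_tail(&buf->list, &dev->active);
        spin_unlock(&dev->slock);
}
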
Signed-off-by: Junghak Sung <jh1009.sung@samsung.com>
Signed-off-by: Geunyoung Kim <nenggun.kim@samsung.com>
Acked-by: Seung-Woo Kim <sw0312.kim@samsung.com>
Acked-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
 #include <media/v4l2-device.h>
 #include <media/v4l2-dev.h>
 #include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-dma-sg.h>
 
 /* read 512 bytes from endpoint 0x86 -> get header + blobs */
 };
 
 struct sur40_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
 
        dev_dbg(sur40->dev, "header acquired\n");
 
-       sgt = vb2_dma_sg_plane_desc(&new_buf->vb, 0);
+       sgt = vb2_dma_sg_plane_desc(&new_buf->vb.vb2_buf, 0);
 
        result = usb_sg_init(&sgr, sur40->usbdev,
                usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), 0,
                goto err_poll;
 
        /* mark as finished */
-       v4l2_get_timestamp(&new_buf->vb.v4l2_buf.timestamp);
-       new_buf->vb.v4l2_buf.sequence = sur40->sequence++;
-       new_buf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
-       vb2_buffer_done(&new_buf->vb, VB2_BUF_STATE_DONE);
+       v4l2_get_timestamp(&new_buf->vb.timestamp);
+       new_buf->vb.sequence = sur40->sequence++;
+       new_buf->vb.field = V4L2_FIELD_NONE;
+       vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
        dev_dbg(sur40->dev, "buffer marked done\n");
        return;
 
 err_poll:
-       vb2_buffer_done(&new_buf->vb, VB2_BUF_STATE_ERROR);
+       vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
 }
 
 /* Initialize input device parameters. */
 
        spin_lock(&sur40->qlock);
        list_for_each_entry_safe(buf, node, &sur40->buf_list, list) {
-               vb2_buffer_done(&buf->vb, state);
+               vb2_buffer_done(&buf->vb.vb2_buf, state);
                list_del(&buf->list);
        }
        spin_unlock(&sur40->qlock);
 
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>
 
 #include <linux/platform_device.h>
 
 /* intermediate buffers with raw data from the USB device */
 struct rtl2832_sdr_frame_buf {
-       struct vb2_buffer vb;   /* common v4l buffer stuff -- must be first */
+       /* common v4l buffer stuff -- must be first */
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
                }
 
                /* fill framebuffer */
-               ptr = vb2_plane_vaddr(&fbuf->vb, 0);
+               ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
                len = rtl2832_sdr_convert_stream(dev, ptr, urb->transfer_buffer,
                                urb->actual_length);
-               vb2_set_plane_payload(&fbuf->vb, 0, len);
-               v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp);
-               fbuf->vb.v4l2_buf.sequence = dev->sequence++;
-               vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+               vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, len);
+               v4l2_get_timestamp(&fbuf->vb.timestamp);
+               fbuf->vb.sequence = dev->sequence++;
+               vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
        }
 skip:
        usb_submit_urb(urb, GFP_ATOMIC);
                buf = list_entry(dev->queued_bufs.next,
                                struct rtl2832_sdr_frame_buf, list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
 }
 
 static void rtl2832_sdr_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
        struct rtl2832_sdr_frame_buf *buf =
-                       container_of(vb, struct rtl2832_sdr_frame_buf, vb);
+                       container_of(vbuf, struct rtl2832_sdr_frame_buf, vb);
        unsigned long flags;
 
        /* Check the device has not disconnected between prep and queuing */
        if (!dev->udev) {
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                return;
        }
 
 
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-fh.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-dma-sg.h>
 
 #include "m00233_video_measure_memmap_package.h"
 #define COBALT_STREAM_FL_ADV_IRQ               1
 
 struct cobalt_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
-static inline struct cobalt_buffer *to_cobalt_buffer(struct vb2_buffer *vb2)
+static inline
+struct cobalt_buffer *to_cobalt_buffer(struct vb2_v4l2_buffer *vb2)
 {
        return container_of(vb2, struct cobalt_buffer, vb);
 }
 
                skip = true;
                s->skip_first_frames--;
        }
-       v4l2_get_timestamp(&cb->vb.v4l2_buf.timestamp);
+       v4l2_get_timestamp(&cb->vb.timestamp);
        /* TODO: the sequence number should be read from the FPGA so we
           also know about dropped frames. */
-       cb->vb.v4l2_buf.sequence = s->sequence++;
-       vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
+       cb->vb.sequence = s->sequence++;
+       vb2_buffer_done(&cb->vb.vb2_buf,
+                       (skip || s->unstable_frame) ?
                        VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
 }
 
 
        const size_t bytes =
                COBALT_MAX_HEIGHT * max_pages_per_line * 0x20;
        const size_t audio_bytes = ((1920 * 4) / PAGE_SIZE + 1) * 0x20;
-       struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->v4l2_buf.index];
+       struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->index];
        struct sg_table *sg_desc = vb2_dma_sg_plane_desc(vb, 0);
        unsigned size;
        int ret;
 static void cobalt_buf_cleanup(struct vb2_buffer *vb)
 {
        struct cobalt_stream *s = vb->vb2_queue->drv_priv;
-       struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->v4l2_buf.index];
+       struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->index];
 
        descriptor_list_free(desc);
 }
 
 static int cobalt_buf_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cobalt_stream *s = vb->vb2_queue->drv_priv;
 
        vb2_set_plane_payload(vb, 0, s->stride * s->height);
-       vb->v4l2_buf.field = V4L2_FIELD_NONE;
+       vbuf->field = V4L2_FIELD_NONE;
        return 0;
 }
 
 
        list_for_each(p, &s->bufs) {
                cb = list_entry(p, struct cobalt_buffer, list);
-               desc[i] = &s->dma_desc_info[cb->vb.v4l2_buf.index];
+               desc[i] = &s->dma_desc_info[cb->vb.vb2_buf.index];
                if (i > 0)
                        descriptor_list_chain(desc[i-1], desc[i]);
                i++;
 
 static void cobalt_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vb2_queue *q = vb->vb2_queue;
        struct cobalt_stream *s = q->drv_priv;
-       struct cobalt_buffer *cb = to_cobalt_buffer(vb);
-       struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->v4l2_buf.index];
+       struct cobalt_buffer *cb = to_cobalt_buffer(vbuf);
+       struct sg_dma_desc_info *desc = &s->dma_desc_info[vb->index];
        unsigned long flags;
 
        /* Prepare new buffer */
                          &vo->control);
        }
        cb = list_first_entry(&s->bufs, struct cobalt_buffer, list);
-       omni_sg_dma_start(s, &s->dma_desc_info[cb->vb.v4l2_buf.index]);
+       omni_sg_dma_start(s, &s->dma_desc_info[cb->vb.vb2_buf.index]);
        spin_unlock_irqrestore(&s->irqlock, flags);
 }
 
        spin_lock_irqsave(&s->irqlock, flags);
        list_for_each(p, &s->bufs) {
                cb = list_entry(p, struct cobalt_buffer, list);
-               desc = &s->dma_desc_info[cb->vb.v4l2_buf.index];
+               desc = &s->dma_desc_info[cb->vb.vb2_buf.index];
                /* Stop DMA after this descriptor chain */
                descriptor_list_end_of_chain(desc);
        }
        list_for_each_safe(p, safe, &s->bufs) {
                cb = list_entry(p, struct cobalt_buffer, list);
                list_del(&cb->list);
-               vb2_buffer_done(&cb->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&cb->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&s->irqlock, flags);
 
 
 
 static int buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
        struct cx23885_buffer *buf =
-               container_of(vb, struct cx23885_buffer, vb);
+               container_of(vbuf, struct cx23885_buffer, vb);
 
        return cx23885_buf_prepare(buf, &dev->ts1);
 }
 
 static void buffer_finish(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx23885_buffer *buf = container_of(vb,
+       struct cx23885_buffer *buf = container_of(vbuf,
                struct cx23885_buffer, vb);
 
        cx23885_free_buffer(dev, buf);
 
 static void buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx23885_buffer   *buf = container_of(vb,
+       struct cx23885_buffer   *buf = container_of(vbuf,
                struct cx23885_buffer, vb);
 
        cx23885_buf_queue(&dev->ts1, buf);
                        struct cx23885_buffer, queue);
 
                list_del(&buf->queue);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
        return ret;
 
        buf = list_entry(q->active.next,
                         struct cx23885_buffer, queue);
 
-       v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-       buf->vb.v4l2_buf.sequence = q->count++;
-       dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
+       v4l2_get_timestamp(&buf->vb.timestamp);
+       buf->vb.sequence = q->count++;
+       dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
+               buf->vb.vb2_buf.index,
                count, q->count);
        list_del(&buf->queue);
-       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
 }
 
 int cx23885_sram_channel_setup(struct cx23885_dev *dev,
 {
        struct cx23885_dev *dev = port->dev;
        int size = port->ts_packet_size * port->ts_packet_count;
-       struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
+       struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
 
        dprintk(1, "%s: %p\n", __func__, buf);
-       if (vb2_plane_size(&buf->vb, 0) < size)
+       if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
                return -EINVAL;
-       vb2_set_plane_payload(&buf->vb, 0, size);
+       vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
 
        cx23885_risc_databuffer(dev->pci, &buf->risc,
                                sgt->sgl,
        if (list_empty(&cx88q->active)) {
                list_add_tail(&buf->queue, &cx88q->active);
                dprintk(1, "[%p/%d] %s - first active\n",
-                       buf, buf->vb.v4l2_buf.index, __func__);
+                       buf, buf->vb.vb2_buf.index, __func__);
        } else {
                buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
                prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
                list_add_tail(&buf->queue, &cx88q->active);
                prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
                dprintk(1, "[%p/%d] %s - append to active\n",
-                        buf, buf->vb.v4l2_buf.index, __func__);
+                        buf, buf->vb.vb2_buf.index, __func__);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 }
                buf = list_entry(q->active.next, struct cx23885_buffer,
                                 queue);
                list_del(&buf->queue);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
-                       buf, buf->vb.v4l2_buf.index, reason, (unsigned long)buf->risc.dma);
+                       buf, buf->vb.vb2_buf.index, reason,
+                       (unsigned long)buf->risc.dma);
        }
        spin_unlock_irqrestore(&port->slock, flags);
 }
 
 
 static int buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_tsport *port = vb->vb2_queue->drv_priv;
        struct cx23885_buffer *buf =
-               container_of(vb, struct cx23885_buffer, vb);
+               container_of(vbuf, struct cx23885_buffer, vb);
 
        return cx23885_buf_prepare(buf, port);
 }
 
 static void buffer_finish(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_tsport *port = vb->vb2_queue->drv_priv;
        struct cx23885_dev *dev = port->dev;
-       struct cx23885_buffer *buf = container_of(vb,
+       struct cx23885_buffer *buf = container_of(vbuf,
                struct cx23885_buffer, vb);
 
        cx23885_free_buffer(dev, buf);
 
 static void buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_tsport *port = vb->vb2_queue->drv_priv;
-       struct cx23885_buffer   *buf = container_of(vb,
+       struct cx23885_buffer   *buf = container_of(vbuf,
                struct cx23885_buffer, vb);
 
        cx23885_buf_queue(port, buf);
 
 
 static int buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx23885_buffer *buf = container_of(vb,
+       struct cx23885_buffer *buf = container_of(vbuf,
                struct cx23885_buffer, vb);
        struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
        unsigned lines = VBI_PAL_LINE_COUNT;
 
 static void buffer_finish(struct vb2_buffer *vb)
 {
-       struct cx23885_buffer *buf = container_of(vb,
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct cx23885_buffer *buf = container_of(vbuf,
                struct cx23885_buffer, vb);
 
        cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
  */
 static void buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx23885_buffer *buf = container_of(vb, struct cx23885_buffer, vb);
+       struct cx23885_buffer *buf = container_of(vbuf,
+                       struct cx23885_buffer, vb);
        struct cx23885_buffer *prev;
        struct cx23885_dmaqueue *q = &dev->vbiq;
        unsigned long flags;
                list_add_tail(&buf->queue, &q->active);
                spin_unlock_irqrestore(&dev->slock, flags);
                dprintk(2, "[%p/%d] vbi_queue - first active\n",
-                       buf, buf->vb.v4l2_buf.index);
+                       buf, buf->vb.vb2_buf.index);
 
        } else {
                buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
                spin_unlock_irqrestore(&dev->slock, flags);
                prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
                dprintk(2, "[%p/%d] buffer_queue - append to active\n",
-                       buf, buf->vb.v4l2_buf.index);
+                       buf, buf->vb.vb2_buf.index);
        }
 }
 
                        struct cx23885_buffer, queue);
 
                list_del(&buf->queue);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 }
 
        buf = list_entry(q->active.next,
                        struct cx23885_buffer, queue);
 
-       buf->vb.v4l2_buf.sequence = q->count++;
-       v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-       dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
-                       count, q->count);
+       buf->vb.sequence = q->count++;
+       v4l2_get_timestamp(&buf->vb.timestamp);
+       dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
+                       buf->vb.vb2_buf.index, count, q->count);
        list_del(&buf->queue);
-       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
 }
 
 int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm)
 
 static int buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
        struct cx23885_buffer *buf =
-               container_of(vb, struct cx23885_buffer, vb);
+               container_of(vbuf, struct cx23885_buffer, vb);
        u32 line0_offset, line1_offset;
        struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
        int field_tff;
                BUG();
        }
        dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
-               buf, buf->vb.v4l2_buf.index,
+               buf, buf->vb.vb2_buf.index,
                dev->width, dev->height, dev->fmt->depth, dev->fmt->name,
                (unsigned long)buf->risc.dma);
        return 0;
 
 static void buffer_finish(struct vb2_buffer *vb)
 {
-       struct cx23885_buffer *buf = container_of(vb,
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct cx23885_buffer *buf = container_of(vbuf,
                struct cx23885_buffer, vb);
 
        cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
  */
 static void buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx23885_buffer   *buf = container_of(vb,
+       struct cx23885_buffer   *buf = container_of(vbuf,
                struct cx23885_buffer, vb);
        struct cx23885_buffer   *prev;
        struct cx23885_dmaqueue *q    = &dev->vidq;
        if (list_empty(&q->active)) {
                list_add_tail(&buf->queue, &q->active);
                dprintk(2, "[%p/%d] buffer_queue - first active\n",
-                       buf, buf->vb.v4l2_buf.index);
+                       buf, buf->vb.vb2_buf.index);
        } else {
                buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
                prev = list_entry(q->active.prev, struct cx23885_buffer,
                list_add_tail(&buf->queue, &q->active);
                prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
                dprintk(2, "[%p/%d] buffer_queue - append to active\n",
-                               buf, buf->vb.v4l2_buf.index);
+                               buf, buf->vb.vb2_buf.index);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 }
                        struct cx23885_buffer, queue);
 
                list_del(&buf->queue);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 }
 
 /* buffer for one video frame */
 struct cx23885_buffer {
        /* common v4l buffer stuff -- must be first */
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head queue;
 
        /* cx23885 specific */
 
                        buf = list_entry(dmaq->active.next,
                                         struct cx25821_buffer, queue);
 
-                       v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-                       buf->vb.v4l2_buf.sequence = dmaq->count++;
+                       v4l2_get_timestamp(&buf->vb.timestamp);
+                       buf->vb.sequence = dmaq->count++;
                        list_del(&buf->queue);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+                       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
                }
                spin_unlock(&dev->slock);
                handled++;
 
 static int cx25821_buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
        struct cx25821_dev *dev = chan->dev;
        struct cx25821_buffer *buf =
-               container_of(vb, struct cx25821_buffer, vb);
+               container_of(vbuf, struct cx25821_buffer, vb);
        struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
        u32 line0_offset;
        int bpl_local = LINE_SIZE_D1;
        if (vb2_plane_size(vb, 0) < chan->height * buf->bpl)
                return -EINVAL;
        vb2_set_plane_payload(vb, 0, chan->height * buf->bpl);
-       buf->vb.v4l2_buf.field = chan->field;
+       buf->vb.field = chan->field;
 
        if (chan->pixel_formats == PIXEL_FRMT_411) {
                bpl_local = buf->bpl;
        }
 
        dprintk(2, "[%p/%d] buffer_prep - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
-               buf, buf->vb.v4l2_buf.index, chan->width, chan->height,
+               buf, buf->vb.vb2_buf.index, chan->width, chan->height,
                chan->fmt->depth, chan->fmt->name,
                (unsigned long)buf->risc.dma);
 
 
 static void cx25821_buffer_finish(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx25821_buffer *buf =
-               container_of(vb, struct cx25821_buffer, vb);
+               container_of(vbuf, struct cx25821_buffer, vb);
        struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
        struct cx25821_dev *dev = chan->dev;
 
 
 static void cx25821_buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx25821_buffer *buf =
-               container_of(vb, struct cx25821_buffer, vb);
+               container_of(vbuf, struct cx25821_buffer, vb);
        struct cx25821_channel *chan = vb->vb2_queue->drv_priv;
        struct cx25821_dev *dev = chan->dev;
        struct cx25821_buffer *prev;
                        struct cx25821_buffer, queue);
 
                list_del(&buf->queue);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 }
 
 #include <media/v4l2-common.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ctrls.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-dma-sg.h>
 
 #include "cx25821-reg.h"
 /* buffer for one video frame */
 struct cx25821_buffer {
        /* common v4l buffer stuff -- must be first */
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head queue;
 
        /* cx25821 specific */
 
 
 static int buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+       struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
 
        return cx8802_buf_prepare(vb->vb2_queue, dev, buf);
 }
 
 static void buffer_finish(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+       struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
        struct cx88_riscmem *risc = &buf->risc;
 
        if (risc->cpu)
 
 static void buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx88_buffer    *buf = container_of(vb, struct cx88_buffer, vb);
+       struct cx88_buffer    *buf = container_of(vbuf, struct cx88_buffer, vb);
 
        cx8802_buf_queue(dev, buf);
 }
                        struct cx88_buffer, list);
 
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
        return err;
                        struct cx88_buffer, list);
 
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 }
 
 
        buf = list_entry(q->active.next,
                         struct cx88_buffer, list);
-       v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-       buf->vb.v4l2_buf.field = core->field;
-       buf->vb.v4l2_buf.sequence = q->count++;
+       v4l2_get_timestamp(&buf->vb.timestamp);
+       buf->vb.field = core->field;
+       buf->vb.sequence = q->count++;
        list_del(&buf->list);
-       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
 }
 
 void cx88_shutdown(struct cx88_core *core)
 
 
 static int buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+       struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
 
        return cx8802_buf_prepare(vb->vb2_queue, dev, buf);
 }
 
 static void buffer_finish(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+       struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
        struct cx88_riscmem *risc = &buf->risc;
 
        if (risc->cpu)
 
 static void buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx88_buffer    *buf = container_of(vb, struct cx88_buffer, vb);
+       struct cx88_buffer    *buf = container_of(vbuf, struct cx88_buffer, vb);
 
        cx8802_buf_queue(dev, buf);
 }
                        struct cx88_buffer, list);
 
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 }
 
 
        buf = list_entry(q->active.next, struct cx88_buffer, list);
        dprintk(2,"restart_queue [%p/%d]: restart dma\n",
-               buf, buf->vb.v4l2_buf.index);
+               buf, buf->vb.vb2_buf.index);
        cx8802_start_dma(dev, q, buf);
        return 0;
 }
                        struct cx88_buffer *buf)
 {
        int size = dev->ts_packet_size * dev->ts_packet_count;
-       struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
+       struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
        struct cx88_riscmem *risc = &buf->risc;
        int rc;
 
-       if (vb2_plane_size(&buf->vb, 0) < size)
+       if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
                return -EINVAL;
-       vb2_set_plane_payload(&buf->vb, 0, size);
+       vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
 
        rc = cx88_risc_databuffer(dev->pci, risc, sgt->sgl,
                             dev->ts_packet_size, dev->ts_packet_count, 0);
                dprintk( 1, "queue is empty - first active\n" );
                list_add_tail(&buf->list, &cx88q->active);
                dprintk(1,"[%p/%d] %s - first active\n",
-                       buf, buf->vb.v4l2_buf.index, __func__);
+                       buf, buf->vb.vb2_buf.index, __func__);
 
        } else {
                buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
                list_add_tail(&buf->list, &cx88q->active);
                prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
                dprintk( 1, "[%p/%d] %s - append to active\n",
-                       buf, buf->vb.v4l2_buf.index, __func__);
+                       buf, buf->vb.vb2_buf.index, __func__);
        }
 }
 
        while (!list_empty(&q->active)) {
                buf = list_entry(q->active.next, struct cx88_buffer, list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->slock,flags);
 }
 
 
        buf = list_entry(q->active.next, struct cx88_buffer, list);
        dprintk(2,"restart_queue [%p/%d]: restart dma\n",
-               buf, buf->vb.v4l2_buf.index);
+               buf, buf->vb.vb2_buf.index);
        cx8800_start_vbi_dma(dev, q, buf);
        return 0;
 }
 
 static int buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+       struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
        struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
        unsigned int lines;
        unsigned int size;
 
 static void buffer_finish(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+       struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
        struct cx88_riscmem *risc = &buf->risc;
 
        if (risc->cpu)
 
 static void buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx88_buffer    *buf = container_of(vb, struct cx88_buffer, vb);
+       struct cx88_buffer    *buf = container_of(vbuf, struct cx88_buffer, vb);
        struct cx88_buffer    *prev;
        struct cx88_dmaqueue  *q    = &dev->vbiq;
 
                list_add_tail(&buf->list, &q->active);
                cx8800_start_vbi_dma(dev, q, buf);
                dprintk(2,"[%p/%d] vbi_queue - first active\n",
-                       buf, buf->vb.v4l2_buf.index);
+                       buf, buf->vb.vb2_buf.index);
 
        } else {
                buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
                list_add_tail(&buf->list, &q->active);
                prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
                dprintk(2,"[%p/%d] buffer_queue - append to active\n",
-                       buf, buf->vb.v4l2_buf.index);
+                       buf, buf->vb.vb2_buf.index);
        }
 }
 
                        struct cx88_buffer, list);
 
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 }
 
        if (!list_empty(&q->active)) {
                buf = list_entry(q->active.next, struct cx88_buffer, list);
                dprintk(2,"restart_queue [%p/%d]: restart dma\n",
-                       buf, buf->vb.v4l2_buf.index);
+                       buf, buf->vb.vb2_buf.index);
                start_video_dma(dev, q, buf);
        }
        return 0;
 
 static int buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
        struct cx88_core *core = dev->core;
-       struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+       struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
        struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 
        buf->bpl = core->width * dev->fmt->depth >> 3;
                break;
        }
        dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
-               buf, buf->vb.v4l2_buf.index,
+               buf, buf->vb.vb2_buf.index,
                core->width, core->height, dev->fmt->depth, dev->fmt->name,
                (unsigned long)buf->risc.dma);
        return 0;
 
 static void buffer_finish(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+       struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
        struct cx88_riscmem *risc = &buf->risc;
 
        if (risc->cpu)
 
 static void buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
-       struct cx88_buffer    *buf = container_of(vb, struct cx88_buffer, vb);
+       struct cx88_buffer    *buf = container_of(vbuf, struct cx88_buffer, vb);
        struct cx88_buffer    *prev;
        struct cx88_core      *core = dev->core;
        struct cx88_dmaqueue  *q    = &dev->vidq;
        if (list_empty(&q->active)) {
                list_add_tail(&buf->list, &q->active);
                dprintk(2,"[%p/%d] buffer_queue - first active\n",
-                       buf, buf->vb.v4l2_buf.index);
+                       buf, buf->vb.vb2_buf.index);
 
        } else {
                buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
                list_add_tail(&buf->list, &q->active);
                prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
                dprintk(2, "[%p/%d] buffer_queue - append to active\n",
-                       buf, buf->vb.v4l2_buf.index);
+                       buf, buf->vb.vb2_buf.index);
        }
 }
 
                        struct cx88_buffer, list);
 
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 }
 
 /* buffer for one video frame */
 struct cx88_buffer {
        /* common v4l buffer stuff -- must be first */
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head       list;
 
        /* cx88 specific */
 
 static int dt3155_start_streaming(struct vb2_queue *q, unsigned count)
 {
        struct dt3155_priv *pd = vb2_get_drv_priv(q);
-       struct vb2_buffer *vb = pd->curr_buf;
+       struct vb2_buffer *vb = &pd->curr_buf->vb2_buf;
        dma_addr_t dma_addr;
 
        pd->sequence = 0;
 
        spin_lock_irq(&pd->lock);
        if (pd->curr_buf) {
-               vb2_buffer_done(pd->curr_buf, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&pd->curr_buf->vb2_buf, VB2_BUF_STATE_ERROR);
                pd->curr_buf = NULL;
        }
 
 
 static void dt3155_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct dt3155_priv *pd = vb2_get_drv_priv(vb->vb2_queue);
 
        /*  pd->vidq.streaming = 1 when dt3155_buf_queue() is invoked  */
        if (pd->curr_buf)
                list_add_tail(&vb->done_entry, &pd->dmaq);
        else
-               pd->curr_buf = vb;
+               pd->curr_buf = vbuf;
        spin_unlock_irq(&pd->lock);
 }
 
 
        spin_lock(&ipd->lock);
        if (ipd->curr_buf && !list_empty(&ipd->dmaq)) {
-               v4l2_get_timestamp(&ipd->curr_buf->v4l2_buf.timestamp);
-               ipd->curr_buf->v4l2_buf.sequence = ipd->sequence++;
-               ipd->curr_buf->v4l2_buf.field = V4L2_FIELD_NONE;
-               vb2_buffer_done(ipd->curr_buf, VB2_BUF_STATE_DONE);
+               v4l2_get_timestamp(&ipd->curr_buf->timestamp);
+               ipd->curr_buf->sequence = ipd->sequence++;
+               ipd->curr_buf->field = V4L2_FIELD_NONE;
+               vb2_buffer_done(&ipd->curr_buf->vb2_buf, VB2_BUF_STATE_DONE);
 
                ivb = list_first_entry(&ipd->dmaq, typeof(*ivb), done_entry);
                list_del(&ivb->done_entry);
-               ipd->curr_buf = ivb;
+               ipd->curr_buf = to_vb2_v4l2_buffer(ivb);
                dma_addr = vb2_dma_contig_plane_dma_addr(ivb, 0);
                iowrite32(dma_addr, ipd->regs + EVEN_DMA_START);
                iowrite32(dma_addr + ipd->width, ipd->regs + ODD_DMA_START);
 
 #include <linux/interrupt.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-dev.h>
+#include <media/videobuf2-v4l2.h>
 
 #define DT3155_NAME "dt3155"
 #define DT3155_VER_MAJ 2
        struct pci_dev *pdev;
        struct vb2_queue vidq;
        struct vb2_alloc_ctx *alloc_ctx;
-       struct vb2_buffer *curr_buf;
+       struct vb2_v4l2_buffer *curr_buf;
        struct mutex mux;
        struct list_head dmaq;
        spinlock_t lock;
 
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/list.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>
 
 #include "netup_unidvb.h"
 } __packed __aligned(1);
 
 struct netup_unidvb_buffer {
-       struct vb2_buffer       vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head        list;
        u32                     size;
 };
 static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
 {
        struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
-       struct netup_unidvb_buffer *buf = container_of(vb,
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct netup_unidvb_buffer *buf = container_of(vbuf,
                                struct netup_unidvb_buffer, vb);
 
        dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
 {
        unsigned long flags;
        struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
-       struct netup_unidvb_buffer *buf = container_of(vb,
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct netup_unidvb_buffer *buf = container_of(vbuf,
                                struct netup_unidvb_buffer, vb);
 
        dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
 {
        u32 copy_bytes, ring_bytes;
        u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
-       u8 *p = vb2_plane_vaddr(&buf->vb, 0);
+       u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
        struct netup_unidvb_dev *ndev = dma->ndev;
 
        if (p == NULL) {
                        dev_dbg(&ndev->pci_dev->dev,
                                "%s(): buffer %p done, size %d\n",
                                __func__, buf, buf->size);
-                       v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-                       vb2_set_plane_payload(&buf->vb, 0, buf->size);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+                       v4l2_get_timestamp(&buf->vb.timestamp);
+                       vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
+                       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
                }
        }
 work_done:
                buf = list_first_entry(&dma->free_buffers,
                        struct netup_unidvb_buffer, list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dma->lock, flags);
 }
 
 
 int saa7134_buffer_startpage(struct saa7134_buf *buf)
 {
-       return saa7134_buffer_pages(vb2_plane_size(&buf->vb2, 0)) * buf->vb2.v4l2_buf.index;
+       return saa7134_buffer_pages(vb2_plane_size(&buf->vb2.vb2_buf, 0))
+                       * buf->vb2.vb2_buf.index;
 }
 
 unsigned long saa7134_buffer_base(struct saa7134_buf *buf)
 {
        unsigned long base;
-       struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
+       struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2.vb2_buf, 0);
 
        base  = saa7134_buffer_startpage(buf) * 4096;
        base += dma->sgl[0].offset;
        core_dbg("buffer_finish %p\n", q->curr);
 
        /* finish current buffer */
-       v4l2_get_timestamp(&q->curr->vb2.v4l2_buf.timestamp);
-       q->curr->vb2.v4l2_buf.sequence = q->seq_nr++;
-       vb2_buffer_done(&q->curr->vb2, state);
+       v4l2_get_timestamp(&q->curr->vb2.timestamp);
+       q->curr->vb2.sequence = q->seq_nr++;
+       vb2_buffer_done(&q->curr->vb2.vb2_buf, state);
        q->curr = NULL;
 }
 
        if (!list_empty(&q->queue)) {
                list_for_each_safe(pos, n, &q->queue) {
                         tmp = list_entry(pos, struct saa7134_buf, entry);
-                        vb2_buffer_done(&tmp->vb2, VB2_BUF_STATE_ERROR);
+                        vb2_buffer_done(&tmp->vb2.vb2_buf,
+                                        VB2_BUF_STATE_ERROR);
                         list_del(pos);
                         tmp = NULL;
                }
 
 
 int saa7134_ts_buffer_init(struct vb2_buffer *vb2)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
        struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
-       struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
+       struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
 
        dmaq->curr = NULL;
        buf->activate = buffer_activate;
 
 int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
        struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
        struct saa7134_dev *dev = dmaq->dev;
-       struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
+       struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
        struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
        unsigned int lines, llength, size;
 
                return -EINVAL;
 
        vb2_set_plane_payload(vb2, 0, size);
-       vb2->v4l2_buf.field = dev->field;
+       vbuf->field = dev->field;
 
        return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
                                    saa7134_buffer_startpage(buf));
 
                list_for_each_entry_safe(buf, tmp, &dmaq->queue, entry) {
                        list_del(&buf->entry);
-                       vb2_buffer_done(&buf->vb2, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&buf->vb2.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                }
                if (dmaq->curr) {
-                       vb2_buffer_done(&dmaq->curr->vb2, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&dmaq->curr->vb2.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                        dmaq->curr = NULL;
                }
                return -EBUSY;
 
                           struct saa7134_buf *buf,
                           struct saa7134_buf *next)
 {
-       struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_queue->drv_priv;
+       struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_buf.vb2_queue->drv_priv;
        unsigned long control, base;
 
        vbi_dbg("buffer_activate [%p]\n", buf);
 {
        struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
        struct saa7134_dev *dev = dmaq->dev;
-       struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
-       struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+       struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
+       struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
        unsigned int size;
 
        if (dma->sgl->offset) {
 static int buffer_init(struct vb2_buffer *vb2)
 {
        struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
-       struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+       struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
 
        dmaq->curr = NULL;
        buf->activate = buffer_activate;
 
                           struct saa7134_buf *buf,
                           struct saa7134_buf *next)
 {
-       struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_queue->drv_priv;
+       struct saa7134_dmaqueue *dmaq = buf->vb2.vb2_buf.vb2_queue->drv_priv;
        unsigned long base,control,bpl;
        unsigned long bpl_uv,lines_uv,base2,base3,tmp; /* planar */
 
 static int buffer_init(struct vb2_buffer *vb2)
 {
        struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
-       struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+       struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
 
        dmaq->curr = NULL;
        buf->activate = buffer_activate;
 {
        struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
        struct saa7134_dev *dev = dmaq->dev;
-       struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
-       struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+       struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
+       struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
        unsigned int size;
 
        if (dma->sgl->offset) {
                return -EINVAL;
 
        vb2_set_plane_payload(vb2, 0, size);
-       vb2->v4l2_buf.field = dev->field;
+       vbuf->field = dev->field;
 
        return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
                                    saa7134_buffer_startpage(buf));
 {
        struct saa7134_dmaqueue *dmaq = vb->vb2_queue->drv_priv;
        struct saa7134_dev *dev = dmaq->dev;
-       struct saa7134_buf *buf = container_of(vb, struct saa7134_buf, vb2);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct saa7134_buf *buf = container_of(vbuf, struct saa7134_buf, vb2);
 
        saa7134_buffer_queue(dev, dmaq, buf);
 }
 
                list_for_each_entry_safe(buf, tmp, &dmaq->queue, entry) {
                        list_del(&buf->entry);
-                       vb2_buffer_done(&buf->vb2, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&buf->vb2.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                }
                if (dmaq->curr) {
-                       vb2_buffer_done(&dmaq->curr->vb2, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&dmaq->curr->vb2.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                        dmaq->curr = NULL;
                }
                return -EBUSY;
 
 /* buffer for one video/vbi/ts frame */
 struct saa7134_buf {
        /* common v4l buffer stuff -- must be first */
-       struct vb2_buffer vb2;
+       struct vb2_v4l2_buffer vb2;
 
        /* saa7134 specific */
        unsigned int            top_seen;
 
 static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
                          struct vb2_buffer *vb, const vop_header *vh)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct solo_dev *solo_dev = solo_enc->solo_dev;
-       struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+       struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
        int frame_size;
 
-       vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+       vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
 
        if (vb2_plane_size(vb, 0) < vop_jpeg_size(vh) + solo_enc->jpeg_len)
                return -EIO;
        frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN);
        vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len);
 
-       return solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf,
+       return solo_send_desc(solo_enc, solo_enc->jpeg_len, sgt,
                             vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev),
                             frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
                             SOLO_JPEG_EXT_SIZE(solo_dev));
 static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
                struct vb2_buffer *vb, const vop_header *vh)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct solo_dev *solo_dev = solo_enc->solo_dev;
-       struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+       struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
        int frame_off, frame_size;
        int skip = 0;
 
                return -EIO;
 
        /* If this is a key frame, add extra header */
-       vb->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
+       vbuf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
                V4L2_BUF_FLAG_BFRAME);
        if (!vop_type(vh)) {
                skip = solo_enc->vop_len;
-               vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+               vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
                vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) +
                        solo_enc->vop_len);
        } else {
-               vb->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+               vbuf->flags |= V4L2_BUF_FLAG_PFRAME;
                vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh));
        }
 
                sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
        frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN);
 
-       return solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
+       return solo_send_desc(solo_enc, skip, sgt, frame_off, frame_size,
                        SOLO_MP4E_EXT_ADDR(solo_dev),
                        SOLO_MP4E_EXT_SIZE(solo_dev));
 }
 static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
                            struct vb2_buffer *vb, struct solo_enc_buf *enc_buf)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        const vop_header *vh = enc_buf->vh;
        int ret;
 
        }
 
        if (!ret) {
-               vb->v4l2_buf.sequence = solo_enc->sequence++;
-               vb->v4l2_buf.timestamp.tv_sec = vop_sec(vh);
-               vb->v4l2_buf.timestamp.tv_usec = vop_usec(vh);
+               vbuf->sequence = solo_enc->sequence++;
+               vbuf->timestamp.tv_sec = vop_sec(vh);
+               vbuf->timestamp.tv_usec = vop_usec(vh);
 
                /* Check for motion flags */
                if (solo_is_motion_on(solo_enc) && enc_buf->motion) {
                        struct v4l2_event ev = {
                                .type = V4L2_EVENT_MOTION_DET,
                                .u.motion_det = {
-                                       .flags = V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ,
-                                       .frame_sequence = vb->v4l2_buf.sequence,
+                                       .flags =
+                                               V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ,
+                                       .frame_sequence = vbuf->sequence,
                                        .region_mask = enc_buf->motion ? 1 : 0,
                                },
                        };
        list_del(&vb->list);
        spin_unlock_irqrestore(&solo_enc->av_lock, flags);
 
-       solo_enc_fillbuf(solo_enc, &vb->vb, enc_buf);
+       solo_enc_fillbuf(solo_enc, &vb->vb.vb2_buf, enc_buf);
 unlock:
        mutex_unlock(&solo_enc->lock);
 }
 
 static void solo_enc_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vb2_queue *vq = vb->vb2_queue;
        struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vq);
        struct solo_vb2_buf *solo_vb =
-               container_of(vb, struct solo_vb2_buf, vb);
+               container_of(vbuf, struct solo_vb2_buf, vb);
 
        spin_lock(&solo_enc->av_lock);
        list_add_tail(&solo_vb->list, &solo_enc->vidq_active);
                                struct solo_vb2_buf, list);
 
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&solo_enc->av_lock, flags);
 }
 
 static void solo_enc_buf_finish(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vb->vb2_queue);
-       struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+       struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 
        switch (solo_enc->fmt) {
        case V4L2_PIX_FMT_MPEG4:
        case V4L2_PIX_FMT_H264:
-               if (vb->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME)
-                       sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
+               if (vbuf->flags & V4L2_BUF_FLAG_KEYFRAME)
+                       sg_copy_from_buffer(sgt->sgl, sgt->nents,
                                        solo_enc->vop, solo_enc->vop_len);
                break;
        default: /* V4L2_PIX_FMT_MJPEG */
-               sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
+               sg_copy_from_buffer(sgt->sgl, sgt->nents,
                                solo_enc->jpeg_header, solo_enc->jpeg_len);
                break;
        }
 
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-common.h>
 #include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-dma-contig.h>
 
 #include "solo6x10.h"
 static void solo_fillbuf(struct solo_dev *solo_dev,
                         struct vb2_buffer *vb)
 {
-       dma_addr_t vbuf;
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       dma_addr_t addr;
        unsigned int fdma_addr;
        int error = -1;
        int i;
 
-       vbuf = vb2_dma_contig_plane_dma_addr(vb, 0);
-       if (!vbuf)
+       addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+       if (!addr)
                goto finish_buf;
 
        if (erase_off(solo_dev)) {
                fdma_addr = SOLO_DISP_EXT_ADDR + (solo_dev->old_write *
                                (SOLO_HW_BPL * solo_vlines(solo_dev)));
 
-               error = solo_p2m_dma_t(solo_dev, 0, vbuf, fdma_addr,
+               error = solo_p2m_dma_t(solo_dev, 0, addr, fdma_addr,
                                       solo_bytesperline(solo_dev),
                                       solo_vlines(solo_dev), SOLO_HW_BPL);
        }
        if (!error) {
                vb2_set_plane_payload(vb, 0,
                        solo_vlines(solo_dev) * solo_bytesperline(solo_dev));
-               vb->v4l2_buf.sequence = solo_dev->sequence++;
-               v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+               vbuf->sequence = solo_dev->sequence++;
+               v4l2_get_timestamp(&vbuf->timestamp);
        }
 
        vb2_buffer_done(vb, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
 
                spin_unlock(&solo_dev->slock);
 
-               solo_fillbuf(solo_dev, &vb->vb);
+               solo_fillbuf(solo_dev, &vb->vb.vb2_buf);
        }
 
        assert_spin_locked(&solo_dev->slock);
 
 static void solo_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vb2_queue *vq = vb->vb2_queue;
        struct solo_dev *solo_dev = vb2_get_drv_priv(vq);
        struct solo_vb2_buf *solo_vb =
-               container_of(vb, struct solo_vb2_buf, vb);
+               container_of(vbuf, struct solo_vb2_buf, vb);
 
        spin_lock(&solo_dev->slock);
        list_add_tail(&solo_vb->list, &solo_dev->vidq_active);
 
 #define OSD_TEXT_MAX           44
 
 struct solo_vb2_buf {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
 
 
 
 struct vip_buffer {
-       struct vb2_buffer       vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head        list;
        dma_addr_t              dma;
 };
-static inline struct vip_buffer *to_vip_buffer(struct vb2_buffer *vb2)
+static inline struct vip_buffer *to_vip_buffer(struct vb2_v4l2_buffer *vb2)
 {
        return container_of(vb2, struct vip_buffer, vb);
 }
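
Every driver touched in these hunks follows the same two-step downcast in its vb2 callbacks and the same split on the completion path: v4l2 metadata is written through the vb2_v4l2_buffer wrapper, while the plane helpers and vb2_buffer_done() keep taking the embedded vb2_buffer. A minimal sketch for reference only; the foo_* names are placeholders and not part of this patch:

#include <linux/list.h>
#include <media/v4l2-common.h>          /* v4l2_get_timestamp() */
#include <media/videobuf2-v4l2.h>       /* vb2_v4l2_buffer, to_vb2_v4l2_buffer() */

struct foo_buffer {
        struct vb2_v4l2_buffer vb;      /* kept as the first member, as above */
        struct list_head list;
};

static inline struct foo_buffer *to_foo_buffer(struct vb2_v4l2_buffer *vbuf)
{
        return container_of(vbuf, struct foo_buffer, vb);
}

static int foo_buf_prepare(struct vb2_buffer *vb)
{
        /* callbacks still receive a vb2_buffer; recover the wrapper first */
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

        vbuf->field = V4L2_FIELD_NONE;  /* v4l2 field sits in the wrapper */
        /* plane helpers keep operating on the vb2_buffer itself */
        vb2_set_plane_payload(vb, 0, vb2_plane_size(vb, 0));
        return 0;
}

static void foo_buf_complete(struct foo_buffer *buf, unsigned int seq)
{
        /* completion: v4l2 metadata on the wrapper, done() on vb2_buf */
        v4l2_get_timestamp(&buf->vb.timestamp);
        buf->vb.sequence = seq;
        vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}

The individual hunks differ only in locking and in where the completion path runs (IRQ handler, buf_finish or stop_streaming).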
 };
 static int buffer_init(struct vb2_buffer *vb)
 {
-       struct vip_buffer *vip_buf = to_vip_buffer(vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
 
        vip_buf->dma = vb2_dma_contig_plane_dma_addr(vb, 0);
        INIT_LIST_HEAD(&vip_buf->list);
 
 static int buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
-       struct vip_buffer *vip_buf = to_vip_buffer(vb);
+       struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
        unsigned long size;
 
        size = vip->format.sizeimage;
                return -EINVAL;
        }
 
-       vb2_set_plane_payload(&vip_buf->vb, 0, size);
+       vb2_set_plane_payload(&vip_buf->vb.vb2_buf, 0, size);
 
        return 0;
 }
 static void buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
-       struct vip_buffer *vip_buf = to_vip_buffer(vb);
+       struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
 
        spin_lock(&vip->lock);
        list_add_tail(&vip_buf->list, &vip->buffer_list);
 }
 static void buffer_finish(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
-       struct vip_buffer *vip_buf = to_vip_buffer(vb);
+       struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
 
        /* Buffer handled, remove it from the list */
        spin_lock(&vip->lock);
        /* Release all active buffers */
        spin_lock(&vip->lock);
        list_for_each_entry_safe(vip_buf, node, &vip->buffer_list, list) {
-               vb2_buffer_done(&vip_buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&vip_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                list_del(&vip_buf->list);
        }
        spin_unlock(&vip->lock);
                /* Disable acquisition */
                reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA);
                /* Remove the active buffer from the list */
-               v4l2_get_timestamp(&vip->active->vb.v4l2_buf.timestamp);
-               vip->active->vb.v4l2_buf.sequence = vip->sequence++;
-               vb2_buffer_done(&vip->active->vb, VB2_BUF_STATE_DONE);
+               v4l2_get_timestamp(&vip->active->vb.timestamp);
+               vip->active->vb.sequence = vip->sequence++;
+               vb2_buffer_done(&vip->active->vb.vb2_buf, VB2_BUF_STATE_DONE);
        }
 
        return IRQ_HANDLED;
 
  */
 static void tw68_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vb2_queue *vq = vb->vb2_queue;
        struct tw68_dev *dev = vb2_get_drv_priv(vq);
-       struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
+       struct tw68_buf *buf = container_of(vbuf, struct tw68_buf, vb);
        struct tw68_buf *prev;
        unsigned long flags;
 
  */
 static int tw68_buf_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vb2_queue *vq = vb->vb2_queue;
        struct tw68_dev *dev = vb2_get_drv_priv(vq);
-       struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
+       struct tw68_buf *buf = container_of(vbuf, struct tw68_buf, vb);
        struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
        unsigned size, bpl;
 
 
 static void tw68_buf_finish(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vb2_queue *vq = vb->vb2_queue;
        struct tw68_dev *dev = vb2_get_drv_priv(vq);
-       struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
+       struct tw68_buf *buf = container_of(vbuf, struct tw68_buf, vb);
 
        pci_free_consistent(dev->pci, buf->size, buf->cpu, buf->dma);
 }
                        container_of(dev->active.next, struct tw68_buf, list);
 
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
 }
 
                buf = list_entry(dev->active.next, struct tw68_buf, list);
                list_del(&buf->list);
                spin_unlock(&dev->slock);
-               v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-               buf->vb.v4l2_buf.field = dev->field;
-               buf->vb.v4l2_buf.sequence = dev->seqnr++;
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+               v4l2_get_timestamp(&buf->vb.timestamp);
+               buf->vb.field = dev->field;
+               buf->vb.sequence = dev->seqnr++;
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
                status &= ~(TW68_DMAPI);
                if (0 == status)
                        return;
 
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-dma-sg.h>
 
 #include "tw68-reg.h"
 
 /* buffer for one video/vbi/ts frame */
 struct tw68_buf {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 
        unsigned int   size;
 
        return container_of(ccdc, struct vpfe_device, ccdc);
 }
 
-static inline struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_buffer *vb)
+static inline
+struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_v4l2_buffer *vb)
 {
        return container_of(vb, struct vpfe_cap_buffer, vb);
 }
        list_del(&vpfe->next_frm->list);
 
        vpfe_set_sdr_addr(&vpfe->ccdc,
-                      vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb, 0));
+              vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
 }
 
 static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
 {
        unsigned long addr;
 
-       addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb, 0) +
+       addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
                                        vpfe->field_off;
 
        vpfe_set_sdr_addr(&vpfe->ccdc, addr);
  */
 static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
 {
-       v4l2_get_timestamp(&vpfe->cur_frm->vb.v4l2_buf.timestamp);
-       vpfe->cur_frm->vb.v4l2_buf.field = vpfe->fmt.fmt.pix.field;
-       vpfe->cur_frm->vb.v4l2_buf.sequence = vpfe->sequence++;
-       vb2_buffer_done(&vpfe->cur_frm->vb, VB2_BUF_STATE_DONE);
+       v4l2_get_timestamp(&vpfe->cur_frm->vb.timestamp);
+       vpfe->cur_frm->vb.field = vpfe->fmt.fmt.pix.field;
+       vpfe->cur_frm->vb.sequence = vpfe->sequence++;
+       vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
        vpfe->cur_frm = vpfe->next_frm;
 }
 
  */
 static int vpfe_buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
 
        vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage);
        if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
                return -EINVAL;
 
-       vb->v4l2_buf.field = vpfe->fmt.fmt.pix.field;
+       vbuf->field = vpfe->fmt.fmt.pix.field;
 
        return 0;
 }
  */
 static void vpfe_buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
-       struct vpfe_cap_buffer *buf = to_vpfe_buffer(vb);
+       struct vpfe_cap_buffer *buf = to_vpfe_buffer(vbuf);
        unsigned long flags = 0;
 
        /* add the buffer to the DMA queue */
        list_del(&vpfe->cur_frm->list);
        spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
 
-       addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb, 0);
+       addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb.vb2_buf, 0);
 
        vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr));
 
 err:
        list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
        }
 
        return ret;
        /* release all active buffers */
        spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
        if (vpfe->cur_frm == vpfe->next_frm) {
-               vb2_buffer_done(&vpfe->cur_frm->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
        } else {
                if (vpfe->cur_frm != NULL)
-                       vb2_buffer_done(&vpfe->cur_frm->vb,
+                       vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
                                        VB2_BUF_STATE_ERROR);
                if (vpfe->next_frm != NULL)
-                       vb2_buffer_done(&vpfe->next_frm->vb,
+                       vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
                                        VB2_BUF_STATE_ERROR);
        }
 
                vpfe->next_frm = list_entry(vpfe->dma_queue.next,
                                                struct vpfe_cap_buffer, list);
                list_del(&vpfe->next_frm->list);
-               vb2_buffer_done(&vpfe->next_frm->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
 }
 
 #include <media/v4l2-dev.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-dma-contig.h>
 
 #include "am437x-vpfe_regs.h"
 };
 
 struct vpfe_cap_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
 
 };
 
 struct bcap_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
 
 static irqreturn_t bcap_isr(int irq, void *dev_id);
 
-static struct bcap_buffer *to_bcap_vb(struct vb2_buffer *vb)
+static struct bcap_buffer *to_bcap_vb(struct vb2_v4l2_buffer *vb)
 {
        return container_of(vb, struct bcap_buffer, vb);
 }
 
 static int bcap_buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
        unsigned long size = bcap_dev->fmt.sizeimage;
 
        }
        vb2_set_plane_payload(vb, 0, size);
 
-       vb->v4l2_buf.field = bcap_dev->fmt.field;
+       vbuf->field = bcap_dev->fmt.field;
 
        return 0;
 }
 
 static void bcap_buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct bcap_buffer *buf = to_bcap_vb(vb);
+       struct bcap_buffer *buf = to_bcap_vb(vbuf);
        unsigned long flags;
 
        spin_lock_irqsave(&bcap_dev->lock, flags);
 
 static void bcap_buffer_cleanup(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct bcap_buffer *buf = to_bcap_vb(vb);
+       struct bcap_buffer *buf = to_bcap_vb(vbuf);
        unsigned long flags;
 
        spin_lock_irqsave(&bcap_dev->lock, flags);
                                        struct bcap_buffer, list);
        /* remove buffer from the dma queue */
        list_del_init(&bcap_dev->cur_frm->list);
-       addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
+       addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb.vb2_buf,
+                                               0);
        /* update DMA address */
        ppi->ops->update_addr(ppi, (unsigned long)addr);
        /* enable ppi */
 err:
        list_for_each_entry_safe(buf, tmp, &bcap_dev->dma_queue, list) {
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
        }
 
        return ret;
 
        /* release all active buffers */
        if (bcap_dev->cur_frm)
-               vb2_buffer_done(&bcap_dev->cur_frm->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&bcap_dev->cur_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
 
        while (!list_empty(&bcap_dev->dma_queue)) {
                bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
                                                struct bcap_buffer, list);
                list_del_init(&bcap_dev->cur_frm->list);
-               vb2_buffer_done(&bcap_dev->cur_frm->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&bcap_dev->cur_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
        }
 }
 
 {
        struct ppi_if *ppi = dev_id;
        struct bcap_device *bcap_dev = ppi->priv;
-       struct vb2_buffer *vb = &bcap_dev->cur_frm->vb;
+       struct vb2_v4l2_buffer *vbuf = &bcap_dev->cur_frm->vb;
+       struct vb2_buffer *vb = &vbuf->vb2_buf;
        dma_addr_t addr;
 
        spin_lock(&bcap_dev->lock);
 
        if (!list_empty(&bcap_dev->dma_queue)) {
-               v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+               v4l2_get_timestamp(&vbuf->timestamp);
                if (ppi->err) {
                        vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
                        ppi->err = false;
                } else {
-                       vb->v4l2_buf.sequence = bcap_dev->sequence++;
+                       vbuf->sequence = bcap_dev->sequence++;
                        vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
                }
                bcap_dev->cur_frm = list_entry(bcap_dev->dma_queue.next,
        if (bcap_dev->stop) {
                complete(&bcap_dev->comp);
        } else {
-               addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
+               addr = vb2_dma_contig_plane_dma_addr(
+                               &bcap_dev->cur_frm->vb.vb2_buf, 0);
                ppi->ops->update_addr(ppi, (unsigned long)addr);
                ppi->ops->start(ppi);
        }
 
 }
 
 static int coda_bitstream_queue(struct coda_ctx *ctx,
-                               struct vb2_buffer *src_buf)
+                               struct vb2_v4l2_buffer *src_buf)
 {
-       u32 src_size = vb2_get_plane_payload(src_buf, 0);
+       u32 src_size = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
        u32 n;
 
-       n = kfifo_in(&ctx->bitstream_fifo, vb2_plane_vaddr(src_buf, 0),
-                    src_size);
+       n = kfifo_in(&ctx->bitstream_fifo,
+                       vb2_plane_vaddr(&src_buf->vb2_buf, 0), src_size);
        if (n < src_size)
                return -ENOSPC;
 
-       src_buf->v4l2_buf.sequence = ctx->qsequence++;
+       src_buf->sequence = ctx->qsequence++;
 
        return 0;
 }
 
 static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
-                                    struct vb2_buffer *src_buf)
+                                    struct vb2_v4l2_buffer *src_buf)
 {
        int ret;
 
        if (coda_get_bitstream_payload(ctx) +
-           vb2_get_plane_payload(src_buf, 0) + 512 >= ctx->bitstream.size)
+           vb2_get_plane_payload(&src_buf->vb2_buf, 0) + 512 >=
+           ctx->bitstream.size)
                return false;
 
-       if (vb2_plane_vaddr(src_buf, 0) == NULL) {
+       if (vb2_plane_vaddr(&src_buf->vb2_buf, 0) == NULL) {
                v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n");
                return true;
        }
 
 void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
 {
-       struct vb2_buffer *src_buf;
+       struct vb2_v4l2_buffer *src_buf;
        struct coda_buffer_meta *meta;
        unsigned long flags;
        u32 start;
                }
 
                /* Dump empty buffers */
-               if (!vb2_get_plane_payload(src_buf, 0)) {
+               if (!vb2_get_plane_payload(&src_buf->vb2_buf, 0)) {
                        src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
                        v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
                        continue;
 
                        meta = kmalloc(sizeof(*meta), GFP_KERNEL);
                        if (meta) {
-                               meta->sequence = src_buf->v4l2_buf.sequence;
-                               meta->timecode = src_buf->v4l2_buf.timecode;
-                               meta->timestamp = src_buf->v4l2_buf.timestamp;
+                               meta->sequence = src_buf->sequence;
+                               meta->timecode = src_buf->timecode;
+                               meta->timestamp = src_buf->timestamp;
                                meta->start = start;
                                meta->end = ctx->bitstream_fifo.kfifo.in &
                                            ctx->bitstream_fifo.kfifo.mask;
        return ret;
 }
 
-static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf,
+static int coda_encode_header(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
                              int header_code, u8 *header, int *size)
 {
+       struct vb2_buffer *vb = &buf->vb2_buf;
        struct coda_dev *dev = ctx->dev;
        size_t bufsize;
        int ret;
        int i;
 
        if (dev->devtype->product == CODA_960)
-               memset(vb2_plane_vaddr(buf, 0), 0, 64);
+               memset(vb2_plane_vaddr(vb, 0), 0, 64);
 
-       coda_write(dev, vb2_dma_contig_plane_dma_addr(buf, 0),
+       coda_write(dev, vb2_dma_contig_plane_dma_addr(vb, 0),
                   CODA_CMD_ENC_HEADER_BB_START);
-       bufsize = vb2_plane_size(buf, 0);
+       bufsize = vb2_plane_size(vb, 0);
        if (dev->devtype->product == CODA_960)
                bufsize /= 1024;
        coda_write(dev, bufsize, CODA_CMD_ENC_HEADER_BB_SIZE);
 
        if (dev->devtype->product == CODA_960) {
                for (i = 63; i > 0; i--)
-                       if (((char *)vb2_plane_vaddr(buf, 0))[i] != 0)
+                       if (((char *)vb2_plane_vaddr(vb, 0))[i] != 0)
                                break;
                *size = i + 1;
        } else {
                *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) -
                        coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
        }
-       memcpy(header, vb2_plane_vaddr(buf, 0), *size);
+       memcpy(header, vb2_plane_vaddr(vb, 0), *size);
 
        return 0;
 }
        struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
        struct coda_q_data *q_data_src, *q_data_dst;
        u32 bitstream_buf, bitstream_size;
-       struct vb2_buffer *buf;
+       struct vb2_v4l2_buffer *buf;
        int gamma, ret, value;
        u32 dst_fourcc;
        int num_fb;
        dst_fourcc = q_data_dst->fourcc;
 
        buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
-       bitstream_buf = vb2_dma_contig_plane_dma_addr(buf, 0);
+       bitstream_buf = vb2_dma_contig_plane_dma_addr(&buf->vb2_buf, 0);
        bitstream_size = q_data_dst->sizeimage;
 
        if (!coda_is_initialized(dev)) {
 static int coda_prepare_encode(struct coda_ctx *ctx)
 {
        struct coda_q_data *q_data_src, *q_data_dst;
-       struct vb2_buffer *src_buf, *dst_buf;
+       struct vb2_v4l2_buffer *src_buf, *dst_buf;
        struct coda_dev *dev = ctx->dev;
        int force_ipicture;
        int quant_param = 0;
        q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
        dst_fourcc = q_data_dst->fourcc;
 
-       src_buf->v4l2_buf.sequence = ctx->osequence;
-       dst_buf->v4l2_buf.sequence = ctx->osequence;
+       src_buf->sequence = ctx->osequence;
+       dst_buf->sequence = ctx->osequence;
        ctx->osequence++;
 
        /*
         * frame as IDR. This is a problem for some decoders that can't
         * recover when a frame is lost.
         */
-       if (src_buf->v4l2_buf.sequence % ctx->params.gop_size) {
-               src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
-               src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+       if (src_buf->sequence % ctx->params.gop_size) {
+               src_buf->flags |= V4L2_BUF_FLAG_PFRAME;
+               src_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME;
        } else {
-               src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
-               src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+               src_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+               src_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
        }
 
        if (dev->devtype->product == CODA_960)
         * Copy headers at the beginning of the first frame for H.264 only.
         * In MPEG4 they are already copied by the coda.
         */
-       if (src_buf->v4l2_buf.sequence == 0) {
+       if (src_buf->sequence == 0) {
                pic_stream_buffer_addr =
-                       vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
+                       vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0) +
                        ctx->vpu_header_size[0] +
                        ctx->vpu_header_size[1] +
                        ctx->vpu_header_size[2];
                        ctx->vpu_header_size[0] -
                        ctx->vpu_header_size[1] -
                        ctx->vpu_header_size[2];
-               memcpy(vb2_plane_vaddr(dst_buf, 0),
+               memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0),
                       &ctx->vpu_header[0][0], ctx->vpu_header_size[0]);
-               memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0],
-                      &ctx->vpu_header[1][0], ctx->vpu_header_size[1]);
-               memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0] +
-                       ctx->vpu_header_size[1], &ctx->vpu_header[2][0],
-                       ctx->vpu_header_size[2]);
+               memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0)
+                       + ctx->vpu_header_size[0], &ctx->vpu_header[1][0],
+                       ctx->vpu_header_size[1]);
+               memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0)
+                       + ctx->vpu_header_size[0] + ctx->vpu_header_size[1],
+                       &ctx->vpu_header[2][0], ctx->vpu_header_size[2]);
        } else {
                pic_stream_buffer_addr =
-                       vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+                       vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
                pic_stream_buffer_size = q_data_dst->sizeimage;
        }
 
-       if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) {
+       if (src_buf->flags & V4L2_BUF_FLAG_KEYFRAME) {
                force_ipicture = 1;
                switch (dst_fourcc) {
                case V4L2_PIX_FMT_H264:
 
 static void coda_finish_encode(struct coda_ctx *ctx)
 {
-       struct vb2_buffer *src_buf, *dst_buf;
+       struct vb2_v4l2_buffer *src_buf, *dst_buf;
        struct coda_dev *dev = ctx->dev;
        u32 wr_ptr, start_ptr;
 
        wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
 
        /* Calculate bytesused field */
-       if (dst_buf->v4l2_buf.sequence == 0) {
-               vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr +
+       if (dst_buf->sequence == 0) {
+               vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
                                        ctx->vpu_header_size[0] +
                                        ctx->vpu_header_size[1] +
                                        ctx->vpu_header_size[2]);
        } else {
-               vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr);
+               vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr);
        }
 
        v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n",
        coda_read(dev, CODA_RET_ENC_PIC_FLAG);
 
        if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) {
-               dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
-               dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+               dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+               dst_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
        } else {
-               dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
-               dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+               dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
+               dst_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME;
        }
 
-       dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
-       dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-       dst_buf->v4l2_buf.flags |=
-               src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-       dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
+       dst_buf->timestamp = src_buf->timestamp;
+       dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+       dst_buf->flags |=
+               src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+       dst_buf->timecode = src_buf->timecode;
 
        v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
 
 
        v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
                "job finished: encoding frame (%d) (%s)\n",
-               dst_buf->v4l2_buf.sequence,
-               (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
+               dst_buf->sequence,
+               (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ?
                "KEYFRAME" : "PFRAME");
 }
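
For the mem2mem drivers (coda here, gsc and fimc further down), the same split also shows up when v4l2 metadata is carried from the source to the destination buffer: the buffers handled through the v4l2_m2m helpers are now addressed as vb2_v4l2_buffer. A hedged sketch of that pattern, with placeholder foo_* names; only calls already used in the hunks are assumed:

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

/* illustrative only: complete one m2m job, copying src metadata to dst */
static void foo_m2m_job_finish(struct v4l2_m2m_ctx *m2m_ctx,
                               enum vb2_buffer_state state)
{
        struct vb2_v4l2_buffer *src_buf, *dst_buf;

        src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
        dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);

        /* timestamp, timecode and the timestamp-source flags are read and
         * written directly on the vb2_v4l2_buffer */
        dst_buf->timestamp = src_buf->timestamp;
        dst_buf->timecode = src_buf->timecode;
        dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
        dst_buf->flags |= src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

        v4l2_m2m_buf_done(src_buf, state);
        v4l2_m2m_buf_done(dst_buf, state);
}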
 
 
 static int coda_prepare_decode(struct coda_ctx *ctx)
 {
-       struct vb2_buffer *dst_buf;
+       struct vb2_v4l2_buffer *dst_buf;
        struct coda_dev *dev = ctx->dev;
        struct coda_q_data *q_data_dst;
        struct coda_buffer_meta *meta;
                 * well as the rotator buffer output.
                 * ROT_INDEX needs to be < 0x40, but > ctx->num_internal_frames.
                 */
-               coda_write(dev, CODA_MAX_FRAMEBUFFERS + dst_buf->v4l2_buf.index,
+               coda_write(dev, CODA_MAX_FRAMEBUFFERS + dst_buf->vb2_buf.index,
                                CODA9_CMD_DEC_PIC_ROT_INDEX);
 
                reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y;
        struct coda_dev *dev = ctx->dev;
        struct coda_q_data *q_data_src;
        struct coda_q_data *q_data_dst;
-       struct vb2_buffer *dst_buf;
+       struct vb2_v4l2_buffer *dst_buf;
        struct coda_buffer_meta *meta;
        unsigned long payload;
        unsigned long flags;
        if (ctx->display_idx >= 0 &&
            ctx->display_idx < ctx->num_internal_frames) {
                dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-               dst_buf->v4l2_buf.sequence = ctx->osequence++;
+               dst_buf->sequence = ctx->osequence++;
 
-               dst_buf->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
+               dst_buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
                                             V4L2_BUF_FLAG_PFRAME |
                                             V4L2_BUF_FLAG_BFRAME);
-               dst_buf->v4l2_buf.flags |= ctx->frame_types[ctx->display_idx];
+               dst_buf->flags |= ctx->frame_types[ctx->display_idx];
                meta = &ctx->frame_metas[ctx->display_idx];
-               dst_buf->v4l2_buf.timecode = meta->timecode;
-               dst_buf->v4l2_buf.timestamp = meta->timestamp;
+               dst_buf->timecode = meta->timecode;
+               dst_buf->timestamp = meta->timestamp;
 
                trace_coda_dec_rot_done(ctx, dst_buf, meta);
 
                        payload = width * height * 2;
                        break;
                }
-               vb2_set_plane_payload(dst_buf, 0, payload);
+               vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload);
 
                coda_m2m_buf_done(ctx, dst_buf, ctx->frame_errors[display_idx] ?
                                  VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
 
                v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
                        "job finished: decoding frame (%d) (%s)\n",
-                       dst_buf->v4l2_buf.sequence,
-                       (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
+                       dst_buf->sequence,
+                       (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ?
                        "KEYFRAME" : "PFRAME");
        } else {
                v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
 
 }
 
 void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
-                    struct vb2_buffer *buf, unsigned int reg_y)
+                    struct vb2_v4l2_buffer *buf, unsigned int reg_y)
 {
-       u32 base_y = vb2_dma_contig_plane_dma_addr(buf, 0);
+       u32 base_y = vb2_dma_contig_plane_dma_addr(&buf->vb2_buf, 0);
        u32 base_cb, base_cr;
 
        switch (q_data->fourcc) {
 }
 
 static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
-                                     struct vb2_buffer *buf)
+                                     struct vb2_v4l2_buffer *buf)
 {
        struct vb2_queue *src_vq;
 
        src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
 
        return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) &&
-               (buf->v4l2_buf.sequence == (ctx->qsequence - 1)));
+               (buf->sequence == (ctx->qsequence - 1)));
 }
 
-void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_buffer *buf,
+void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
                       enum vb2_buffer_state state)
 {
        const struct v4l2_event eos_event = {
        };
 
        if (coda_buf_is_end_of_stream(ctx, buf)) {
-               buf->v4l2_buf.flags |= V4L2_BUF_FLAG_LAST;
+               buf->flags |= V4L2_BUF_FLAG_LAST;
 
                v4l2_event_queue_fh(&ctx->fh, &eos_event);
        }
 
 static void coda_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
        struct vb2_queue *vq = vb->vb2_queue;
        struct coda_q_data *q_data;
                if (vb2_get_plane_payload(vb, 0) == 0)
                        coda_bit_stream_end_flag(ctx);
                mutex_lock(&ctx->bitstream_mutex);
-               v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+               v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
                if (vb2_is_streaming(vb->vb2_queue))
                        coda_fill_bitstream(ctx, true);
                mutex_unlock(&ctx->bitstream_mutex);
        } else {
-               v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+               v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
        }
 }
 
        struct coda_ctx *ctx = vb2_get_drv_priv(q);
        struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev;
        struct coda_q_data *q_data_src, *q_data_dst;
-       struct vb2_buffer *buf;
+       struct vb2_v4l2_buffer *buf;
        int ret = 0;
 
        q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
 {
        struct coda_ctx *ctx = vb2_get_drv_priv(q);
        struct coda_dev *dev = ctx->dev;
-       struct vb2_buffer *buf;
+       struct vb2_v4l2_buffer *buf;
        unsigned long flags;
        bool stop;
 
 
        return 0;
 }
 
-bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb)
+bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_v4l2_buffer *vb)
 {
-       void *vaddr = vb2_plane_vaddr(vb, 0);
+       void *vaddr = vb2_plane_vaddr(&vb->vb2_buf, 0);
        u16 soi = be16_to_cpup((__be16 *)vaddr);
        u16 eoi = be16_to_cpup((__be16 *)(vaddr +
-                                         vb2_get_plane_payload(vb, 0) - 2));
+                         vb2_get_plane_payload(&vb->vb2_buf, 0) - 2));
 
        return soi == SOI_MARKER && eoi == EOI_MARKER;
 }
 
 void coda_write(struct coda_dev *dev, u32 data, u32 reg);
 unsigned int coda_read(struct coda_dev *dev, u32 reg);
 void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
-                    struct vb2_buffer *buf, unsigned int reg_y);
+                    struct vb2_v4l2_buffer *buf, unsigned int reg_y);
 
 int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
                       size_t size, const char *name, struct dentry *parent);
 
 void coda_bit_stream_end_flag(struct coda_ctx *ctx);
 
-void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_buffer *buf,
+void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
                       enum vb2_buffer_state state);
 
 int coda_h264_padding(int size, char *p);
 
-bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb);
+bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_v4l2_buffer *vb);
 int coda_jpeg_write_tables(struct coda_ctx *ctx);
 void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality);
 
 
 );
 
 DECLARE_EVENT_CLASS(coda_buf_class,
-       TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
+       TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
 
        TP_ARGS(ctx, buf),
 
 
        TP_fast_assign(
                __entry->minor = ctx->fh.vdev->minor;
-               __entry->index = buf->v4l2_buf.index;
+               __entry->index = buf->vb2_buf.index;
                __entry->ctx = ctx->idx;
        ),
 
 );
 
 DEFINE_EVENT(coda_buf_class, coda_enc_pic_run,
-       TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
+       TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
        TP_ARGS(ctx, buf)
 );
 
 DEFINE_EVENT(coda_buf_class, coda_enc_pic_done,
-       TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
+       TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
        TP_ARGS(ctx, buf)
 );
 
 DECLARE_EVENT_CLASS(coda_buf_meta_class,
-       TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
+       TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
                 struct coda_buffer_meta *meta),
 
        TP_ARGS(ctx, buf, meta),
 
        TP_fast_assign(
                __entry->minor = ctx->fh.vdev->minor;
-               __entry->index = buf->v4l2_buf.index;
+               __entry->index = buf->vb2_buf.index;
                __entry->start = meta->start;
                __entry->end = meta->end;
                __entry->ctx = ctx->idx;
 );
 
 DEFINE_EVENT(coda_buf_meta_class, coda_bit_queue,
-       TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
+       TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
                 struct coda_buffer_meta *meta),
        TP_ARGS(ctx, buf, meta)
 );
 );
 
 DEFINE_EVENT(coda_buf_meta_class, coda_dec_rot_done,
-       TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
+       TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
                 struct coda_buffer_meta *meta),
        TP_ARGS(ctx, buf, meta)
 );
 
        if (layer->cur_frm == layer->next_frm)
                return;
 
-       v4l2_get_timestamp(&layer->cur_frm->vb.v4l2_buf.timestamp);
-       vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_DONE);
+       v4l2_get_timestamp(&layer->cur_frm->vb.timestamp);
+       vb2_buffer_done(&layer->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
        /* Make cur_frm pointing to next_frm */
        layer->cur_frm = layer->next_frm;
 }
        list_del(&layer->next_frm->list);
        spin_unlock(&disp_obj->dma_queue_lock);
        /* Mark state of the frame to active */
-       layer->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
-       addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb, 0);
+       layer->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+       addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb.vb2_buf, 0);
        osd_device->ops.start_layer(osd_device,
                        layer->layer_info.id,
                        addr,
  */
 static void vpbe_buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        /* Get the file handle object and layer object */
-       struct vpbe_disp_buffer *buf = container_of(vb,
+       struct vpbe_disp_buffer *buf = container_of(vbuf,
                                struct vpbe_disp_buffer, vb);
        struct vpbe_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
        struct vpbe_display *disp = layer->disp_dev;
        /* Remove buffer from the buffer queue */
        list_del(&layer->cur_frm->list);
        /* Mark state of the current frame to active */
-       layer->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+       layer->cur_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
        /* Initialize field_id and started member */
        layer->field_id = 0;
 
        if (ret < 0) {
                struct vpbe_disp_buffer *buf, *tmp;
 
-               vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_QUEUED);
+               vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_QUEUED);
                list_for_each_entry_safe(buf, tmp, &layer->dma_queue, list) {
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&buf->vb.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                }
 
                return ret;
        /* release all active buffers */
        spin_lock_irqsave(&disp->dma_queue_lock, flags);
        if (layer->cur_frm == layer->next_frm) {
-               vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
        } else {
                if (layer->cur_frm != NULL)
-                       vb2_buffer_done(&layer->cur_frm->vb,
+                       vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
                                        VB2_BUF_STATE_ERROR);
                if (layer->next_frm != NULL)
-                       vb2_buffer_done(&layer->next_frm->vb,
+                       vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
                                        VB2_BUF_STATE_ERROR);
        }
 
                layer->next_frm = list_entry(layer->dma_queue.next,
                                                struct vpbe_disp_buffer, list);
                list_del(&layer->next_frm->list);
-               vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
 }
        unsigned long addr;
        int ret;
 
-       addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb, 0);
+       addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb.vb2_buf, 0);
        /* Set address in the display registers */
        osd_device->ops.start_layer(osd_device,
                                    layer->layer_info.id,
 
 /* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */
 static int ycmux_mode;
 
-static inline struct vpif_cap_buffer *to_vpif_buffer(struct vb2_buffer *vb)
+static inline
+struct vpif_cap_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb)
 {
        return container_of(vb, struct vpif_cap_buffer, vb);
 }
  */
 static int vpif_buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vb2_queue *q = vb->vb2_queue;
        struct channel_obj *ch = vb2_get_drv_priv(q);
        struct common_obj *common;
        if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
                return -EINVAL;
 
-       vb->v4l2_buf.field = common->fmt.fmt.pix.field;
+       vbuf->field = common->fmt.fmt.pix.field;
 
        addr = vb2_dma_contig_plane_dma_addr(vb, 0);
        if (!IS_ALIGNED((addr + common->ytop_off), 8) ||
  */
 static void vpif_buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
-       struct vpif_cap_buffer *buf = to_vpif_buffer(vb);
+       struct vpif_cap_buffer *buf = to_vpif_buffer(vbuf);
        struct common_obj *common;
        unsigned long flags;
 
        list_del(&common->cur_frm->list);
        spin_unlock_irqrestore(&common->irqlock, flags);
 
-       addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0);
+       addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0);
 
        common->set_addr(addr + common->ytop_off,
                         addr + common->ybtm_off,
 err:
        list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
        }
        spin_unlock_irqrestore(&common->irqlock, flags);
 
        /* release all active buffers */
        spin_lock_irqsave(&common->irqlock, flags);
        if (common->cur_frm == common->next_frm) {
-               vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
        } else {
                if (common->cur_frm != NULL)
-                       vb2_buffer_done(&common->cur_frm->vb,
+                       vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
                                        VB2_BUF_STATE_ERROR);
                if (common->next_frm != NULL)
-                       vb2_buffer_done(&common->next_frm->vb,
+                       vb2_buffer_done(&common->next_frm->vb.vb2_buf,
                                        VB2_BUF_STATE_ERROR);
        }
 
                common->next_frm = list_entry(common->dma_queue.next,
                                                struct vpif_cap_buffer, list);
                list_del(&common->next_frm->list);
-               vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&common->next_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&common->irqlock, flags);
 }
  */
 static void vpif_process_buffer_complete(struct common_obj *common)
 {
-       v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp);
-       vb2_buffer_done(&common->cur_frm->vb,
-                                           VB2_BUF_STATE_DONE);
+       v4l2_get_timestamp(&common->cur_frm->vb.timestamp);
+       vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
        /* Make curFrm pointing to nextFrm */
        common->cur_frm = common->next_frm;
 }
        /* Remove that buffer from the buffer queue */
        list_del(&common->next_frm->list);
        spin_unlock(&common->irqlock);
-       addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
+       addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0);
 
        /* Set top and bottom field addresses in VPIF registers */
        common->set_addr(addr + common->ytop_off,
 
 };
 
 struct vpif_cap_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
 
 static void vpif_calculate_offsets(struct channel_obj *ch);
 static void vpif_config_addr(struct channel_obj *ch, int muxmode);
 
-static inline struct vpif_disp_buffer *to_vpif_buffer(struct vb2_buffer *vb)
+static inline
+struct vpif_disp_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb)
 {
        return container_of(vb, struct vpif_disp_buffer, vb);
 }
  */
 static int vpif_buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
        struct common_obj *common;
 
        if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
                return -EINVAL;
 
-       vb->v4l2_buf.field = common->fmt.fmt.pix.field;
+       vbuf->field = common->fmt.fmt.pix.field;
 
        if (vb->vb2_queue->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
                unsigned long addr = vb2_dma_contig_plane_dma_addr(vb, 0);
  */
 static void vpif_buffer_queue(struct vb2_buffer *vb)
 {
-       struct vpif_disp_buffer *buf = to_vpif_buffer(vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct vpif_disp_buffer *buf = to_vpif_buffer(vbuf);
        struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
        struct common_obj *common;
        unsigned long flags;
        list_del(&common->cur_frm->list);
        spin_unlock_irqrestore(&common->irqlock, flags);
 
-       addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0);
+       addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0);
        common->set_addr((addr + common->ytop_off),
                            (addr + common->ybtm_off),
                            (addr + common->ctop_off),
 err:
        list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
        }
        spin_unlock_irqrestore(&common->irqlock, flags);
 
        /* release all active buffers */
        spin_lock_irqsave(&common->irqlock, flags);
        if (common->cur_frm == common->next_frm) {
-               vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
        } else {
                if (common->cur_frm != NULL)
-                       vb2_buffer_done(&common->cur_frm->vb,
+                       vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
                                        VB2_BUF_STATE_ERROR);
                if (common->next_frm != NULL)
-                       vb2_buffer_done(&common->next_frm->vb,
+                       vb2_buffer_done(&common->next_frm->vb.vb2_buf,
                                        VB2_BUF_STATE_ERROR);
        }
 
                common->next_frm = list_entry(common->dma_queue.next,
                                                struct vpif_disp_buffer, list);
                list_del(&common->next_frm->list);
-               vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&common->next_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&common->irqlock, flags);
 }
        spin_unlock(&common->irqlock);
 
        /* Set top and bottom field addrs in VPIF registers */
-       addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
+       addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0);
        common->set_addr(addr + common->ytop_off,
                                 addr + common->ybtm_off,
                                 addr + common->ctop_off,
                /* one frame is displayed If next frame is
                 *  available, release cur_frm and move on */
                /* Copy frame display time */
-               v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp);
+               v4l2_get_timestamp(&common->cur_frm->vb.timestamp);
                /* Change status of the cur_frm */
-               vb2_buffer_done(&common->cur_frm->vb,
-                                           VB2_BUF_STATE_DONE);
+               vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+                                       VB2_BUF_STATE_DONE);
                /* Make cur_frm pointing to next_frm */
                common->cur_frm = common->next_frm;
 
                        if (!channel_first_int[i][channel_id]) {
                                /* Mark status of the cur_frm to
                                 * done and unlock semaphore on it */
-                               v4l2_get_timestamp(&common->cur_frm->vb.
-                                                  v4l2_buf.timestamp);
-                               vb2_buffer_done(&common->cur_frm->vb,
-                                           VB2_BUF_STATE_DONE);
+                               v4l2_get_timestamp(
+                                       &common->cur_frm->vb.timestamp);
+                               vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
+                                               VB2_BUF_STATE_DONE);
                                /* Make cur_frm pointing to next_frm */
                                common->cur_frm = common->next_frm;
                        }
 
 };
 
 struct vpif_disp_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
 
  * @idx : index of G-Scaler input buffer
  */
 struct gsc_input_buf {
-       struct vb2_buffer       vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head        list;
        int                     idx;
 };
 
 
 void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
 {
-       struct vb2_buffer *src_vb, *dst_vb;
+       struct vb2_v4l2_buffer *src_vb, *dst_vb;
 
        if (!ctx || !ctx->m2m_ctx)
                return;
        dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 
        if (src_vb && dst_vb) {
-               dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
-               dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
-               dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-               dst_vb->v4l2_buf.flags |=
-                       src_vb->v4l2_buf.flags
+               dst_vb->timestamp = src_vb->timestamp;
+               dst_vb->timecode = src_vb->timecode;
+               dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+               dst_vb->flags |=
+                       src_vb->flags
                        & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
 
                v4l2_m2m_buf_done(src_vb, vb_state);
 static int gsc_get_bufs(struct gsc_ctx *ctx)
 {
        struct gsc_frame *s_frame, *d_frame;
-       struct vb2_buffer *src_vb, *dst_vb;
+       struct vb2_v4l2_buffer *src_vb, *dst_vb;
        int ret;
 
        s_frame = &ctx->s_frame;
        d_frame = &ctx->d_frame;
 
        src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
-       ret = gsc_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
+       ret = gsc_prepare_addr(ctx, &src_vb->vb2_buf, s_frame, &s_frame->addr);
        if (ret)
                return ret;
 
        dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
-       ret = gsc_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);
+       ret = gsc_prepare_addr(ctx, &dst_vb->vb2_buf, d_frame, &d_frame->addr);
        if (ret)
                return ret;
 
-       dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+       dst_vb->timestamp = src_vb->timestamp;
 
        return 0;
 }
 
 static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 
        pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
 
        if (ctx->m2m_ctx)
-               v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+               v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
 }
 
 static struct vb2_ops gsc_m2m_qops = {
 
        /* Release unused buffers */
        while (!suspend && !list_empty(&cap->pending_buf_q)) {
                buf = fimc_pending_queue_pop(cap);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        /* If suspending put unused buffers onto pending queue */
        while (!list_empty(&cap->active_buf_q)) {
                if (suspend)
                        fimc_pending_queue_add(cap, buf);
                else
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+                       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
 
        fimc_hw_reset(fimc);
            test_bit(ST_CAPT_RUN, &fimc->state) && deq_buf) {
                v_buf = fimc_active_queue_pop(cap);
 
-               v4l2_get_timestamp(&v_buf->vb.v4l2_buf.timestamp);
-               v_buf->vb.v4l2_buf.sequence = cap->frame_count++;
+               v4l2_get_timestamp(&v_buf->vb.timestamp);
+               v_buf->vb.sequence = cap->frame_count++;
 
-               vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE);
+               vb2_buffer_done(&v_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
        }
 
        if (!list_empty(&cap->pending_buf_q)) {
                list_for_each_entry(v_buf, &cap->active_buf_q, list) {
                        if (v_buf->index != index)
                                continue;
-                       vaddr = vb2_plane_vaddr(&v_buf->vb, plane);
+                       vaddr = vb2_plane_vaddr(&v_buf->vb.vb2_buf, plane);
                        v4l2_subdev_call(csis, video, s_rx_buffer,
                                         vaddr, &size);
                        break;
                if (list_empty(&vid_cap->pending_buf_q))
                        break;
                buf = fimc_pending_queue_pop(vid_cap);
-               buffer_queue(&buf->vb);
+               buffer_queue(&buf->vb.vb2_buf);
        }
        return 0;
 
 
 static void buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct fimc_vid_buffer *buf
-               = container_of(vb, struct fimc_vid_buffer, vb);
+               = container_of(vbuf, struct fimc_vid_buffer, vb);
        struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
        struct fimc_dev *fimc = ctx->fimc_dev;
        struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
        int min_bufs;
 
        spin_lock_irqsave(&fimc->slock, flags);
-       fimc_prepare_addr(ctx, &buf->vb, &ctx->d_frame, &buf->paddr);
+       fimc_prepare_addr(ctx, &buf->vb.vb2_buf, &ctx->d_frame, &buf->paddr);
 
        if (!test_bit(ST_CAPT_SUSPENDED, &fimc->state) &&
            !test_bit(ST_CAPT_STREAM, &fimc->state) &&
                if (!list_empty(&fimc->vid_cap.active_buf_q)) {
                        buf = list_entry(fimc->vid_cap.active_buf_q.next,
                                         struct fimc_vid_buffer, list);
-                       vb2_set_plane_payload(&buf->vb, 0, *((u32 *)arg));
+                       vb2_set_plane_payload(&buf->vb.vb2_buf, 0,
+                                             *((u32 *)arg));
                }
                fimc_capture_irq_handler(fimc, 1);
                fimc_deactivate_capture(fimc);
 
  * @index: buffer index for the output DMA engine
  */
 struct fimc_vid_buffer {
-       struct vb2_buffer       vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head        list;
        struct fimc_addr        paddr;
        int                     index;
 
 
 static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
        struct fimc_is_video *video = &isp->video_capture;
        struct fimc_is *is = fimc_isp_to_is(isp);
-       struct isp_video_buf *ivb = to_isp_video_buf(vb);
+       struct isp_video_buf *ivb = to_isp_video_buf(vbuf);
        unsigned long flags;
        unsigned int i;
 
 
                        isp_dbg(2, &video->ve.vdev,
                                "dma_buf %pad (%d/%d/%d) addr: %pad\n",
-                               &buf_index, ivb->index, i, vb->v4l2_buf.index,
+                               &buf_index, ivb->index, i, vb->index,
                                &ivb->dma_addr[i]);
                }
 
 void fimc_isp_video_irq_handler(struct fimc_is *is)
 {
        struct fimc_is_video *video = &is->isp.video_capture;
-       struct vb2_buffer *vb;
+       struct vb2_v4l2_buffer *vbuf;
        int buf_index;
 
        /* TODO: Ensure the DMA is really stopped in stop_streaming callback */
                return;
 
        buf_index = (is->i2h_cmd.args[1] - 1) % video->buf_count;
-       vb = &video->buffers[buf_index]->vb;
+       vbuf = &video->buffers[buf_index]->vb;
 
-       v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
-       vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+       v4l2_get_timestamp(&vbuf->timestamp);
+       vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
 
        video->buf_mask &= ~BIT(buf_index);
        fimc_is_hw_set_isp_buf_mask(is, video->buf_mask);
 
 };
 
 struct isp_video_buf {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        dma_addr_t dma_addr[FIMC_ISP_MAX_PLANES];
        unsigned int index;
 };
 
        /* Release unused buffers */
        while (!suspend && !list_empty(&fimc->pending_buf_q)) {
                buf = fimc_lite_pending_queue_pop(fimc);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        /* If suspending put unused buffers onto pending queue */
        while (!list_empty(&fimc->active_buf_q)) {
                if (suspend)
                        fimc_lite_pending_queue_add(fimc, buf);
                else
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+                       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
 
        spin_unlock_irqrestore(&fimc->slock, flags);
            test_bit(ST_FLITE_RUN, &fimc->state) &&
            !list_empty(&fimc->active_buf_q)) {
                vbuf = fimc_lite_active_queue_pop(fimc);
-               v4l2_get_timestamp(&vbuf->vb.v4l2_buf.timestamp);
-               vbuf->vb.v4l2_buf.sequence = fimc->frame_count++;
+               v4l2_get_timestamp(&vbuf->vb.timestamp);
+               vbuf->vb.sequence = fimc->frame_count++;
                flite_hw_mask_dma_buffer(fimc, vbuf->index);
-               vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE);
+               vb2_buffer_done(&vbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
        }
 
        if (test_bit(ST_FLITE_CONFIG, &fimc->state))
 
 static void buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct flite_buffer *buf
-               = container_of(vb, struct flite_buffer, vb);
+               = container_of(vbuf, struct flite_buffer, vb);
        struct fimc_lite *fimc = vb2_get_drv_priv(vb->vb2_queue);
        unsigned long flags;
 
                if (list_empty(&fimc->pending_buf_q))
                        break;
                buf = fimc_lite_pending_queue_pop(fimc);
-               buffer_queue(&buf->vb);
+               buffer_queue(&buf->vb.vb2_buf);
        }
        return 0;
 }
 
  * @index: DMA start address register's index
  */
 struct flite_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
        dma_addr_t paddr;
        unsigned short index;
 
 
 void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
 {
-       struct vb2_buffer *src_vb, *dst_vb;
+       struct vb2_v4l2_buffer *src_vb, *dst_vb;
 
        if (!ctx || !ctx->fh.m2m_ctx)
                return;
 
 static void fimc_device_run(void *priv)
 {
-       struct vb2_buffer *src_vb, *dst_vb;
+       struct vb2_v4l2_buffer *src_vb, *dst_vb;
        struct fimc_ctx *ctx = priv;
        struct fimc_frame *sf, *df;
        struct fimc_dev *fimc;
        }
 
        src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
-       ret = fimc_prepare_addr(ctx, src_vb, sf, &sf->paddr);
+       ret = fimc_prepare_addr(ctx, &src_vb->vb2_buf, sf, &sf->paddr);
        if (ret)
                goto dma_unlock;
 
        dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
-       ret = fimc_prepare_addr(ctx, dst_vb, df, &df->paddr);
+       ret = fimc_prepare_addr(ctx, &dst_vb->vb2_buf, df, &df->paddr);
        if (ret)
                goto dma_unlock;
 
-       dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
-       dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-       dst_vb->v4l2_buf.flags |=
-               src_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+       dst_vb->timestamp = src_vb->timestamp;
+       dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+       dst_vb->flags |=
+               src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
 
        /* Reconfigure hardware if the context has changed. */
        if (fimc->m2m.ctx != ctx) {
 
 static void fimc_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-       v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+       v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 }
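
The same mem2mem conversion recurs in the fimc, deinterlace, emmaprp, g2d, jpu and s5p-jpeg hunks: the queue callback still receives a struct vb2_buffer and converts it with to_vb2_v4l2_buffer(), while the m2m helpers now hand out struct vb2_v4l2_buffer pointers whose embedded vb2_buf must be passed to the plane helpers. A minimal sketch of that pattern, outside the patch proper (my_ctx, my_buf_queue and my_device_run are hypothetical names; the vb2/v4l2-mem2mem calls are the ones used in the hunks above):

#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

struct my_ctx {					/* hypothetical m2m driver context */
	struct v4l2_fh fh;			/* fh.m2m_ctx is set up elsewhere */
};

static void my_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	/* the m2m queue helper now takes a vb2_v4l2_buffer */
	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	struct vb2_v4l2_buffer *dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/* v4l2 metadata now lives directly in vb2_v4l2_buffer ... */
	dst->timestamp = src->timestamp;
	dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	/* ... while the plane helpers still take the embedded vb2_buffer */
	vb2_set_plane_payload(&dst->vb2_buf, 0,
			      vb2_get_plane_payload(&src->vb2_buf, 0));
}
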
 
 static struct vb2_ops fimc_qops = {
 
 {
        struct deinterlace_ctx *curr_ctx = data;
        struct deinterlace_dev *pcdev = curr_ctx->dev;
-       struct vb2_buffer *src_vb, *dst_vb;
+       struct vb2_v4l2_buffer *src_vb, *dst_vb;
 
        atomic_set(&pcdev->busy, 0);
 
        src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
        dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
 
-       dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
-       dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-       dst_vb->v4l2_buf.flags |=
-               src_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-       dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
+       dst_vb->timestamp = src_vb->timestamp;
+       dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+       dst_vb->flags |=
+               src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+       dst_vb->timecode = src_vb->timecode;
 
        v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
        v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
                                  int do_callback)
 {
        struct deinterlace_q_data *s_q_data;
-       struct vb2_buffer *src_buf, *dst_buf;
+       struct vb2_v4l2_buffer *src_buf, *dst_buf;
        struct deinterlace_dev *pcdev = ctx->dev;
        struct dma_chan *chan = pcdev->dma_chan;
        struct dma_device *dmadev = chan->device;
        s_height = s_q_data->height;
        s_size = s_width * s_height;
 
-       p_in = (dma_addr_t)vb2_dma_contig_plane_dma_addr(src_buf, 0);
-       p_out = (dma_addr_t)vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+       p_in = (dma_addr_t)vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+       p_out = (dma_addr_t)vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf,
+                                                         0);
        if (!p_in || !p_out) {
                v4l2_err(&pcdev->v4l2_dev,
                         "Acquiring kernel pointers to buffers failed\n");
 
 static void deinterlace_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-       v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+
+       v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
 }
 
 static struct vb2_ops deinterlace_qops = {
 
 
 /*
  * Our buffer type for working with videobuf2.  Note that the vb2
- * developers have decreed that struct vb2_buffer must be at the
+ * developers have decreed that struct vb2_v4l2_buffer must be at the
  * beginning of this structure.
  */
 struct mcam_vb_buffer {
-       struct vb2_buffer vb_buf;
+       struct vb2_v4l2_buffer vb_buf;
        struct list_head queue;
        struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
        dma_addr_t dma_desc_pa;         /* Descriptor physical address */
        int dma_desc_nent;              /* Number of mapped descriptors */
 };
 
-static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb)
+static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
 {
        return container_of(vb, struct mcam_vb_buffer, vb_buf);
 }
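
The layout rule stated in the comment above can be made concrete with a short sketch (my_vb2_buffer and my_views are hypothetical names): the vb2 core allocates q->buf_struct_size bytes per buffer and treats the allocation as a plain struct vb2_buffer, so the driver structure has to begin with struct vb2_v4l2_buffer, whose own first member is the vb2_buf the core sees; container_of() then recovers the outer views, exactly as vb_to_mvb() does.

#include <linux/list.h>
#include <media/videobuf2-v4l2.h>

struct my_vb2_buffer {				/* hypothetical driver buffer */
	struct vb2_v4l2_buffer vb;		/* must stay the first member */
	struct list_head queue;			/* driver-private state follows */
};

/* One allocation of q->buf_struct_size bytes, three views of the same memory. */
static struct my_vb2_buffer *my_views(struct vb2_buffer *vb)
{
	/* vb is what the vb2 core hands to driver callbacks */
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	/* same container_of() step as vb_to_mvb() above */
	return container_of(vbuf, struct my_vb2_buffer, vb);
}
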
  * Hand a completed buffer back to user space.
  */
 static void mcam_buffer_done(struct mcam_camera *cam, int frame,
-               struct vb2_buffer *vbuf)
+               struct vb2_v4l2_buffer *vbuf)
 {
-       vbuf->v4l2_buf.bytesused = cam->pix_format.sizeimage;
-       vbuf->v4l2_buf.sequence = cam->buf_seq[frame];
-       vbuf->v4l2_buf.field = V4L2_FIELD_NONE;
-       v4l2_get_timestamp(&vbuf->v4l2_buf.timestamp);
-       vb2_set_plane_payload(vbuf, 0, cam->pix_format.sizeimage);
-       vb2_buffer_done(vbuf, VB2_BUF_STATE_DONE);
+       vbuf->vb2_buf.planes[0].bytesused = cam->pix_format.sizeimage;
+       vbuf->sequence = cam->buf_seq[frame];
+       vbuf->field = V4L2_FIELD_NONE;
+       v4l2_get_timestamp(&vbuf->timestamp);
+       vb2_set_plane_payload(&vbuf->vb2_buf, 0, cam->pix_format.sizeimage);
+       vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
 }
 
 
                 * Drop the lock during the big copy.  This *should* be safe...
                 */
                spin_unlock_irqrestore(&cam->dev_lock, flags);
-               memcpy(vb2_plane_vaddr(&buf->vb_buf, 0), cam->dma_bufs[bufno],
+               memcpy(vb2_plane_vaddr(&buf->vb_buf.vb2_buf, 0),
+                               cam->dma_bufs[bufno],
                                cam->pix_format.sizeimage);
                mcam_buffer_done(cam, bufno, &buf->vb_buf);
                spin_lock_irqsave(&cam->dev_lock, flags);
 {
        struct mcam_vb_buffer *buf;
        dma_addr_t dma_handle;
-       struct vb2_buffer *vb;
+       struct vb2_v4l2_buffer *vb;
 
        /*
         * If there are no available buffers, go into single mode
        cam->vb_bufs[frame] = buf;
        vb = &buf->vb_buf;
 
-       dma_handle = vb2_dma_contig_plane_dma_addr(vb, 0);
+       dma_handle = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
        mcam_write_yuv_bases(cam, frame, dma_handle);
 }
 
 
 static void mcam_vb_buf_queue(struct vb2_buffer *vb)
 {
-       struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
        struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
        unsigned long flags;
        int start;
 
        spin_lock_irqsave(&cam->dev_lock, flags);
        list_for_each_entry_safe(buf, node, &cam->buffers, queue) {
-               vb2_buffer_done(&buf->vb_buf, state);
+               vb2_buffer_done(&buf->vb_buf.vb2_buf, state);
                list_del(&buf->queue);
        }
        for (i = 0; i < MAX_DMA_BUFS; i++) {
                buf = cam->vb_bufs[i];
 
                if (buf) {
-                       vb2_buffer_done(&buf->vb_buf, state);
+                       vb2_buffer_done(&buf->vb_buf.vb2_buf, state);
                        cam->vb_bufs[i] = NULL;
                }
        }
  */
 static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
 {
-       struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
        struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
        int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
 
 
 static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
 {
-       struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
        struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
        struct mcam_dma_desc *desc = mvb->dma_desc;
        struct scatterlist *sg;
 
 static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
-       struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+       struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
        int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
 
        dma_free_coherent(cam->dev, ndesc * sizeof(struct mcam_dma_desc),
 
 {
        struct emmaprp_dev *pcdev = data;
        struct emmaprp_ctx *curr_ctx;
-       struct vb2_buffer *src_vb, *dst_vb;
+       struct vb2_v4l2_buffer *src_vb, *dst_vb;
        unsigned long flags;
        u32 irqst;
 
                        src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
                        dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
 
-                       dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
-                       dst_vb->v4l2_buf.flags &=
+                       dst_vb->timestamp = src_vb->timestamp;
+                       dst_vb->flags &=
                                ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-                       dst_vb->v4l2_buf.flags |=
-                               src_vb->v4l2_buf.flags
+                       dst_vb->flags |=
+                               src_vb->flags
                                & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-                       dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
+                       dst_vb->timecode = src_vb->timecode;
 
                        spin_lock_irqsave(&pcdev->irqlock, flags);
                        v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
 
 static void emmaprp_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-       v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+       v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
 }
 
 static struct vb2_ops emmaprp_qops = {
 
 
 static int isp_video_buffer_prepare(struct vb2_buffer *buf)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
        struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
-       struct isp_buffer *buffer = to_isp_buffer(buf);
+       struct isp_buffer *buffer = to_isp_buffer(vbuf);
        struct isp_video *video = vfh->video;
        dma_addr_t addr;
 
                return -EINVAL;
        }
 
-       vb2_set_plane_payload(&buffer->vb, 0, vfh->format.fmt.pix.sizeimage);
+       vb2_set_plane_payload(&buffer->vb.vb2_buf, 0,
+                             vfh->format.fmt.pix.sizeimage);
        buffer->dma = addr;
 
        return 0;
  */
 static void isp_video_buffer_queue(struct vb2_buffer *buf)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
        struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
-       struct isp_buffer *buffer = to_isp_buffer(buf);
+       struct isp_buffer *buffer = to_isp_buffer(vbuf);
        struct isp_video *video = vfh->video;
        struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
        enum isp_pipeline_state state;
        spin_lock_irqsave(&video->irqlock, flags);
 
        if (unlikely(video->error)) {
-               vb2_buffer_done(&buffer->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                spin_unlock_irqrestore(&video->irqlock, flags);
                return;
        }
        list_del(&buf->irqlist);
        spin_unlock_irqrestore(&video->irqlock, flags);
 
-       v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+       v4l2_get_timestamp(&buf->vb.timestamp);
 
        /* Do frame number propagation only if this is the output video node.
         * Frame number either comes from the CSI receivers or it gets
         * first, so the input number might lag behind by 1 in some cases.
         */
        if (video == pipe->output && !pipe->do_propagation)
-               buf->vb.v4l2_buf.sequence =
+               buf->vb.sequence =
                        atomic_inc_return(&pipe->frame_number);
        else
-               buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);
+               buf->vb.sequence = atomic_read(&pipe->frame_number);
 
        if (pipe->field != V4L2_FIELD_NONE)
-               buf->vb.v4l2_buf.sequence /= 2;
+               buf->vb.sequence /= 2;
 
-       buf->vb.v4l2_buf.field = pipe->field;
+       buf->vb.field = pipe->field;
 
        /* Report pipeline errors to userspace on the capture device side. */
        if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
                state = VB2_BUF_STATE_DONE;
        }
 
-       vb2_buffer_done(&buf->vb, state);
+       vb2_buffer_done(&buf->vb.vb2_buf, state);
 
        spin_lock_irqsave(&video->irqlock, flags);
 
                buf = list_first_entry(&video->dmaqueue,
                                       struct isp_buffer, irqlist);
                list_del(&buf->irqlist);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
 
        video->error = true;
 
  * @dma: DMA address
  */
 struct isp_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head irqlist;
        dma_addr_t dma;
 };
 
        "Unknown"
 };
 
-static struct jpu_buffer *vb2_to_jpu_buffer(struct vb2_buffer *vb)
+static struct jpu_buffer *vb2_to_jpu_buffer(struct vb2_v4l2_buffer *vb)
 {
        struct v4l2_m2m_buffer *b =
                container_of(vb, struct v4l2_m2m_buffer, vb);
 
 static int jpu_buf_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
        struct jpu_q_data *q_data;
        unsigned int i;
        q_data = jpu_get_q_data(ctx, vb->vb2_queue->type);
 
        if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
-               if (vb->v4l2_buf.field == V4L2_FIELD_ANY)
-                       vb->v4l2_buf.field = V4L2_FIELD_NONE;
-               if (vb->v4l2_buf.field != V4L2_FIELD_NONE) {
+               if (vbuf->field == V4L2_FIELD_ANY)
+                       vbuf->field = V4L2_FIELD_NONE;
+               if (vbuf->field != V4L2_FIELD_NONE) {
                        dev_err(ctx->jpu->dev, "%s field isn't supported\n",
                                        __func__);
                        return -EINVAL;
 
 static void jpu_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 
        if (!ctx->encoder && V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
-               struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vb);
+               struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
                struct jpu_q_data *q_data, adjust;
                void *buffer = vb2_plane_vaddr(vb, 0);
                unsigned long buf_size = vb2_get_plane_payload(vb, 0);
        }
 
        if (ctx->fh.m2m_ctx)
-               v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+               v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 
        return;
 
 
 static void jpu_buf_finish(struct vb2_buffer *vb)
 {
-       struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
        struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
        struct jpu_q_data *q_data = &ctx->out_q;
        enum v4l2_buf_type type = vb->vb2_queue->type;
        u8 *buffer;
 
        if (vb->state == VB2_BUF_STATE_DONE)
-               vb->v4l2_buf.sequence = jpu_get_q_data(ctx, type)->sequence++;
+               vbuf->sequence = jpu_get_q_data(ctx, type)->sequence++;
 
        if (!ctx->encoder || vb->state != VB2_BUF_STATE_DONE ||
            V4L2_TYPE_IS_OUTPUT(type))
 static void jpu_stop_streaming(struct vb2_queue *vq)
 {
        struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
-       struct vb2_buffer *vb;
+       struct vb2_v4l2_buffer *vb;
        unsigned long flags;
 
        for (;;) {
 static void jpu_cleanup(struct jpu_ctx *ctx, bool reset)
 {
        /* remove current buffers and finish job */
-       struct vb2_buffer *src_buf, *dst_buf;
+       struct vb2_v4l2_buffer *src_buf, *dst_buf;
        unsigned long flags;
 
        spin_lock_irqsave(&ctx->jpu->lock, flags);
        struct jpu *jpu = ctx->jpu;
        struct jpu_buffer *jpu_buf;
        struct jpu_q_data *q_data;
-       struct vb2_buffer *src_buf, *dst_buf;
+       struct vb2_v4l2_buffer *src_buf, *dst_buf;
        unsigned int w, h, bpl;
        unsigned char num_planes, subsampling;
        unsigned long flags;
                unsigned long src_1_addr, src_2_addr, dst_addr;
                unsigned int redu, inft;
 
-               dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
-               src_1_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+               dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+               src_1_addr =
+                       vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
                if (num_planes > 1)
-                       src_2_addr = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+                       src_2_addr = vb2_dma_contig_plane_dma_addr(
+                                       &src_buf->vb2_buf, 1);
                else
                        src_2_addr = src_1_addr + w * h;
 
                        return;
                }
 
-               src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
-               dst_1_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+               src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+               dst_1_addr =
+                       vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
                if (q_data->fmtinfo->num_planes > 1)
-                       dst_2_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+                       dst_2_addr = vb2_dma_contig_plane_dma_addr(
+                                       &dst_buf->vb2_buf, 1);
                else
                        dst_2_addr = dst_1_addr + w * h;
 
 {
        struct jpu *jpu = dev_id;
        struct jpu_ctx *curr_ctx;
-       struct vb2_buffer *src_buf, *dst_buf;
+       struct vb2_v4l2_buffer *src_buf, *dst_buf;
        unsigned int int_status;
 
        int_status = jpu_read(jpu, JINTS);
                        unsigned long payload_size = jpu_read(jpu, JCDTCU) << 16
                                                   | jpu_read(jpu, JCDTCM) << 8
                                                   | jpu_read(jpu, JCDTCD);
-                       vb2_set_plane_payload(dst_buf, 0,
+                       vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
                                payload_size + JPU_JPEG_HDR_SIZE);
                }
 
-               dst_buf->v4l2_buf.field = src_buf->v4l2_buf.field;
-               dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
-               if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TIMECODE)
-                       dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
-               dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-               dst_buf->v4l2_buf.flags |= src_buf->v4l2_buf.flags &
+               dst_buf->field = src_buf->field;
+               dst_buf->timestamp = src_buf->timestamp;
+               if (src_buf->flags & V4L2_BUF_FLAG_TIMECODE)
+                       dst_buf->timecode = src_buf->timecode;
+               dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+               dst_buf->flags |= src_buf->flags &
                                        V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-               dst_buf->v4l2_buf.flags = src_buf->v4l2_buf.flags &
+               dst_buf->flags = src_buf->flags &
                        (V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_KEYFRAME |
                         V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME |
                         V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
 
        /* Release unused buffers */
        while (!list_empty(&vp->pending_buf_q)) {
                buf = camif_pending_queue_pop(vp);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
 
        while (!list_empty(&vp->active_buf_q)) {
                buf = camif_active_queue_pop(vp);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
 
        spin_unlock_irqrestore(&camif->slock, flags);
 
                if (!WARN_ON(vbuf == NULL)) {
                        /* Dequeue a filled buffer */
-                       v4l2_get_timestamp(&vbuf->vb.v4l2_buf.timestamp);
-                       vbuf->vb.v4l2_buf.sequence = vp->frame_sequence++;
-                       vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE);
+                       v4l2_get_timestamp(&vbuf->vb.timestamp);
+                       vbuf->vb.sequence = vp->frame_sequence++;
+                       vb2_buffer_done(&vbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
 
                        /* Set up an empty buffer at the DMA engine */
                        vbuf = camif_pending_queue_pop(vp);
 
 static void buffer_queue(struct vb2_buffer *vb)
 {
-       struct camif_buffer *buf = container_of(vb, struct camif_buffer, vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct camif_buffer *buf = container_of(vbuf, struct camif_buffer, vb);
        struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
        struct camif_dev *camif = vp->camif;
        unsigned long flags;
 
        spin_lock_irqsave(&camif->slock, flags);
-       WARN_ON(camif_prepare_addr(vp, &buf->vb, &buf->paddr));
+       WARN_ON(camif_prepare_addr(vp, &buf->vb.vb2_buf, &buf->paddr));
 
        if (!(vp->state & ST_VP_STREAMING) && vp->active_buffers < 2) {
                /* Schedule an empty buffer in H/W */
 
  * @index: an identifier of this buffer at the DMA engine
  */
 struct camif_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
        struct camif_addr paddr;
        unsigned int index;
 
 
 static void g2d_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-       v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+       v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 }
 
 static struct vb2_ops g2d_qops = {
 {
        struct g2d_dev *dev = prv;
        struct g2d_ctx *ctx = dev->curr;
-       struct vb2_buffer *src, *dst;
+       struct vb2_v4l2_buffer *src, *dst;
 
        g2d_clear_int(dev);
        clk_disable(dev->gate);
        BUG_ON(src == NULL);
        BUG_ON(dst == NULL);
 
-       dst->v4l2_buf.timecode = src->v4l2_buf.timecode;
-       dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp;
-       dst->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-       dst->v4l2_buf.flags |=
-               src->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+       dst->timecode = src->timecode;
+       dst->timestamp = src->timestamp;
+       dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+       dst->flags |=
+               src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
 
        v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
        v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 
 
 static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 
        if (ctx->mode == S5P_JPEG_DECODE &&
                q_data->h = tmp.h;
        }
 
-       v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+       v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 }
 
 static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
 {
        struct s5p_jpeg *jpeg = dev_id;
        struct s5p_jpeg_ctx *curr_ctx;
-       struct vb2_buffer *src_buf, *dst_buf;
+       struct vb2_v4l2_buffer *src_buf, *dst_buf;
        unsigned long payload_size = 0;
        enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
        bool enc_jpeg_too_large = false;
                payload_size = s5p_jpeg_compressed_size(jpeg->regs);
        }
 
-       dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
-       dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
-       dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-       dst_buf->v4l2_buf.flags |=
-               src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+       dst_buf->timecode = src_buf->timecode;
+       dst_buf->timestamp = src_buf->timestamp;
+       dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+       dst_buf->flags |=
+               src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
 
        v4l2_m2m_buf_done(src_buf, state);
        if (curr_ctx->mode == S5P_JPEG_ENCODE)
-               vb2_set_plane_payload(dst_buf, 0, payload_size);
+               vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size);
        v4l2_m2m_buf_done(dst_buf, state);
        v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
 
 static irqreturn_t exynos4_jpeg_irq(int irq, void *priv)
 {
        unsigned int int_status;
-       struct vb2_buffer *src_vb, *dst_vb;
+       struct vb2_v4l2_buffer *src_vb, *dst_vb;
        struct s5p_jpeg *jpeg = priv;
        struct s5p_jpeg_ctx *curr_ctx;
        unsigned long payload_size = 0;
        if (jpeg->irq_ret == OK_ENC_OR_DEC) {
                if (curr_ctx->mode == S5P_JPEG_ENCODE) {
                        payload_size = exynos4_jpeg_get_stream_size(jpeg->regs);
-                       vb2_set_plane_payload(dst_vb, 0, payload_size);
+                       vb2_set_plane_payload(&dst_vb->vb2_buf,
+                                       0, payload_size);
                }
                v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
                v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
 {
        struct s5p_jpeg *jpeg = dev_id;
        struct s5p_jpeg_ctx *curr_ctx;
-       struct vb2_buffer *src_buf, *dst_buf;
+       struct vb2_v4l2_buffer *src_buf, *dst_buf;
        unsigned long payload_size = 0;
        enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
        bool interrupt_timeout = false;
        src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
        dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
 
-       dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
-       dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
+       dst_buf->timecode = src_buf->timecode;
+       dst_buf->timestamp = src_buf->timestamp;
 
        v4l2_m2m_buf_done(src_buf, state);
        if (curr_ctx->mode == S5P_JPEG_ENCODE)
-               vb2_set_plane_payload(dst_buf, 0, payload_size);
+               vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size);
        v4l2_m2m_buf_done(dst_buf, state);
        v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
 
 
                dst_buf = list_entry(ctx->dst_queue.next,
                                     struct s5p_mfc_buf, list);
                mfc_debug(2, "Cleaning up buffer: %d\n",
-                                         dst_buf->b->v4l2_buf.index);
-               vb2_set_plane_payload(dst_buf->b, 0, 0);
-               vb2_set_plane_payload(dst_buf->b, 1, 0);
+                                         dst_buf->b->vb2_buf.index);
+               vb2_set_plane_payload(&dst_buf->b->vb2_buf, 0, 0);
+               vb2_set_plane_payload(&dst_buf->b->vb2_buf, 1, 0);
                list_del(&dst_buf->list);
                ctx->dst_queue_cnt--;
-               dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);
+               dst_buf->b->sequence = (ctx->sequence++);
 
                if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) ==
                        s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx))
-                       dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
+                       dst_buf->b->field = V4L2_FIELD_NONE;
                else
-                       dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;
-               dst_buf->b->v4l2_buf.flags |= V4L2_BUF_FLAG_LAST;
+                       dst_buf->b->field = V4L2_FIELD_INTERLACED;
+               dst_buf->b->flags |= V4L2_BUF_FLAG_LAST;
 
-               ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
-               vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
+               ctx->dec_dst_flag &= ~(1 << dst_buf->b->vb2_buf.index);
+               vb2_buffer_done(&dst_buf->b->vb2_buf, VB2_BUF_STATE_DONE);
        }
 }
 
           appropriate flags. */
        src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
        list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
-               if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
-                       dst_buf->b->v4l2_buf.timecode =
-                                               src_buf->b->v4l2_buf.timecode;
-                       dst_buf->b->v4l2_buf.timestamp =
-                                               src_buf->b->v4l2_buf.timestamp;
-                       dst_buf->b->v4l2_buf.flags &=
+               if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
+                               == dec_y_addr) {
+                       dst_buf->b->timecode =
+                                               src_buf->b->timecode;
+                       dst_buf->b->timestamp =
+                                               src_buf->b->timestamp;
+                       dst_buf->b->flags &=
                                ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-                       dst_buf->b->v4l2_buf.flags |=
-                               src_buf->b->v4l2_buf.flags
+                       dst_buf->b->flags |=
+                               src_buf->b->flags
                                & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
                        switch (frame_type) {
                        case S5P_FIMV_DECODE_FRAME_I_FRAME:
-                               dst_buf->b->v4l2_buf.flags |=
+                               dst_buf->b->flags |=
                                                V4L2_BUF_FLAG_KEYFRAME;
                                break;
                        case S5P_FIMV_DECODE_FRAME_P_FRAME:
-                               dst_buf->b->v4l2_buf.flags |=
+                               dst_buf->b->flags |=
                                                V4L2_BUF_FLAG_PFRAME;
                                break;
                        case S5P_FIMV_DECODE_FRAME_B_FRAME:
-                               dst_buf->b->v4l2_buf.flags |=
+                               dst_buf->b->flags |=
                                                V4L2_BUF_FLAG_BFRAME;
                                break;
                        default:
         * check which videobuf does it correspond to */
        list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
                /* Check if this is the buffer we're looking for */
-               if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dspl_y_addr) {
+               if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
+                               == dspl_y_addr) {
                        list_del(&dst_buf->list);
                        ctx->dst_queue_cnt--;
-                       dst_buf->b->v4l2_buf.sequence = ctx->sequence;
+                       dst_buf->b->sequence = ctx->sequence;
                        if (s5p_mfc_hw_call(dev->mfc_ops,
                                        get_pic_type_top, ctx) ==
                                s5p_mfc_hw_call(dev->mfc_ops,
                                        get_pic_type_bot, ctx))
-                               dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
+                               dst_buf->b->field = V4L2_FIELD_NONE;
                        else
-                               dst_buf->b->v4l2_buf.field =
+                               dst_buf->b->field =
                                                        V4L2_FIELD_INTERLACED;
-                       vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
-                       vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
-                       clear_bit(dst_buf->b->v4l2_buf.index,
+                       vb2_set_plane_payload(&dst_buf->b->vb2_buf, 0,
+                                               ctx->luma_size);
+                       vb2_set_plane_payload(&dst_buf->b->vb2_buf, 1,
+                                               ctx->chroma_size);
+                       clear_bit(dst_buf->b->vb2_buf.index,
                                                        &ctx->dec_dst_flag);
 
-                       vb2_buffer_done(dst_buf->b,
-                               err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+                       vb2_buffer_done(&dst_buf->b->vb2_buf, err ?
+                               VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
 
                        break;
                }
                if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
                        ctx->codec_mode != S5P_MFC_CODEC_VP8_DEC &&
                        ctx->consumed_stream + STUFF_BYTE <
-                       src_buf->b->v4l2_planes[0].bytesused) {
+                       src_buf->b->vb2_buf.planes[0].bytesused) {
                        /* Run MFC again on the same buffer */
                        mfc_debug(2, "Running again the same buffer\n");
                        ctx->after_packed_pb = 1;
                        list_del(&src_buf->list);
                        ctx->src_queue_cnt--;
                        if (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) > 0)
-                               vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR);
+                               vb2_buffer_done(&src_buf->b->vb2_buf,
+                                               VB2_BUF_STATE_ERROR);
                        else
-                               vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
+                               vb2_buffer_done(&src_buf->b->vb2_buf,
+                                               VB2_BUF_STATE_DONE);
                }
        }
 leave_handle_frame:
                                        struct s5p_mfc_buf, list);
                        if (s5p_mfc_hw_call(dev->mfc_ops, get_consumed_stream,
                                                dev) <
-                                       src_buf->b->v4l2_planes[0].bytesused)
+                                       src_buf->b->vb2_buf.planes[0].bytesused)
                                ctx->head_processed = 0;
                        else
                                ctx->head_processed = 1;
                                             struct s5p_mfc_buf, list);
                                list_del(&src_buf->list);
                                ctx->src_queue_cnt--;
-                               vb2_buffer_done(src_buf->b,
+                               vb2_buffer_done(&src_buf->b->vb2_buf,
                                                VB2_BUF_STATE_DONE);
                        }
                        spin_unlock_irqrestore(&dev->irqlock, flags);
                                                                        list);
                list_del(&mb_entry->list);
                ctx->dst_queue_cnt--;
-               vb2_set_plane_payload(mb_entry->b, 0, 0);
-               vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
+               vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, 0);
+               vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
        }
        spin_unlock(&dev->irqlock);
 
 
  * struct s5p_mfc_buf - MFC buffer
  */
 struct s5p_mfc_buf {
+       struct vb2_v4l2_buffer *b;
        struct list_head list;
-       struct vb2_buffer *b;
        union {
                struct {
                        size_t luma;
 
 
 static int s5p_mfc_buf_init(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vb2_queue *vq = vb->vb2_queue;
        struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
        unsigned int i;
                        mfc_err("Plane buffer (CAPTURE) is too small\n");
                        return -EINVAL;
                }
-               i = vb->v4l2_buf.index;
-               ctx->dst_bufs[i].b = vb;
+               i = vb->index;
+               ctx->dst_bufs[i].b = vbuf;
                ctx->dst_bufs[i].cookie.raw.luma =
                                        vb2_dma_contig_plane_dma_addr(vb, 0);
                ctx->dst_bufs[i].cookie.raw.chroma =
                        return -EINVAL;
                }
 
-               i = vb->v4l2_buf.index;
-               ctx->src_bufs[i].b = vb;
+               i = vb->index;
+               ctx->src_bufs[i].b = vbuf;
                ctx->src_bufs[i].cookie.stream =
                                        vb2_dma_contig_plane_dma_addr(vb, 0);
                ctx->src_bufs_cnt++;
        struct s5p_mfc_buf *mfc_buf;
 
        if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-               mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index];
+               mfc_buf = &ctx->src_bufs[vb->index];
                mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
                spin_lock_irqsave(&dev->irqlock, flags);
                list_add_tail(&mfc_buf->list, &ctx->src_queue);
                ctx->src_queue_cnt++;
                spin_unlock_irqrestore(&dev->irqlock, flags);
        } else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-               mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index];
+               mfc_buf = &ctx->dst_bufs[vb->index];
                mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
                /* Mark destination as available for use by MFC */
                spin_lock_irqsave(&dev->irqlock, flags);
-               set_bit(vb->v4l2_buf.index, &ctx->dec_dst_flag);
+               set_bit(vb->index, &ctx->dec_dst_flag);
                list_add_tail(&mfc_buf->list, &ctx->dst_queue);
                ctx->dst_queue_cnt++;
                spin_unlock_irqrestore(&dev->irqlock, flags);
 
 
        spin_lock_irqsave(&dev->irqlock, flags);
        dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
-       dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
-       dst_size = vb2_plane_size(dst_mb->b, 0);
+       dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+       dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
        s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
                        dst_size);
        spin_unlock_irqrestore(&dev->irqlock, flags);
                                        struct s5p_mfc_buf, list);
                        list_del(&dst_mb->list);
                        ctx->dst_queue_cnt--;
-                       vb2_set_plane_payload(dst_mb->b, 0,
+                       vb2_set_plane_payload(&dst_mb->b->vb2_buf, 0,
                                s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size,
                                                dev));
-                       vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE);
+                       vb2_buffer_done(&dst_mb->b->vb2_buf,
+                                       VB2_BUF_STATE_DONE);
                }
                spin_unlock_irqrestore(&dev->irqlock, flags);
        }
 
        spin_lock_irqsave(&dev->irqlock, flags);
        src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
-       src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
-       src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
+       src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0);
+       src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1);
        s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_frame_buffer, ctx,
                                                        src_y_addr, src_c_addr);
        spin_unlock_irqrestore(&dev->irqlock, flags);
 
        spin_lock_irqsave(&dev->irqlock, flags);
        dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
-       dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
-       dst_size = vb2_plane_size(dst_mb->b, 0);
+       dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+       dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
        s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
                        dst_size);
        spin_unlock_irqrestore(&dev->irqlock, flags);
                s5p_mfc_hw_call_void(dev->mfc_ops, get_enc_frame_buffer, ctx,
                                &enc_y_addr, &enc_c_addr);
                list_for_each_entry(mb_entry, &ctx->src_queue, list) {
-                       mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
-                       mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
+                       mb_y_addr = vb2_dma_contig_plane_dma_addr(
+                                       &mb_entry->b->vb2_buf, 0);
+                       mb_c_addr = vb2_dma_contig_plane_dma_addr(
+                                       &mb_entry->b->vb2_buf, 1);
                        if ((enc_y_addr == mb_y_addr) &&
                                                (enc_c_addr == mb_c_addr)) {
                                list_del(&mb_entry->list);
                                ctx->src_queue_cnt--;
-                               vb2_buffer_done(mb_entry->b,
+                               vb2_buffer_done(&mb_entry->b->vb2_buf,
                                                        VB2_BUF_STATE_DONE);
                                break;
                        }
                }
                list_for_each_entry(mb_entry, &ctx->ref_queue, list) {
-                       mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
-                       mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
+                       mb_y_addr = vb2_dma_contig_plane_dma_addr(
+                                       &mb_entry->b->vb2_buf, 0);
+                       mb_c_addr = vb2_dma_contig_plane_dma_addr(
+                                       &mb_entry->b->vb2_buf, 1);
                        if ((enc_y_addr == mb_y_addr) &&
                                                (enc_c_addr == mb_c_addr)) {
                                list_del(&mb_entry->list);
                                ctx->ref_queue_cnt--;
-                               vb2_buffer_done(mb_entry->b,
+                               vb2_buffer_done(&mb_entry->b->vb2_buf,
                                                        VB2_BUF_STATE_DONE);
                                break;
                        }
                ctx->dst_queue_cnt--;
                switch (slice_type) {
                case S5P_FIMV_ENC_SI_SLICE_TYPE_I:
-                       mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+                       mb_entry->b->flags |= V4L2_BUF_FLAG_KEYFRAME;
                        break;
                case S5P_FIMV_ENC_SI_SLICE_TYPE_P:
-                       mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+                       mb_entry->b->flags |= V4L2_BUF_FLAG_PFRAME;
                        break;
                case S5P_FIMV_ENC_SI_SLICE_TYPE_B:
-                       mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_BFRAME;
+                       mb_entry->b->flags |= V4L2_BUF_FLAG_BFRAME;
                        break;
                }
-               vb2_set_plane_payload(mb_entry->b, 0, strm_size);
-               vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
+               vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size);
+               vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
        }
        spin_unlock_irqrestore(&dev->irqlock, flags);
        if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0))
                        return -EINVAL;
                }
                mfc_debug(2, "index: %d, plane[%d] cookie: %pad\n",
-                         vb->v4l2_buf.index, i, &dma);
+                         vb->index, i, &dma);
        }
        return 0;
 }
 
 static int s5p_mfc_buf_init(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vb2_queue *vq = vb->vb2_queue;
        struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
        unsigned int i;
                ret = check_vb_with_fmt(ctx->dst_fmt, vb);
                if (ret < 0)
                        return ret;
-               i = vb->v4l2_buf.index;
-               ctx->dst_bufs[i].b = vb;
+               i = vb->index;
+               ctx->dst_bufs[i].b = vbuf;
                ctx->dst_bufs[i].cookie.stream =
                                        vb2_dma_contig_plane_dma_addr(vb, 0);
                ctx->dst_bufs_cnt++;
                ret = check_vb_with_fmt(ctx->src_fmt, vb);
                if (ret < 0)
                        return ret;
-               i = vb->v4l2_buf.index;
-               ctx->src_bufs[i].b = vb;
+               i = vb->index;
+               ctx->src_bufs[i].b = vbuf;
                ctx->src_bufs[i].cookie.raw.luma =
                                        vb2_dma_contig_plane_dma_addr(vb, 0);
                ctx->src_bufs[i].cookie.raw.chroma =
                return;
        }
        if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-               mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index];
+               mfc_buf = &ctx->dst_bufs[vb->index];
                mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
                /* Mark destination as available for use by MFC */
                spin_lock_irqsave(&dev->irqlock, flags);
                ctx->dst_queue_cnt++;
                spin_unlock_irqrestore(&dev->irqlock, flags);
        } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-               mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index];
+               mfc_buf = &ctx->src_bufs[vb->index];
                mfc_buf->flags &= ~MFC_BUF_FLAG_USED;
                spin_lock_irqsave(&dev->irqlock, flags);
                list_add_tail(&mfc_buf->list, &ctx->src_queue);
 
        temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
        temp_vb->flags |= MFC_BUF_FLAG_USED;
        s5p_mfc_set_dec_stream_buffer_v5(ctx,
-               vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
-               ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused);
+               vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
+               ctx->consumed_stream, temp_vb->b->vb2_buf.planes[0].bytesused);
        spin_unlock_irqrestore(&dev->irqlock, flags);
        dev->curr_ctx = ctx->num;
-       if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
+       if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) {
                last_frame = MFC_DEC_LAST_FRAME;
                mfc_debug(2, "Setting ctx->state to FINISHING\n");
                ctx->state = MFCINST_FINISHING;
                src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
                                                                        list);
                src_mb->flags |= MFC_BUF_FLAG_USED;
-               if (src_mb->b->v4l2_planes[0].bytesused == 0) {
+               if (src_mb->b->vb2_buf.planes[0].bytesused == 0) {
                        /* send null frame */
                        s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2,
                                                                dev->bank2);
                        ctx->state = MFCINST_FINISHING;
                } else {
-                       src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
-                                                                       0);
-                       src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
-                                                                       1);
+                       src_y_addr = vb2_dma_contig_plane_dma_addr(
+                                       &src_mb->b->vb2_buf, 0);
+                       src_c_addr = vb2_dma_contig_plane_dma_addr(
+                                       &src_mb->b->vb2_buf, 1);
                        s5p_mfc_set_enc_frame_buffer_v5(ctx, src_y_addr,
                                                                src_c_addr);
                        if (src_mb->flags & MFC_BUF_FLAG_EOS)
        }
        dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
        dst_mb->flags |= MFC_BUF_FLAG_USED;
-       dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
-       dst_size = vb2_plane_size(dst_mb->b, 0);
+       dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+       dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
        s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
        spin_unlock_irqrestore(&dev->irqlock, flags);
        dev->curr_ctx = ctx->num;
        mfc_debug(2, "encoding buffer with index=%d state=%d\n",
-                 src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
+                 src_mb ? src_mb->b->vb2_buf.index : -1, ctx->state);
        s5p_mfc_encode_one_frame_v5(ctx);
        return 0;
 }
        mfc_debug(2, "Preparing to init decoding\n");
        temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
        s5p_mfc_set_dec_desc_buffer(ctx);
-       mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
+       mfc_debug(2, "Header size: %d\n",
+                       temp_vb->b->vb2_buf.planes[0].bytesused);
        s5p_mfc_set_dec_stream_buffer_v5(ctx,
-                               vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
-                               0, temp_vb->b->v4l2_planes[0].bytesused);
+                       vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
+                       0, temp_vb->b->vb2_buf.planes[0].bytesused);
        spin_unlock_irqrestore(&dev->irqlock, flags);
        dev->curr_ctx = ctx->num;
        s5p_mfc_init_decode_v5(ctx);
        s5p_mfc_set_enc_ref_buffer_v5(ctx);
        spin_lock_irqsave(&dev->irqlock, flags);
        dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
-       dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
-       dst_size = vb2_plane_size(dst_mb->b, 0);
+       dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+       dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
        s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
        spin_unlock_irqrestore(&dev->irqlock, flags);
        dev->curr_ctx = ctx->num;
                return -EIO;
        }
        temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
-       mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
+       mfc_debug(2, "Header size: %d\n",
+                       temp_vb->b->vb2_buf.planes[0].bytesused);
        s5p_mfc_set_dec_stream_buffer_v5(ctx,
-                               vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
-                               0, temp_vb->b->v4l2_planes[0].bytesused);
+                       vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
+                       0, temp_vb->b->vb2_buf.planes[0].bytesused);
        spin_unlock_irqrestore(&dev->irqlock, flags);
        dev->curr_ctx = ctx->num;
        ret = s5p_mfc_set_dec_frame_buffer_v5(ctx);
 
        while (!list_empty(lh)) {
                b = list_entry(lh->next, struct s5p_mfc_buf, list);
-               for (i = 0; i < b->b->num_planes; i++)
-                       vb2_set_plane_payload(b->b, i, 0);
-               vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
+               for (i = 0; i < b->b->vb2_buf.num_planes; i++)
+                       vb2_set_plane_payload(&b->b->vb2_buf, i, 0);
+               vb2_buffer_done(&b->b->vb2_buf, VB2_BUF_STATE_ERROR);
                list_del(&b->list);
        }
 }
 
        temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
        temp_vb->flags |= MFC_BUF_FLAG_USED;
        s5p_mfc_set_dec_stream_buffer_v6(ctx,
-               vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
-                       ctx->consumed_stream,
-                       temp_vb->b->v4l2_planes[0].bytesused);
+               vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0),
+               ctx->consumed_stream,
+               temp_vb->b->vb2_buf.planes[0].bytesused);
        spin_unlock_irqrestore(&dev->irqlock, flags);
 
        dev->curr_ctx = ctx->num;
-       if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
+       if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) {
                last_frame = 1;
                mfc_debug(2, "Setting ctx->state to FINISHING\n");
                ctx->state = MFCINST_FINISHING;
 
        src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
        src_mb->flags |= MFC_BUF_FLAG_USED;
-       src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
-       src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
+       src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0);
+       src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1);
 
        mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr);
        mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr);
 
        dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
        dst_mb->flags |= MFC_BUF_FLAG_USED;
-       dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
-       dst_size = vb2_plane_size(dst_mb->b, 0);
+       dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+       dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
 
        s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size);
 
        spin_lock_irqsave(&dev->irqlock, flags);
        mfc_debug(2, "Preparing to init decoding.\n");
        temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
-       mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
+       mfc_debug(2, "Header size: %d\n",
+               temp_vb->b->vb2_buf.planes[0].bytesused);
        s5p_mfc_set_dec_stream_buffer_v6(ctx,
-               vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 0,
-                       temp_vb->b->v4l2_planes[0].bytesused);
+               vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), 0,
+               temp_vb->b->vb2_buf.planes[0].bytesused);
        spin_unlock_irqrestore(&dev->irqlock, flags);
        dev->curr_ctx = ctx->num;
        s5p_mfc_init_decode_v6(ctx);
        spin_lock_irqsave(&dev->irqlock, flags);
 
        dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
-       dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
-       dst_size = vb2_plane_size(dst_mb->b, 0);
+       dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
+       dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0);
        s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size);
        spin_unlock_irqrestore(&dev->irqlock, flags);
        dev->curr_ctx = ctx->num;
 
        while (!list_empty(lh)) {
                b = list_entry(lh->next, struct s5p_mfc_buf, list);
-               for (i = 0; i < b->b->num_planes; i++)
-                       vb2_set_plane_payload(b->b, i, 0);
-               vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
+               for (i = 0; i < b->b->vb2_buf.num_planes; i++)
+                       vb2_set_plane_payload(&b->b->vb2_buf, i, 0);
+               vb2_buffer_done(&b->b->vb2_buf, VB2_BUF_STATE_ERROR);
                list_del(&b->list);
        }
 }
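The per-driver conversions that follow (mixer, sh_veu, sh_vou, atmel-isi,
mx2/mx3, rcar-vin, sh-mobile-ceu, bdisp, vpe, vim2m, vivid) all repeat the
same wrapping: the driver buffer embeds struct vb2_v4l2_buffer, and the vb2
callbacks, which still receive a struct vb2_buffer, recover the wrapper in
two steps. A minimal sketch with a hypothetical foo driver (locking
omitted, not part of the patch):

/* Hypothetical types and callback -- illustration only. */
struct foo_dev {
        struct list_head        queued_bufs;
};

struct foo_buffer {
        struct vb2_v4l2_buffer  vb;     /* must be first */
        struct list_head        list;
};

static void foo_buf_queue(struct vb2_buffer *vb)
{
        /* vb2 still hands out struct vb2_buffer; recover the wrapper first... */
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        /* ...then the driver buffer that embeds it */
        struct foo_buffer *buf = container_of(vbuf, struct foo_buffer, vb);
        struct foo_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

        list_add_tail(&buf->list, &dev->queued_bufs);
}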
 
 /** instance of a buffer */
 struct mxr_buffer {
        /** common v4l buffer stuff -- must be first */
-       struct vb2_buffer       vb;
+       struct vb2_v4l2_buffer vb;
        /** node for layer's lists */
        struct list_head        list;
 };
 
        dma_addr_t addr = 0;
 
        if (buf)
-               addr = vb2_dma_contig_plane_dma_addr(&buf->vb, 0);
+               addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
        mxr_reg_graph_buffer(layer->mdev, layer->idx, addr);
 }
 
 
        layer->ops.buffer_set(layer, layer->update_buf);
 
        if (done && done != layer->shadow_buf)
-               vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
+               vb2_buffer_done(&done->vb.vb2_buf, VB2_BUF_STATE_DONE);
 
 done:
        spin_unlock(&layer->enq_slock);
 
 
 static void buf_queue(struct vb2_buffer *vb)
 {
-       struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct mxr_buffer *buffer = container_of(vbuf, struct mxr_buffer, vb);
        struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
        struct mxr_device *mdev = layer->mdev;
        unsigned long flags;
        if (layer->update_buf == layer->shadow_buf)
                layer->update_buf = NULL;
        if (layer->update_buf) {
-               vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&layer->update_buf->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
                layer->update_buf = NULL;
        }
        if (layer->shadow_buf) {
-               vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&layer->shadow_buf->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
                layer->shadow_buf = NULL;
        }
        spin_unlock_irqrestore(&layer->enq_slock, flags);
        /* set all buffer to be done */
        list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
 
        spin_unlock_irqrestore(&layer->enq_slock, flags);
 
                mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
                return;
        }
-       luma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 0);
+       luma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
        if (layer->fmt->num_subframes == 2) {
-               chroma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 1);
+               chroma_addr[0] =
+                       vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 1);
        } else {
                /* FIXME: mxr_get_plane_size computes an integer division,
                 * which is slow and should not be performed in interrupt context */
 
 
 static void sh_veu_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
-       dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->v4l2_buf.type);
-       v4l2_m2m_buf_queue(veu->m2m_ctx, vb);
+       dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->type);
+       v4l2_m2m_buf_queue(veu->m2m_ctx, vbuf);
 }
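In the mem2mem drivers (sh_veu here, and bdisp, vpe and vim2m further down)
the v4l2_m2m_* helpers now traffic in struct vb2_v4l2_buffer, so the
timestamp/timecode/flags carry-over no longer goes through v4l2_buf. A
sketch of that completion step, with hypothetical naming and NULL checks
omitted:

/* Illustration only -- not part of this patch. */
static void foo_m2m_job_finish(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct vb2_v4l2_buffer *src = v4l2_m2m_src_buf_remove(m2m_ctx);
        struct vb2_v4l2_buffer *dst = v4l2_m2m_dst_buf_remove(m2m_ctx);

        /* carry OUTPUT metadata over to the CAPTURE buffer */
        dst->timestamp = src->timestamp;
        dst->timecode = src->timecode;
        dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
        dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

        v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
        v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
}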
 
 static const struct vb2_ops sh_veu_qops = {
 static irqreturn_t sh_veu_isr(int irq, void *dev_id)
 {
        struct sh_veu_dev *veu = dev_id;
-       struct vb2_buffer *dst;
-       struct vb2_buffer *src;
+       struct vb2_v4l2_buffer *dst;
+       struct vb2_v4l2_buffer *src;
        u32 status = sh_veu_reg_read(veu, VEU_EVTR);
 
        /* bundle read mode not used */
        if (!src || !dst)
                return IRQ_NONE;
 
-       dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp;
-       dst->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-       dst->v4l2_buf.flags |=
-               src->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-       dst->v4l2_buf.timecode = src->v4l2_buf.timecode;
+       dst->timestamp = src->timestamp;
+       dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+       dst->flags |=
+               src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+       dst->timecode = src->timecode;
 
        spin_lock(&veu->lock);
        v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-mediabus.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-dma-contig.h>
 
 /* Mirror addresses are not available for all registers */
 #define VOU_MIN_IMAGE_HEIGHT   16
 
 struct sh_vou_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
-static inline struct sh_vou_buffer *to_sh_vou_buffer(struct vb2_buffer *vb2)
+static inline struct sh_vou_buffer *
+to_sh_vou_buffer(struct vb2_v4l2_buffer *vb2)
 {
        return container_of(vb2, struct sh_vou_buffer, vb);
 }
 };
 
 static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
-                                struct vb2_buffer *vb)
+                                struct vb2_v4l2_buffer *vbuf)
 {
        dma_addr_t addr1, addr2;
 
-       addr1 = vb2_dma_contig_plane_dma_addr(vb, 0);
+       addr1 = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
        switch (vou_dev->pix.pixelformat) {
        case V4L2_PIX_FMT_NV12:
        case V4L2_PIX_FMT_NV16:
 /* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */
 static void sh_vou_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct sh_vou_buffer *shbuf = to_sh_vou_buffer(vb);
+       struct sh_vou_buffer *shbuf = to_sh_vou_buffer(vbuf);
        unsigned long flags;
 
        spin_lock_irqsave(&vou_dev->lock, flags);
                                         video, s_stream, 1);
        if (ret < 0 && ret != -ENOIOCTLCMD) {
                list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&buf->vb.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                        list_del(&buf->list);
                }
                vou_dev->active = NULL;
        msleep(50);
        spin_lock_irqsave(&vou_dev->lock, flags);
        list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                list_del(&buf->list);
        }
        vou_dev->active = NULL;
 
        list_del(&vb->list);
 
-       v4l2_get_timestamp(&vb->vb.v4l2_buf.timestamp);
-       vb->vb.v4l2_buf.sequence = vou_dev->sequence++;
-       vb->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
-       vb2_buffer_done(&vb->vb, VB2_BUF_STATE_DONE);
+       v4l2_get_timestamp(&vb->vb.timestamp);
+       vb->vb.sequence = vou_dev->sequence++;
+       vb->vb.field = V4L2_FIELD_INTERLACED;
+       vb2_buffer_done(&vb->vb.vb2_buf, VB2_BUF_STATE_DONE);
 
        vou_dev->active = list_entry(vou_dev->buf_list.next,
                                     struct sh_vou_buffer, list);
 
 
 /* Frame buffer data */
 struct frame_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct isi_dma_desc *p_dma_desc;
        struct list_head list;
 };
 static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
 {
        if (isi->active) {
-               struct vb2_buffer *vb = &isi->active->vb;
+               struct vb2_v4l2_buffer *vbuf = &isi->active->vb;
                struct frame_buffer *buf = isi->active;
 
                list_del_init(&buf->list);
-               v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
-               vb->v4l2_buf.sequence = isi->sequence++;
-               vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+               v4l2_get_timestamp(&vbuf->timestamp);
+               vbuf->sequence = isi->sequence++;
+               vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
        }
 
        if (list_empty(&isi->video_buffer_list)) {
 
 static int buffer_init(struct vb2_buffer *vb)
 {
-       struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
 
        buf->p_dma_desc = NULL;
        INIT_LIST_HEAD(&buf->list);
 
 static int buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
-       struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+       struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct atmel_isi *isi = ici->priv;
        unsigned long size;
                return -EINVAL;
        }
 
-       vb2_set_plane_payload(&buf->vb, 0, size);
+       vb2_set_plane_payload(vb, 0, size);
 
        if (!buf->p_dma_desc) {
                if (list_empty(&isi->dma_desc_head)) {
 
 static void buffer_cleanup(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct atmel_isi *isi = ici->priv;
-       struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+       struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
 
        /* This descriptor is available now and we add to head list */
        if (buf->p_dma_desc)
 
 static void buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct atmel_isi *isi = ici->priv;
-       struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+       struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
        unsigned long flags = 0;
 
        spin_lock_irqsave(&isi->lock, flags);
        /* Release all active buffers */
        list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
                list_del_init(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irq(&isi->lock);
 
 
 /* buffer for one video frame */
 struct mx2_buffer {
        /* common v4l buffer stuff -- must be first */
-       struct vb2_buffer               vb;
+       struct vb2_v4l2_buffer vb;
        struct mx2_buf_internal         internal;
 };
 
 
 static void mx2_videobuf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici =
                to_soc_camera_host(icd->parent);
        struct mx2_camera_dev *pcdev = ici->priv;
-       struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
+       struct mx2_buffer *buf = container_of(vbuf, struct mx2_buffer, vb);
        unsigned long flags;
 
        dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
        buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
                               internal.queue);
        buf->internal.bufnum = 0;
-       vb = &buf->vb;
+       vb = &buf->vb.vb2_buf;
 
        phys = vb2_dma_contig_plane_dma_addr(vb, 0);
        mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
        buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
                               internal.queue);
        buf->internal.bufnum = 1;
-       vb = &buf->vb;
+       vb = &buf->vb.vb2_buf;
 
        phys = vb2_dma_contig_plane_dma_addr(vb, 0);
        mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
        struct mx2_buf_internal *ibuf;
        struct mx2_buffer *buf;
        struct vb2_buffer *vb;
+       struct vb2_v4l2_buffer *vbuf;
        unsigned long phys;
 
        ibuf = list_first_entry(&pcdev->active_bufs, struct mx2_buf_internal,
        } else {
                buf = mx2_ibuf_to_buf(ibuf);
 
-               vb = &buf->vb;
+               vb = &buf->vb.vb2_buf;
+               vbuf = to_vb2_v4l2_buffer(vb);
 #ifdef DEBUG
                phys = vb2_dma_contig_plane_dma_addr(vb, 0);
                if (prp->cfg.channel == 1) {
                                vb2_get_plane_payload(vb, 0));
 
                list_del_init(&buf->internal.queue);
-               v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
-               vb->v4l2_buf.sequence = pcdev->frame_count;
+               v4l2_get_timestamp(&vbuf->timestamp);
+               vbuf->sequence = pcdev->frame_count;
                if (err)
                        vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
                else
 
        list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
 
-       vb = &buf->vb;
+       vb = &buf->vb.vb2_buf;
 
        phys = vb2_dma_contig_plane_dma_addr(vb, 0);
        mx27_update_emma_buf(pcdev, phys, bufnum);
 
 
 struct mx3_camera_buffer {
        /* common v4l buffer stuff -- must be first */
-       struct vb2_buffer                       vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head                        queue;
 
        /* One descriptor per scatterlist (per frame) */
        __raw_writel(value, mx3->base + reg);
 }
 
-static struct mx3_camera_buffer *to_mx3_vb(struct vb2_buffer *vb)
+static struct mx3_camera_buffer *to_mx3_vb(struct vb2_v4l2_buffer *vb)
 {
        return container_of(vb, struct mx3_camera_buffer, vb);
 }
 
        spin_lock(&mx3_cam->lock);
        if (mx3_cam->active) {
-               struct vb2_buffer *vb = &mx3_cam->active->vb;
+               struct vb2_v4l2_buffer *vb = &mx3_cam->active->vb;
                struct mx3_camera_buffer *buf = to_mx3_vb(vb);
 
                list_del_init(&buf->queue);
-               v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
-               vb->v4l2_buf.field = mx3_cam->field;
-               vb->v4l2_buf.sequence = mx3_cam->sequence++;
-               vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+               v4l2_get_timestamp(&vb->timestamp);
+               vb->field = mx3_cam->field;
+               vb->sequence = mx3_cam->sequence++;
+               vb2_buffer_done(&vb->vb2_buf, VB2_BUF_STATE_DONE);
        }
 
        if (list_empty(&mx3_cam->capture)) {
 
 static void mx3_videobuf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct mx3_camera_dev *mx3_cam = ici->priv;
-       struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+       struct mx3_camera_buffer *buf = to_mx3_vb(vbuf);
        struct scatterlist *sg = &buf->sg;
        struct dma_async_tx_descriptor *txd;
        struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
 
        if (vb2_plane_size(vb, 0) < new_size) {
                dev_err(icd->parent, "Buffer #%d too small (%lu < %zu)\n",
-                       vb->v4l2_buf.index, vb2_plane_size(vb, 0), new_size);
+                       vbuf->vb2_buf.index, vb2_plane_size(vb, 0), new_size);
                goto error;
        }
 
 
 static void mx3_videobuf_release(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct mx3_camera_dev *mx3_cam = ici->priv;
-       struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+       struct mx3_camera_buffer *buf = to_mx3_vb(vbuf);
        struct dma_async_tx_descriptor *txd = buf->txd;
        unsigned long flags;
 
 
 static int mx3_videobuf_init(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct mx3_camera_dev *mx3_cam = ici->priv;
-       struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+       struct mx3_camera_buffer *buf = to_mx3_vb(vbuf);
 
        if (!buf->txd) {
                /* This is for locking debugging only */
 
        list_for_each_entry_safe(buf, tmp, &mx3_cam->capture, queue) {
                list_del_init(&buf->queue);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
 
        spin_unlock_irqrestore(&mx3_cam->lock, flags);
 
        struct soc_camera_host          ici;
        struct list_head                capture;
 #define MAX_BUFFER_NUM                 3
-       struct vb2_buffer               *queue_buf[MAX_BUFFER_NUM];
+       struct vb2_v4l2_buffer          *queue_buf[MAX_BUFFER_NUM];
        struct vb2_alloc_ctx            *alloc_ctx;
        enum v4l2_field                 field;
        unsigned int                    pdata_flags;
 #define is_continuous_transfer(priv)   (priv->vb_count > MAX_BUFFER_NUM)
 
 struct rcar_vin_buffer {
-       struct vb2_buffer               vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head                list;
 };
 
 /* Moves a buffer from the queue to the HW slots */
 static int rcar_vin_fill_hw_slot(struct rcar_vin_priv *priv)
 {
-       struct vb2_buffer *vb;
+       struct vb2_v4l2_buffer *vbuf;
        dma_addr_t phys_addr_top;
        int slot;
 
        if (slot < 0)
                return 0;
 
-       vb = &list_entry(priv->capture.next, struct rcar_vin_buffer, list)->vb;
-       list_del_init(to_buf_list(vb));
-       priv->queue_buf[slot] = vb;
-       phys_addr_top = vb2_dma_contig_plane_dma_addr(vb, 0);
+       vbuf = &list_entry(priv->capture.next,
+                       struct rcar_vin_buffer, list)->vb;
+       list_del_init(to_buf_list(vbuf));
+       priv->queue_buf[slot] = vbuf;
+       phys_addr_top = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
        iowrite32(phys_addr_top, priv->base + VNMB_REG(slot));
 
        return 1;
 
 static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct rcar_vin_priv *priv = ici->priv;
 
        if (vb2_plane_size(vb, 0) < size) {
                dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
-                       vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
+                       vb->index, vb2_plane_size(vb, 0), size);
                goto error;
        }
 
 
        spin_lock_irq(&priv->lock);
 
-       list_add_tail(to_buf_list(vb), &priv->capture);
+       list_add_tail(to_buf_list(vbuf), &priv->capture);
        rcar_vin_fill_hw_slot(priv);
 
        /* If we weren't running, and have enough buffers, start capturing! */
        if (priv->state != RUNNING && rcar_vin_hw_ready(priv)) {
                if (rcar_vin_setup(priv)) {
                        /* Submit error */
-                       list_del_init(to_buf_list(vb));
+                       list_del_init(to_buf_list(vbuf));
                        spin_unlock_irq(&priv->lock);
                        goto error;
                }
 
        for (i = 0; i < MAX_BUFFER_NUM; i++) {
                if (priv->queue_buf[i]) {
-                       vb2_buffer_done(priv->queue_buf[i],
+                       vb2_buffer_done(&priv->queue_buf[i]->vb2_buf,
                                        VB2_BUF_STATE_ERROR);
                        priv->queue_buf[i] = NULL;
                }
 
        list_for_each_safe(buf_head, tmp, &priv->capture) {
                vb2_buffer_done(&list_entry(buf_head,
-                                       struct rcar_vin_buffer, list)->vb,
+                               struct rcar_vin_buffer, list)->vb.vb2_buf,
                                VB2_BUF_STATE_ERROR);
                list_del_init(buf_head);
        }
                else
                        slot = 0;
 
-               priv->queue_buf[slot]->v4l2_buf.field = priv->field;
-               priv->queue_buf[slot]->v4l2_buf.sequence = priv->sequence++;
-               v4l2_get_timestamp(&priv->queue_buf[slot]->v4l2_buf.timestamp);
-               vb2_buffer_done(priv->queue_buf[slot], VB2_BUF_STATE_DONE);
+               priv->queue_buf[slot]->field = priv->field;
+               priv->queue_buf[slot]->sequence = priv->sequence++;
+               v4l2_get_timestamp(&priv->queue_buf[slot]->timestamp);
+               vb2_buffer_done(&priv->queue_buf[slot]->vb2_buf,
+                               VB2_BUF_STATE_DONE);
                priv->queue_buf[slot] = NULL;
 
                if (priv->state != STOPPING)
 {
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct rcar_vin_priv *priv = ici->priv;
-       struct vb2_buffer *vb;
+       struct vb2_v4l2_buffer *vbuf;
        int i;
 
        /* disable capture, disable interrupts */
        /* make sure active buffer is cancelled */
        spin_lock_irq(&priv->lock);
        for (i = 0; i < MAX_BUFFER_NUM; i++) {
-               vb = priv->queue_buf[i];
-               if (vb) {
-                       list_del_init(to_buf_list(vb));
-                       vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+               vbuf = priv->queue_buf[i];
+               if (vbuf) {
+                       list_del_init(to_buf_list(vbuf));
+                       vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_ERROR);
                }
        }
        spin_unlock_irq(&priv->lock);
 
 
 /* per video frame buffer */
 struct sh_mobile_ceu_buffer {
-       struct vb2_buffer vb; /* v4l buffer must be first */
+       struct vb2_v4l2_buffer vb; /* v4l buffer must be first */
        struct list_head queue;
 };
 
 
        spinlock_t lock;                /* Protects video buffer lists */
        struct list_head capture;
-       struct vb2_buffer *active;
+       struct vb2_v4l2_buffer *active;
        struct vb2_alloc_ctx *alloc_ctx;
 
        struct sh_mobile_ceu_info *pdata;
        u32 code;
 };
 
-static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb)
+static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_v4l2_buffer *vbuf)
 {
-       return container_of(vb, struct sh_mobile_ceu_buffer, vb);
+       return container_of(vbuf, struct sh_mobile_ceu_buffer, vb);
 }
 
 static void ceu_write(struct sh_mobile_ceu_dev *priv,
                bottom2 = CDBCR;
        }
 
-       phys_addr_top = vb2_dma_contig_plane_dma_addr(pcdev->active, 0);
+       phys_addr_top =
+               vb2_dma_contig_plane_dma_addr(&pcdev->active->vb2_buf, 0);
 
        switch (icd->current_fmt->host_fmt->fourcc) {
        case V4L2_PIX_FMT_NV12:
 
 static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
 {
-       struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
 
        /* Added list head initialization on alloc */
        WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);
 
 static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
 {
-       struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct soc_camera_device *icd = container_of(vb->vb2_queue,
+                       struct soc_camera_device, vb2_vidq);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
-       struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+       struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
        unsigned long size;
 
        size = icd->sizeimage;
 
        if (vb2_plane_size(vb, 0) < size) {
                dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
-                       vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
+                       vb->index, vb2_plane_size(vb, 0), size);
                goto error;
        }
 
                 * we are not interested in the return value of
                 * sh_mobile_ceu_capture here.
                 */
-               pcdev->active = vb;
+               pcdev->active = vbuf;
                sh_mobile_ceu_capture(pcdev);
        }
        spin_unlock_irq(&pcdev->lock);
 
 static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
 {
-       struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct soc_camera_device *icd = container_of(vb->vb2_queue,
+                       struct soc_camera_device, vb2_vidq);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-       struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+       struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
 
        spin_lock_irq(&pcdev->lock);
 
-       if (pcdev->active == vb) {
+       if (pcdev->active == vbuf) {
                /* disable capture (release DMA buffer), reset */
                ceu_write(pcdev, CAPSR, 1 << 16);
                pcdev->active = NULL;
 
 static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
 {
-       struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct soc_camera_device *icd = container_of(vb->vb2_queue,
+                       struct soc_camera_device, vb2_vidq);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
 
                pcdev->buf_total);
 
        /* This is for locking debugging only */
-       INIT_LIST_HEAD(&to_ceu_vb(vb)->queue);
+       INIT_LIST_HEAD(&to_ceu_vb(vbuf)->queue);
        return 0;
 }
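The capture completion path changes the same way everywhere (sur40, sh_vou,
mx3, the CEU interrupt handler below, vivid): the v4l2 metadata is filled
in on the vb2_v4l2_buffer and vb2_buffer_done() is handed the embedded
vb2_buf. Minimal sketch, again with hypothetical names:

/* Illustration only -- not part of this patch. */
static void foo_complete_frame(struct vb2_v4l2_buffer *vbuf,
                               unsigned int sequence)
{
        /* v4l2-specific fields now live in struct vb2_v4l2_buffer... */
        v4l2_get_timestamp(&vbuf->timestamp);
        vbuf->sequence = sequence;
        vbuf->field = V4L2_FIELD_NONE;

        /* ...while the vb2 core keeps taking the embedded struct vb2_buffer */
        vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
}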
 
 static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
 {
        struct sh_mobile_ceu_dev *pcdev = data;
-       struct vb2_buffer *vb;
+       struct vb2_v4l2_buffer *vbuf;
        int ret;
 
        spin_lock(&pcdev->lock);
 
-       vb = pcdev->active;
-       if (!vb)
+       vbuf = pcdev->active;
+       if (!vbuf)
                /* Stale interrupt from a released buffer */
                goto out;
 
-       list_del_init(&to_ceu_vb(vb)->queue);
+       list_del_init(&to_ceu_vb(vbuf)->queue);
 
        if (!list_empty(&pcdev->capture))
                pcdev->active = &list_entry(pcdev->capture.next,
                pcdev->active = NULL;
 
        ret = sh_mobile_ceu_capture(pcdev);
-       v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
+       v4l2_get_timestamp(&vbuf->timestamp);
        if (!ret) {
-               vb->v4l2_buf.field = pcdev->field;
-               vb->v4l2_buf.sequence = pcdev->sequence++;
+               vbuf->field = pcdev->field;
+               vbuf->sequence = pcdev->sequence++;
        }
-       vb2_buffer_done(vb, ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+       vb2_buffer_done(&vbuf->vb2_buf,
+                       ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
 
 out:
        spin_unlock(&pcdev->lock);
        spin_lock_irq(&pcdev->lock);
        if (pcdev->active) {
                list_del_init(&to_ceu_vb(pcdev->active)->queue);
-               vb2_buffer_done(pcdev->active, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&pcdev->active->vb2_buf, VB2_BUF_STATE_ERROR);
                pcdev->active = NULL;
        }
        spin_unlock_irq(&pcdev->lock);
 
 
 static void bdisp_job_finish(struct bdisp_ctx *ctx, int vb_state)
 {
-       struct vb2_buffer *src_vb, *dst_vb;
+       struct vb2_v4l2_buffer *src_vb, *dst_vb;
 
        if (WARN(!ctx || !ctx->fh.m2m_ctx, "Null hardware context\n"))
                return;
        dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 
        if (src_vb && dst_vb) {
-               dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
-               dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
-               dst_vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-               dst_vb->v4l2_buf.flags |= src_vb->v4l2_buf.flags &
+               dst_vb->timestamp = src_vb->timestamp;
+               dst_vb->timecode = src_vb->timecode;
+               dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+               dst_vb->flags |= src_vb->flags &
                                          V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
 
                v4l2_m2m_buf_done(src_vb, vb_state);
 static int bdisp_get_bufs(struct bdisp_ctx *ctx)
 {
        struct bdisp_frame *src, *dst;
-       struct vb2_buffer *src_vb, *dst_vb;
+       struct vb2_v4l2_buffer *src_vb, *dst_vb;
        int ret;
 
        src = &ctx->src;
        dst = &ctx->dst;
 
        src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
-       ret = bdisp_get_addr(ctx, src_vb, src, src->paddr);
+       ret = bdisp_get_addr(ctx, &src_vb->vb2_buf, src, src->paddr);
        if (ret)
                return ret;
 
        dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
-       ret = bdisp_get_addr(ctx, dst_vb, dst, dst->paddr);
+       ret = bdisp_get_addr(ctx, &dst_vb->vb2_buf, dst, dst->paddr);
        if (ret)
                return ret;
 
-       dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
+       dst_vb->timestamp = src_vb->timestamp;
 
        return 0;
 }
 
 static void bdisp_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct bdisp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 
        /* return to V4L2 any 0-size buffer so it can be dequeued by user */
        }
 
        if (ctx->fh.m2m_ctx)
-               v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+               v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 }
 
 static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
 {
        struct bdisp_ctx *ctx = q->drv_priv;
-       struct vb2_buffer *buf;
+       struct vb2_v4l2_buffer *buf;
        int ret = pm_runtime_get_sync(ctx->bdisp_dev->dev);
 
        if (ret < 0) {
 
        unsigned int            bufs_completed;         /* bufs done in this batch */
 
        struct vpe_q_data       q_data[2];              /* src & dst queue data */
-       struct vb2_buffer       *src_vbs[VPE_MAX_SRC_BUFS];
-       struct vb2_buffer       *dst_vb;
+       struct vb2_v4l2_buffer  *src_vbs[VPE_MAX_SRC_BUFS];
+       struct vb2_v4l2_buffer  *dst_vb;
 
        dma_addr_t              mv_buf_dma[2];          /* dma addrs of motion vector in/out bufs */
        void                    *mv_buf[2];             /* virtual addrs of motion vector bufs */
 {
        struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
        const struct vpe_port_data *p_data = &port_data[port];
-       struct vb2_buffer *vb = ctx->dst_vb;
+       struct vb2_buffer *vb = &ctx->dst_vb->vb2_buf;
        struct vpe_fmt *fmt = q_data->fmt;
        const struct vpdma_data_format *vpdma_fmt;
        int mv_buf_selector = !ctx->src_mv_buf_selector;
 {
        struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
        const struct vpe_port_data *p_data = &port_data[port];
-       struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index];
+       struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vpe_fmt *fmt = q_data->fmt;
        const struct vpdma_data_format *vpdma_fmt;
        int mv_buf_selector = ctx->src_mv_buf_selector;
-       int field = vb->v4l2_buf.field == V4L2_FIELD_BOTTOM;
+       int field = vbuf->field == V4L2_FIELD_BOTTOM;
        int frame_width, frame_height;
        dma_addr_t dma_addr;
        u32 flags = 0;
        struct vpe_dev *dev = (struct vpe_dev *)data;
        struct vpe_ctx *ctx;
        struct vpe_q_data *d_q_data;
-       struct vb2_buffer *s_vb, *d_vb;
-       struct v4l2_buffer *s_buf, *d_buf;
+       struct vb2_v4l2_buffer *s_vb, *d_vb;
        unsigned long flags;
        u32 irqst0, irqst1;
 
 
        s_vb = ctx->src_vbs[0];
        d_vb = ctx->dst_vb;
-       s_buf = &s_vb->v4l2_buf;
-       d_buf = &d_vb->v4l2_buf;
 
-       d_buf->flags = s_buf->flags;
+       d_vb->flags = s_vb->flags;
+       d_vb->timestamp = s_vb->timestamp;
 
-       d_buf->timestamp = s_buf->timestamp;
-       if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE)
-               d_buf->timecode = s_buf->timecode;
+       if (s_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+               d_vb->timecode = s_vb->timecode;
 
-       d_buf->sequence = ctx->sequence;
+       d_vb->sequence = ctx->sequence;
 
        d_q_data = &ctx->q_data[Q_DATA_DST];
        if (d_q_data->flags & Q_DATA_INTERLACED) {
-               d_buf->field = ctx->field;
+               d_vb->field = ctx->field;
                if (ctx->field == V4L2_FIELD_BOTTOM) {
                        ctx->sequence++;
                        ctx->field = V4L2_FIELD_TOP;
                        ctx->field = V4L2_FIELD_BOTTOM;
                }
        } else {
-               d_buf->field = V4L2_FIELD_NONE;
+               d_vb->field = V4L2_FIELD_NONE;
                ctx->sequence++;
        }
 
 
 static int vpe_buf_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
        struct vpe_q_data *q_data;
        int i, num_planes;
 
        if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
                if (!(q_data->flags & Q_DATA_INTERLACED)) {
-                       vb->v4l2_buf.field = V4L2_FIELD_NONE;
+                       vbuf->field = V4L2_FIELD_NONE;
                } else {
-                       if (vb->v4l2_buf.field != V4L2_FIELD_TOP &&
-                                       vb->v4l2_buf.field != V4L2_FIELD_BOTTOM)
+                       if (vbuf->field != V4L2_FIELD_TOP &&
+                                       vbuf->field != V4L2_FIELD_BOTTOM)
                                return -EINVAL;
                }
        }
 
 static void vpe_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 
-       v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+       v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 }
 
 static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
 
 
 
 static int device_process(struct vim2m_ctx *ctx,
-                         struct vb2_buffer *in_vb,
-                         struct vb2_buffer *out_vb)
+                         struct vb2_v4l2_buffer *in_vb,
+                         struct vb2_v4l2_buffer *out_vb)
 {
        struct vim2m_dev *dev = ctx->dev;
        struct vim2m_q_data *q_data;
        height  = q_data->height;
        bytesperline    = (q_data->width * q_data->fmt->depth) >> 3;
 
-       p_in = vb2_plane_vaddr(in_vb, 0);
-       p_out = vb2_plane_vaddr(out_vb, 0);
+       p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0);
+       p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0);
        if (!p_in || !p_out) {
                v4l2_err(&dev->v4l2_dev,
                         "Acquiring kernel pointers to buffers failed\n");
                return -EFAULT;
        }
 
-       if (vb2_plane_size(in_vb, 0) > vb2_plane_size(out_vb, 0)) {
+       if (vb2_plane_size(&in_vb->vb2_buf, 0) >
+                       vb2_plane_size(&out_vb->vb2_buf, 0)) {
                v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
                return -EINVAL;
        }
        bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES;
        w = 0;
 
-       out_vb->v4l2_buf.sequence = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++;
-       in_vb->v4l2_buf.sequence = q_data->sequence++;
-       out_vb->v4l2_buf.timestamp = in_vb->v4l2_buf.timestamp;
-       if (in_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TIMECODE)
-               out_vb->v4l2_buf.timecode = in_vb->v4l2_buf.timecode;
-       out_vb->v4l2_buf.field = in_vb->v4l2_buf.field;
-       out_vb->v4l2_buf.flags = in_vb->v4l2_buf.flags &
+       out_vb->sequence =
+               get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++;
+       in_vb->sequence = q_data->sequence++;
+       out_vb->timestamp = in_vb->timestamp;
+
+       if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+               out_vb->timecode = in_vb->timecode;
+       out_vb->field = in_vb->field;
+       out_vb->flags = in_vb->flags &
                (V4L2_BUF_FLAG_TIMECODE |
                 V4L2_BUF_FLAG_KEYFRAME |
                 V4L2_BUF_FLAG_PFRAME |
 {
        struct vim2m_ctx *ctx = priv;
        struct vim2m_dev *dev = ctx->dev;
-       struct vb2_buffer *src_buf, *dst_buf;
+       struct vb2_v4l2_buffer *src_buf, *dst_buf;
 
        src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
        dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 {
        struct vim2m_dev *vim2m_dev = (struct vim2m_dev *)priv;
        struct vim2m_ctx *curr_ctx;
-       struct vb2_buffer *src_vb, *dst_vb;
+       struct vb2_v4l2_buffer *src_vb, *dst_vb;
        unsigned long flags;
 
        curr_ctx = v4l2_m2m_get_curr_priv(vim2m_dev->m2m_dev);
 
 static int vim2m_buf_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
        struct vim2m_q_data *q_data;
 
 
        q_data = get_q_data(ctx, vb->vb2_queue->type);
        if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
-               if (vb->v4l2_buf.field == V4L2_FIELD_ANY)
-                       vb->v4l2_buf.field = V4L2_FIELD_NONE;
-               if (vb->v4l2_buf.field != V4L2_FIELD_NONE) {
+               if (vbuf->field == V4L2_FIELD_ANY)
+                       vbuf->field = V4L2_FIELD_NONE;
+               if (vbuf->field != V4L2_FIELD_NONE) {
                        dprintk(ctx->dev, "%s field isn't supported\n",
                                        __func__);
                        return -EINVAL;
 
 static void vim2m_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 
-       v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+       v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 }
 
 static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
 static void vim2m_stop_streaming(struct vb2_queue *q)
 {
        struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
-       struct vb2_buffer *vb;
+       struct vb2_v4l2_buffer *vbuf;
        unsigned long flags;
 
        for (;;) {
                if (V4L2_TYPE_IS_OUTPUT(q->type))
-                       vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+                       vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
                else
-                       vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-               if (vb == NULL)
+                       vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+               if (vbuf == NULL)
                        return;
                spin_lock_irqsave(&ctx->dev->irqlock, flags);
-               v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+               v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
                spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
        }
 }
 
 /* buffer for one video frame */
 struct vivid_buffer {
        /* common v4l buffer stuff -- must be first */
-       struct vb2_buffer       vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head        list;
 };
 
 
        void *vbuf;
 
        if (p == 0 || tpg_g_buffers(tpg) > 1)
-               return vb2_plane_vaddr(&buf->vb, p);
-       vbuf = vb2_plane_vaddr(&buf->vb, 0);
+               return vb2_plane_vaddr(&buf->vb.vb2_buf, p);
+       vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
        for (i = 0; i < p; i++)
                vbuf += bpl[i] * h / tpg->vdownsampling[i];
        return vbuf;
 static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
                struct vivid_buffer *vid_cap_buf)
 {
-       bool blank = dev->must_blank[vid_cap_buf->vb.v4l2_buf.index];
+       bool blank = dev->must_blank[vid_cap_buf->vb.vb2_buf.index];
        struct tpg_data *tpg = &dev->tpg;
        struct vivid_buffer *vid_out_buf = NULL;
        unsigned vdiv = dev->fmt_out->vdownsampling[p];
        if (vid_out_buf == NULL)
                return -ENODATA;
 
-       vid_cap_buf->vb.v4l2_buf.field = vid_out_buf->vb.v4l2_buf.field;
+       vid_cap_buf->vb.field = vid_out_buf->vb.field;
 
        voutbuf = plane_vaddr(tpg, vid_out_buf, p,
                              dev->bytesperline_out, dev->fmt_out_rect.height);
        if (p < dev->fmt_out->buffers)
-               voutbuf += vid_out_buf->vb.v4l2_planes[p].data_offset;
+               voutbuf += vid_out_buf->vb.vb2_buf.planes[p].data_offset;
        voutbuf += tpg_hdiv(tpg, p, dev->loop_vid_out.left) +
                (dev->loop_vid_out.top / vdiv) * stride_out;
        vcapbuf += tpg_hdiv(tpg, p, dev->compose_cap.left) +
        bool is_loop = false;
 
        if (dev->loop_video && dev->can_loop_video &&
-           ((vivid_is_svid_cap(dev) && !VIVID_INVALID_SIGNAL(dev->std_signal_mode)) ||
-            (vivid_is_hdmi_cap(dev) && !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode))))
+               ((vivid_is_svid_cap(dev) &&
+               !VIVID_INVALID_SIGNAL(dev->std_signal_mode)) ||
+               (vivid_is_hdmi_cap(dev) &&
+               !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode))))
                is_loop = true;
 
-       buf->vb.v4l2_buf.sequence = dev->vid_cap_seq_count;
+       buf->vb.sequence = dev->vid_cap_seq_count;
        /*
         * Take the timestamp now if the timestamp source is set to
         * "Start of Exposure".
         */
        if (dev->tstamp_src_is_soe)
-               v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+               v4l2_get_timestamp(&buf->vb.timestamp);
        if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
                /*
                 * 60 Hz standards start with the bottom field, 50 Hz standards
                 * then the field is TOP for 50 Hz and BOTTOM for 60 Hz
                 * standards.
                 */
-               buf->vb.v4l2_buf.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
+               buf->vb.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
                        V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
                /*
                 * The sequence counter counts frames, not fields. So divide
                 * by two.
                 */
-               buf->vb.v4l2_buf.sequence /= 2;
+               buf->vb.sequence /= 2;
        } else {
-               buf->vb.v4l2_buf.field = dev->field_cap;
+               buf->vb.field = dev->field_cap;
        }
-       tpg_s_field(tpg, buf->vb.v4l2_buf.field,
+       tpg_s_field(tpg, buf->vb.field,
                    dev->field_cap == V4L2_FIELD_ALTERNATE);
-       tpg_s_perc_fill_blank(tpg, dev->must_blank[buf->vb.v4l2_buf.index]);
+       tpg_s_perc_fill_blank(tpg, dev->must_blank[buf->vb.vb2_buf.index]);
 
        vivid_precalc_copy_rects(dev);
 
                }
                tpg_calc_text_basep(tpg, basep, p, vbuf);
                if (!is_loop || vivid_copy_buffer(dev, p, vbuf, buf))
-                       tpg_fill_plane_buffer(tpg, vivid_get_std_cap(dev), p, vbuf);
+                       tpg_fill_plane_buffer(tpg, vivid_get_std_cap(dev),
+                                       p, vbuf);
        }
-       dev->must_blank[buf->vb.v4l2_buf.index] = false;
+       dev->must_blank[buf->vb.vb2_buf.index] = false;
 
        /* Updates stream time, only update at the start of a new frame. */
-       if (dev->field_cap != V4L2_FIELD_ALTERNATE || (buf->vb.v4l2_buf.sequence & 1) == 0)
-               dev->ms_vid_cap = jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);
+       if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
+                       (buf->vb.sequence & 1) == 0)
+               dev->ms_vid_cap =
+                       jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);
 
        ms = dev->ms_vid_cap;
        if (dev->osd_mode <= 1) {
                                (ms / (60 * 1000)) % 60,
                                (ms / 1000) % 60,
                                ms % 1000,
-                               buf->vb.v4l2_buf.sequence,
+                               buf->vb.sequence,
                                (dev->field_cap == V4L2_FIELD_ALTERNATE) ?
-                                       (buf->vb.v4l2_buf.field == V4L2_FIELD_TOP ?
+                                       (buf->vb.field == V4L2_FIELD_TOP ?
                                         " top" : " bottom") : "");
                tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
        }
         * the timestamp now.
         */
        if (!dev->tstamp_src_is_soe)
-               v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-       buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
+               v4l2_get_timestamp(&buf->vb.timestamp);
+       buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
 }
 
 /*
        struct tpg_data *tpg = &dev->tpg;
        unsigned pixsize = tpg_g_twopixelsize(tpg, 0) / 2;
        void *vbase = dev->fb_vbase_cap;
-       void *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+       void *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
        unsigned img_width = dev->compose_cap.width;
        unsigned img_height = dev->compose_cap.height;
        unsigned stride = tpg->bytesperline[0];
                return;
        if ((dev->overlay_cap_field == V4L2_FIELD_TOP ||
             dev->overlay_cap_field == V4L2_FIELD_BOTTOM) &&
-           dev->overlay_cap_field != buf->vb.v4l2_buf.field)
+           dev->overlay_cap_field != buf->vb.field)
                return;
 
        vbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride;
                /* Fill buffer */
                vivid_fillbuff(dev, vid_cap_buf);
                dprintk(dev, 1, "filled buffer %d\n",
-                       vid_cap_buf->vb.v4l2_buf.index);
+                       vid_cap_buf->vb.vb2_buf.index);
 
                /* Handle overlay */
                if (dev->overlay_cap_owner && dev->fb_cap.base &&
-                               dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc)
+                       dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc)
                        vivid_overlay(dev, vid_cap_buf);
 
-               vb2_buffer_done(&vid_cap_buf->vb, dev->dqbuf_error ?
+               vb2_buffer_done(&vid_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
                                VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
                dprintk(dev, 2, "vid_cap buffer %d done\n",
-                               vid_cap_buf->vb.v4l2_buf.index);
+                               vid_cap_buf->vb.vb2_buf.index);
        }
 
        if (vbi_cap_buf) {
                        vivid_sliced_vbi_cap_process(dev, vbi_cap_buf);
                else
                        vivid_raw_vbi_cap_process(dev, vbi_cap_buf);
-               vb2_buffer_done(&vbi_cap_buf->vb, dev->dqbuf_error ?
+               vb2_buffer_done(&vbi_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
                                VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
                dprintk(dev, 2, "vbi_cap %d done\n",
-                               vbi_cap_buf->vb.v4l2_buf.index);
+                               vbi_cap_buf->vb.vb2_buf.index);
        }
        dev->dqbuf_error = false;
 
                        buf = list_entry(dev->vid_cap_active.next,
                                         struct vivid_buffer, list);
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+                       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                        dprintk(dev, 2, "vid_cap buffer %d done\n",
-                               buf->vb.v4l2_buf.index);
+                               buf->vb.vb2_buf.index);
                }
        }
 
                        buf = list_entry(dev->vbi_cap_active.next,
                                         struct vivid_buffer, list);
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+                       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                        dprintk(dev, 2, "vbi_cap buffer %d done\n",
-                               buf->vb.v4l2_buf.index);
+                               buf->vb.vb2_buf.index);
                }
        }
 
 
                return;
 
        if (vid_out_buf) {
-               vid_out_buf->vb.v4l2_buf.sequence = dev->vid_out_seq_count;
+               vid_out_buf->vb.sequence = dev->vid_out_seq_count;
                if (dev->field_out == V4L2_FIELD_ALTERNATE) {
                        /*
-                        * The sequence counter counts frames, not fields. So divide
-                        * by two.
+                        * The sequence counter counts frames, not fields.
+                        * So divide by two.
                         */
-                       vid_out_buf->vb.v4l2_buf.sequence /= 2;
+                       vid_out_buf->vb.sequence /= 2;
                }
-               v4l2_get_timestamp(&vid_out_buf->vb.v4l2_buf.timestamp);
-               vid_out_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
-               vb2_buffer_done(&vid_out_buf->vb, dev->dqbuf_error ?
+               v4l2_get_timestamp(&vid_out_buf->vb.timestamp);
+               vid_out_buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
+               vb2_buffer_done(&vid_out_buf->vb.vb2_buf, dev->dqbuf_error ?
                                VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
                dprintk(dev, 2, "vid_out buffer %d done\n",
-                       vid_out_buf->vb.v4l2_buf.index);
+                       vid_out_buf->vb.vb2_buf.index);
        }
 
        if (vbi_out_buf) {
                if (dev->stream_sliced_vbi_out)
                        vivid_sliced_vbi_out_process(dev, vbi_out_buf);
 
-               vbi_out_buf->vb.v4l2_buf.sequence = dev->vbi_out_seq_count;
-               v4l2_get_timestamp(&vbi_out_buf->vb.v4l2_buf.timestamp);
-               vbi_out_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
-               vb2_buffer_done(&vbi_out_buf->vb, dev->dqbuf_error ?
+               vbi_out_buf->vb.sequence = dev->vbi_out_seq_count;
+               v4l2_get_timestamp(&vbi_out_buf->vb.timestamp);
+               vbi_out_buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
+               vb2_buffer_done(&vbi_out_buf->vb.vb2_buf, dev->dqbuf_error ?
                                VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
                dprintk(dev, 2, "vbi_out buffer %d done\n",
-                       vbi_out_buf->vb.v4l2_buf.index);
+                       vbi_out_buf->vb.vb2_buf.index);
        }
        dev->dqbuf_error = false;
 }
                        buf = list_entry(dev->vid_out_active.next,
                                         struct vivid_buffer, list);
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+                       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                        dprintk(dev, 2, "vid_out buffer %d done\n",
-                               buf->vb.v4l2_buf.index);
+                               buf->vb.vb2_buf.index);
                }
        }
 
                        buf = list_entry(dev->vbi_out_active.next,
                                         struct vivid_buffer, list);
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+                       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                        dprintk(dev, 2, "vbi_out buffer %d done\n",
-                               buf->vb.v4l2_buf.index);
+                               buf->vb.vb2_buf.index);
                }
        }
 
 
        spin_unlock(&dev->slock);
 
        if (sdr_cap_buf) {
-               sdr_cap_buf->vb.v4l2_buf.sequence = dev->sdr_cap_seq_count;
+               sdr_cap_buf->vb.sequence = dev->sdr_cap_seq_count;
                vivid_sdr_cap_process(dev, sdr_cap_buf);
-               v4l2_get_timestamp(&sdr_cap_buf->vb.v4l2_buf.timestamp);
-               sdr_cap_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
-               vb2_buffer_done(&sdr_cap_buf->vb, dev->dqbuf_error ?
+               v4l2_get_timestamp(&sdr_cap_buf->vb.timestamp);
+               sdr_cap_buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
+               vb2_buffer_done(&sdr_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
                                VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
                dev->dqbuf_error = false;
        }
                /* Calculate the number of jiffies since we started streaming */
                jiffies_since_start = cur_jiffies - dev->jiffies_sdr_cap;
                /* Get the number of buffers streamed since the start */
-               buffers_since_start = (u64)jiffies_since_start * dev->sdr_adc_freq +
+               buffers_since_start =
+                       (u64)jiffies_since_start * dev->sdr_adc_freq +
                                      (HZ * SDR_CAP_SAMPLES_PER_BUF) / 2;
                do_div(buffers_since_start, HZ * SDR_CAP_SAMPLES_PER_BUF);
 
                        dev->sdr_cap_seq_offset = buffers_since_start;
                        buffers_since_start = 0;
                }
-               dev->sdr_cap_seq_count = buffers_since_start + dev->sdr_cap_seq_offset;
+               dev->sdr_cap_seq_count =
+                       buffers_since_start + dev->sdr_cap_seq_offset;
 
                vivid_thread_sdr_cap_tick(dev);
                mutex_unlock(&dev->mutex);
 
 static void sdr_cap_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+       struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
 
        dprintk(dev, 1, "%s\n", __func__);
 
 
                list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&buf->vb.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                }
        }
        return err;
        while (!list_empty(&dev->sdr_cap_active)) {
                struct vivid_buffer *buf;
 
-               buf = list_entry(dev->sdr_cap_active.next, struct vivid_buffer, list);
+               buf = list_entry(dev->sdr_cap_active.next,
+                               struct vivid_buffer, list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
 
        /* shutdown control thread */
        .wait_finish            = vb2_ops_wait_finish,
 };
 
-int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
+int vivid_sdr_enum_freq_bands(struct file *file, void *fh,
+               struct v4l2_frequency_band *band)
 {
        switch (band->tuner) {
        case 0:
        }
 }
 
-int vivid_sdr_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
+int vivid_sdr_g_frequency(struct file *file, void *fh,
+               struct v4l2_frequency *vf)
 {
        struct vivid_dev *dev = video_drvdata(file);
 
        }
 }
 
-int vivid_sdr_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+int vivid_sdr_s_frequency(struct file *file, void *fh,
+               const struct v4l2_frequency *vf)
 {
        struct vivid_dev *dev = video_drvdata(file);
        unsigned freq = vf->frequency;
        case 0:
                strlcpy(vt->name, "ADC", sizeof(vt->name));
                vt->type = V4L2_TUNER_ADC;
-               vt->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+               vt->capability =
+                       V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
                vt->rangelow = bands_adc[0].rangelow;
                vt->rangehigh = bands_adc[2].rangehigh;
                return 0;
        case 1:
                strlcpy(vt->name, "RF", sizeof(vt->name));
                vt->type = V4L2_TUNER_RF;
-               vt->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+               vt->capability =
+                       V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
                vt->rangelow = bands_fm[0].rangelow;
                vt->rangehigh = bands_fm[0].rangehigh;
                return 0;
 
 void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
 {
-       u8 *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+       u8 *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
        unsigned long i;
-       unsigned long plane_size = vb2_plane_size(&buf->vb, 0);
+       unsigned long plane_size = vb2_plane_size(&buf->vb.vb2_buf, 0);
        s64 s64tmp;
        s32 src_phase_step;
        s32 mod_phase_step;
 
 void vivid_raw_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
 {
        struct v4l2_vbi_format vbi;
-       u8 *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+       u8 *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
 
        vivid_g_fmt_vbi_cap(dev, &vbi);
-       buf->vb.v4l2_buf.sequence = dev->vbi_cap_seq_count;
+       buf->vb.sequence = dev->vbi_cap_seq_count;
        if (dev->field_cap == V4L2_FIELD_ALTERNATE)
-               buf->vb.v4l2_buf.sequence /= 2;
+               buf->vb.sequence /= 2;
 
-       vivid_sliced_vbi_cap_fill(dev, buf->vb.v4l2_buf.sequence);
+       vivid_sliced_vbi_cap_fill(dev, buf->vb.sequence);
 
-       memset(vbuf, 0x10, vb2_plane_size(&buf->vb, 0));
+       memset(vbuf, 0x10, vb2_plane_size(&buf->vb.vb2_buf, 0));
 
        if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode))
                vivid_vbi_gen_raw(&dev->vbi_gen, &vbi, vbuf);
 
-       v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-       buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
+       v4l2_get_timestamp(&buf->vb.timestamp);
+       buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
 }
 
 
-void vivid_sliced_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
+void vivid_sliced_vbi_cap_process(struct vivid_dev *dev,
+                       struct vivid_buffer *buf)
 {
-       struct v4l2_sliced_vbi_data *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+       struct v4l2_sliced_vbi_data *vbuf =
+                       vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
 
-       buf->vb.v4l2_buf.sequence = dev->vbi_cap_seq_count;
+       buf->vb.sequence = dev->vbi_cap_seq_count;
        if (dev->field_cap == V4L2_FIELD_ALTERNATE)
-               buf->vb.v4l2_buf.sequence /= 2;
+               buf->vb.sequence /= 2;
 
-       vivid_sliced_vbi_cap_fill(dev, buf->vb.v4l2_buf.sequence);
+       vivid_sliced_vbi_cap_fill(dev, buf->vb.sequence);
 
-       memset(vbuf, 0, vb2_plane_size(&buf->vb, 0));
+       memset(vbuf, 0, vb2_plane_size(&buf->vb.vb2_buf, 0));
        if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
                unsigned i;
 
                        vbuf[i] = dev->vbi_gen.data[i];
        }
 
-       v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-       buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
+       v4l2_get_timestamp(&buf->vb.timestamp);
+       buf->vb.timestamp.tv_sec += dev->time_wrap_offset;
 }
 
-static int vbi_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
-                      unsigned *nbuffers, unsigned *nplanes,
-                      unsigned sizes[], void *alloc_ctxs[])
+static int vbi_cap_queue_setup(struct vb2_queue *vq,
+                       const struct v4l2_format *fmt,
+                       unsigned *nbuffers, unsigned *nplanes,
+                       unsigned sizes[], void *alloc_ctxs[])
 {
        struct vivid_dev *dev = vb2_get_drv_priv(vq);
        bool is_60hz = dev->std_cap & V4L2_STD_525_60;
 
 static void vbi_cap_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+       struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
 
        dprintk(dev, 1, "%s\n", __func__);
 
 
                list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&buf->vb.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                }
        }
        return err;
 
 
 static void vbi_out_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+       struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
 
        dprintk(dev, 1, "%s\n", __func__);
 
 
                list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&buf->vb.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                }
        }
        return err;
        return 0;
 }
 
-int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
+int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh,
+               struct v4l2_format *fmt)
 {
        struct vivid_dev *dev = video_drvdata(file);
        struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
        return 0;
 }
 
-void vivid_sliced_vbi_out_process(struct vivid_dev *dev, struct vivid_buffer *buf)
+void vivid_sliced_vbi_out_process(struct vivid_dev *dev,
+               struct vivid_buffer *buf)
 {
-       struct v4l2_sliced_vbi_data *vbi = vb2_plane_vaddr(&buf->vb, 0);
-       unsigned elems = vb2_get_plane_payload(&buf->vb, 0) / sizeof(*vbi);
+       struct v4l2_sliced_vbi_data *vbi =
+               vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+       unsigned elems =
+               vb2_get_plane_payload(&buf->vb.vb2_buf, 0) / sizeof(*vbi);
 
        dev->vbi_out_have_cc[0] = false;
        dev->vbi_out_have_cc[1] = false;
 
                }
 
                vb2_set_plane_payload(vb, p, size);
-               vb->v4l2_planes[p].data_offset = dev->fmt_cap->data_offset[p];
+               vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
        }
 
        return 0;
 
 static void vid_cap_buf_finish(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct v4l2_timecode *tc = &vb->v4l2_buf.timecode;
+       struct v4l2_timecode *tc = &vbuf->timecode;
        unsigned fps = 25;
-       unsigned seq = vb->v4l2_buf.sequence;
+       unsigned seq = vbuf->sequence;
 
        if (!vivid_is_sdtv_cap(dev))
                return;
         * Set the timecode. Rarely used, so it is interesting to
         * test this.
         */
-       vb->v4l2_buf.flags |= V4L2_BUF_FLAG_TIMECODE;
+       vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
        if (dev->std_cap & V4L2_STD_525_60)
                fps = 30;
        tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
 
 static void vid_cap_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+       struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
 
        dprintk(dev, 1, "%s\n", __func__);
 
 
                list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&buf->vb.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                }
        }
        return err;
 
 
 static int vid_out_buf_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
        unsigned long size;
        unsigned planes;
        }
 
        if (dev->field_out != V4L2_FIELD_ALTERNATE)
-               vb->v4l2_buf.field = dev->field_out;
-       else if (vb->v4l2_buf.field != V4L2_FIELD_TOP &&
-                vb->v4l2_buf.field != V4L2_FIELD_BOTTOM)
+               vbuf->field = dev->field_out;
+       else if (vbuf->field != V4L2_FIELD_TOP &&
+                vbuf->field != V4L2_FIELD_BOTTOM)
                return -EINVAL;
 
        for (p = 0; p < planes; p++) {
                size = dev->bytesperline_out[p] * dev->fmt_out_rect.height +
-                       vb->v4l2_planes[p].data_offset;
+                       vb->planes[p].data_offset;
 
                if (vb2_get_plane_payload(vb, p) < size) {
                        dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %lu)\n",
 
 static void vid_out_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+       struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
 
        dprintk(dev, 1, "%s\n", __func__);
 
 
                list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) {
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&buf->vb.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                }
        }
        return err;
 
 
        vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
                       buf->addr[0] + rpf->offsets[0]);
-       if (buf->buf.num_planes > 1)
+       if (buf->buf.vb2_buf.num_planes > 1)
                vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
                               buf->addr[1] + rpf->offsets[1]);
-       if (buf->buf.num_planes > 2)
+       if (buf->buf.vb2_buf.num_planes > 2)
                vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
                               buf->addr[2] + rpf->offsets[1]);
 }
 
 
        spin_unlock_irqrestore(&video->irqlock, flags);
 
-       done->buf.v4l2_buf.sequence = video->sequence++;
-       v4l2_get_timestamp(&done->buf.v4l2_buf.timestamp);
-       for (i = 0; i < done->buf.num_planes; ++i)
-               vb2_set_plane_payload(&done->buf, i, done->length[i]);
-       vb2_buffer_done(&done->buf, VB2_BUF_STATE_DONE);
+       done->buf.sequence = video->sequence++;
+       v4l2_get_timestamp(&done->buf.timestamp);
+       for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
+               vb2_set_plane_payload(&done->buf.vb2_buf, i, done->length[i]);
+       vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);
 
        return next;
 }
 
 static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
-       struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vb);
+       struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vbuf);
        const struct v4l2_pix_format_mplane *format = &video->format;
        unsigned int i;
 
 
 static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
        struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
-       struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vb);
+       struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vbuf);
        unsigned long flags;
        bool empty;
 
        /* Remove all buffers from the IRQ queue. */
        spin_lock_irqsave(&video->irqlock, flags);
        list_for_each_entry(buffer, &video->irqqueue, queue)
-               vb2_buffer_done(&buffer->buf, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
        INIT_LIST_HEAD(&video->irqqueue);
        spin_unlock_irqrestore(&video->irqlock, flags);
 }
 
 }
 
 struct vsp1_video_buffer {
-       struct vb2_buffer buf;
+       struct vb2_v4l2_buffer buf;
        struct list_head queue;
 
        dma_addr_t addr[3];
 };
 
 static inline struct vsp1_video_buffer *
-to_vsp1_video_buffer(struct vb2_buffer *vb)
+to_vsp1_video_buffer(struct vb2_v4l2_buffer *vbuf)
 {
-       return container_of(vb, struct vsp1_video_buffer, buf);
+       return container_of(vbuf, struct vsp1_video_buffer, buf);
 }
 
 struct vsp1_video_operations {
 
        struct vsp1_rwpf *wpf = container_of(video, struct vsp1_rwpf, video);
 
        vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, buf->addr[0]);
-       if (buf->buf.num_planes > 1)
+       if (buf->buf.vb2_buf.num_planes > 1)
                vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, buf->addr[1]);
-       if (buf->buf.num_planes > 2)
+       if (buf->buf.vb2_buf.num_planes > 2)
                vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, buf->addr[2]);
 }
 
 
  * @dma: DMA channel that uses the buffer
  */
 struct xvip_dma_buffer {
-       struct vb2_buffer buf;
+       struct vb2_v4l2_buffer buf;
        struct list_head queue;
        struct xvip_dma *dma;
 };
        list_del(&buf->queue);
        spin_unlock(&dma->queued_lock);
 
-       buf->buf.v4l2_buf.field = V4L2_FIELD_NONE;
-       buf->buf.v4l2_buf.sequence = dma->sequence++;
-       v4l2_get_timestamp(&buf->buf.v4l2_buf.timestamp);
-       vb2_set_plane_payload(&buf->buf, 0, dma->format.sizeimage);
-       vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
+       buf->buf.field = V4L2_FIELD_NONE;
+       buf->buf.sequence = dma->sequence++;
+       v4l2_get_timestamp(&buf->buf.timestamp);
+       vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
+       vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
 }
 
 static int
 
 static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
-       struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
+       struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
 
        buf->dma = dma;
 
 
 static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
-       struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
+       struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
        struct dma_async_tx_descriptor *desc;
        dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
        u32 flags;
        desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
        if (!desc) {
                dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
-               vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
                return;
        }
        desc->callback = xvip_dma_complete;
        /* Give back all queued buffers to videobuf2. */
        spin_lock_irq(&dma->queued_lock);
        list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
-               vb2_buffer_done(&buf->buf, VB2_BUF_STATE_QUEUED);
+               vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
                list_del(&buf->queue);
        }
        spin_unlock_irq(&dma->queued_lock);
        /* Give back all queued buffers to videobuf2. */
        spin_lock_irq(&dma->queued_lock);
        list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
-               vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
                list_del(&buf->queue);
        }
        spin_unlock_irq(&dma->queued_lock);
 
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>
 
 /* AirSpy USB API commands (from AirSpy Library) */
 
 /* intermediate buffers with raw data from the USB device */
 struct airspy_frame_buf {
-       struct vb2_buffer vb;   /* common v4l buffer stuff -- must be first */
+       /* common v4l buffer stuff -- must be first */
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
                }
 
                /* fill framebuffer */
-               ptr = vb2_plane_vaddr(&fbuf->vb, 0);
+               ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
                len = airspy_convert_stream(s, ptr, urb->transfer_buffer,
                                urb->actual_length);
-               vb2_set_plane_payload(&fbuf->vb, 0, len);
-               v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp);
-               fbuf->vb.v4l2_buf.sequence = s->sequence++;
-               vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+               vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, len);
+               v4l2_get_timestamp(&fbuf->vb.timestamp);
+               fbuf->vb.sequence = s->sequence++;
+               vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
        }
 skip:
        usb_submit_urb(urb, GFP_ATOMIC);
                buf = list_entry(s->queued_bufs.next,
                                struct airspy_frame_buf, list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
 }
 
 static void airspy_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct airspy *s = vb2_get_drv_priv(vb->vb2_queue);
        struct airspy_frame_buf *buf =
-                       container_of(vb, struct airspy_frame_buf, vb);
+                       container_of(vbuf, struct airspy_frame_buf, vb);
        unsigned long flags;
 
        /* Check the device has not disconnected between prep and queuing */
        if (unlikely(!s->udev)) {
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                return;
        }
 
 
                list_for_each_entry_safe(buf, tmp, &s->queued_bufs, list) {
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&buf->vb.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                }
        }
 
 
 static int vbi_buffer_prepare(struct vb2_buffer *vb)
 {
        struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct au0828_buffer *buf = container_of(vb, struct au0828_buffer, vb);
        unsigned long size;
 
        size = dev->vbi_width * dev->vbi_height * 2;
                        __func__, vb2_plane_size(vb, 0), size);
                return -EINVAL;
        }
-       vb2_set_plane_payload(&buf->vb, 0, size);
+       vb2_set_plane_payload(vb, 0, size);
 
        return 0;
 }
 vbi_buffer_queue(struct vb2_buffer *vb)
 {
        struct au0828_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct au0828_buffer *buf = container_of(vb, struct au0828_buffer, vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct au0828_buffer *buf =
+                       container_of(vbuf, struct au0828_buffer, vb);
        struct au0828_dmaqueue *vbiq = &dev->vbiq;
        unsigned long flags = 0;
 
 
                                 struct au0828_dmaqueue *dma_q,
                                 struct au0828_buffer *buf)
 {
-       struct vb2_buffer *vb = &buf->vb;
-       struct vb2_queue *q = vb->vb2_queue;
+       struct vb2_v4l2_buffer *vb = &buf->vb;
+       struct vb2_queue *q = vb->vb2_buf.vb2_queue;
 
        /* Advise that buffer was filled */
        au0828_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
 
        if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
-               vb->v4l2_buf.sequence = dev->frame_count++;
+               vb->sequence = dev->frame_count++;
        else
-               vb->v4l2_buf.sequence = dev->vbi_frame_count++;
+               vb->sequence = dev->vbi_frame_count++;
 
-       vb->v4l2_buf.field = V4L2_FIELD_INTERLACED;
-       v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
-       vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+       vb->field = V4L2_FIELD_INTERLACED;
+       v4l2_get_timestamp(&vb->timestamp);
+       vb2_buffer_done(&vb->vb2_buf, VB2_BUF_STATE_DONE);
 }
 
 /*
 
        buf = dev->isoc_ctl.buf;
        if (buf != NULL)
-               outp = vb2_plane_vaddr(&buf->vb, 0);
+               outp = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
 
        vbi_buf = dev->isoc_ctl.vbi_buf;
        if (vbi_buf != NULL)
-               vbioutp = vb2_plane_vaddr(&vbi_buf->vb, 0);
+               vbioutp = vb2_plane_vaddr(&vbi_buf->vb.vb2_buf, 0);
 
        for (i = 0; i < urb->number_of_packets; i++) {
                int status = urb->iso_frame_desc[i].status;
                                        vbioutp = NULL;
                                else
                                        vbioutp = vb2_plane_vaddr(
-                                               &vbi_buf->vb, 0);
+                                               &vbi_buf->vb.vb2_buf, 0);
 
                                /* Video */
                                if (buf != NULL)
                                if (buf == NULL)
                                        outp = NULL;
                                else
-                                       outp = vb2_plane_vaddr(&buf->vb, 0);
+                                       outp = vb2_plane_vaddr(
+                                               &buf->vb.vb2_buf, 0);
 
                                /* As long as isoc traffic is arriving, keep
                                   resetting the timer */
 static int
 buffer_prepare(struct vb2_buffer *vb)
 {
-       struct au0828_buffer *buf = container_of(vb, struct au0828_buffer, vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct au0828_buffer *buf = container_of(vbuf,
+                               struct au0828_buffer, vb);
        struct au0828_dev    *dev = vb2_get_drv_priv(vb->vb2_queue);
 
        buf->length = dev->height * dev->bytesperline;
                        __func__, vb2_plane_size(vb, 0), buf->length);
                return -EINVAL;
        }
-       vb2_set_plane_payload(&buf->vb, 0, buf->length);
+       vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->length);
        return 0;
 }
 
 static void
 buffer_queue(struct vb2_buffer *vb)
 {
-       struct au0828_buffer    *buf     = container_of(vb,
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct au0828_buffer    *buf     = container_of(vbuf,
                                                        struct au0828_buffer,
                                                        vb);
        struct au0828_dev       *dev     = vb2_get_drv_priv(vb->vb2_queue);
 
        spin_lock_irqsave(&dev->slock, flags);
        if (dev->isoc_ctl.buf != NULL) {
-               vb2_buffer_done(&dev->isoc_ctl.buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&dev->isoc_ctl.buf->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
                dev->isoc_ctl.buf = NULL;
        }
        while (!list_empty(&vidq->active)) {
                struct au0828_buffer *buf;
 
                buf = list_entry(vidq->active.next, struct au0828_buffer, list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                list_del(&buf->list);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 
        spin_lock_irqsave(&dev->slock, flags);
        if (dev->isoc_ctl.vbi_buf != NULL) {
-               vb2_buffer_done(&dev->isoc_ctl.vbi_buf->vb,
+               vb2_buffer_done(&dev->isoc_ctl.vbi_buf->vb.vb2_buf,
                                VB2_BUF_STATE_ERROR);
                dev->isoc_ctl.vbi_buf = NULL;
        }
 
                buf = list_entry(vbiq->active.next, struct au0828_buffer, list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 
 
        buf = dev->isoc_ctl.buf;
        if (buf != NULL) {
-               vid_data = vb2_plane_vaddr(&buf->vb, 0);
+               vid_data = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
                memset(vid_data, 0x00, buf->length); /* Blank green frame */
                buffer_filled(dev, dma_q, buf);
        }
 
        buf = dev->isoc_ctl.vbi_buf;
        if (buf != NULL) {
-               vbi_data = vb2_plane_vaddr(&buf->vb, 0);
+               vbi_data = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
                memset(vbi_data, 0x00, buf->length);
                buffer_filled(dev, dma_q, buf);
        }
 
 
 /* Analog */
 #include <linux/videodev2.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ctrls.h>
 /* buffer for one video frame */
 struct au0828_buffer {
        /* common v4l buffer stuff -- must be first */
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 
        void *mem;
 
 {
        struct em28xx        *dev  = vb2_get_drv_priv(vb->vb2_queue);
        struct em28xx_v4l2   *v4l2 = dev->v4l2;
-       struct em28xx_buffer *buf  = container_of(vb, struct em28xx_buffer, vb);
        unsigned long        size;
 
        size = v4l2->vbi_width * v4l2->vbi_height * 2;
                       __func__, vb2_plane_size(vb, 0), size);
                return -EINVAL;
        }
-       vb2_set_plane_payload(&buf->vb, 0, size);
+       vb2_set_plane_payload(vb, 0, size);
 
        return 0;
 }
 static void
 vbi_buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
+       struct em28xx_buffer *buf =
+               container_of(vbuf, struct em28xx_buffer, vb);
        struct em28xx_dmaqueue *vbiq = &dev->vbiq;
        unsigned long flags = 0;
 
 
 {
        em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
 
-       buf->vb.v4l2_buf.sequence = dev->v4l2->field_count++;
+       buf->vb.sequence = dev->v4l2->field_count++;
        if (dev->v4l2->progressive)
-               buf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
+               buf->vb.field = V4L2_FIELD_NONE;
        else
-               buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
-       v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+               buf->vb.field = V4L2_FIELD_INTERLACED;
+       v4l2_get_timestamp(&buf->vb.timestamp);
 
-       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
 }
 
 /*
 static int
 buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct em28xx        *dev = vb2_get_drv_priv(vb->vb2_queue);
        struct em28xx_v4l2   *v4l2 = dev->v4l2;
-       struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
        unsigned long size;
 
-       em28xx_videodbg("%s, field=%d\n", __func__, vb->v4l2_buf.field);
+       em28xx_videodbg("%s, field=%d\n", __func__, vbuf->field);
 
        size = (v4l2->width * v4l2->height * v4l2->format->depth + 7) >> 3;
 
                                __func__, vb2_plane_size(vb, 0), size);
                return -EINVAL;
        }
-       vb2_set_plane_payload(&buf->vb, 0, size);
+       vb2_set_plane_payload(vb, 0, size);
 
        return 0;
 }
        struct em28xx *dev = vb2_get_drv_priv(vq);
        struct em28xx_v4l2 *v4l2 = dev->v4l2;
        struct v4l2_frequency f;
+       struct v4l2_fh *owner;
        int rc = 0;
 
        em28xx_videodbg("%s\n", __func__);
                /* Ask tuner to go to analog or radio mode */
                memset(&f, 0, sizeof(f));
                f.frequency = v4l2->frequency;
-               if (vq->owner && vq->owner->vdev->vfl_type == VFL_TYPE_RADIO)
+               owner = (struct v4l2_fh *)vq->owner;
+               if (owner && owner->vdev->vfl_type == VFL_TYPE_RADIO)
                        f.type = V4L2_TUNER_RADIO;
                else
                        f.type = V4L2_TUNER_ANALOG_TV;
 
        spin_lock_irqsave(&dev->slock, flags);
        if (dev->usb_ctl.vid_buf != NULL) {
-               vb2_buffer_done(&dev->usb_ctl.vid_buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&dev->usb_ctl.vid_buf->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
                dev->usb_ctl.vid_buf = NULL;
        }
        while (!list_empty(&vidq->active)) {
 
                buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 }
 
        spin_lock_irqsave(&dev->slock, flags);
        if (dev->usb_ctl.vbi_buf != NULL) {
-               vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
                dev->usb_ctl.vbi_buf = NULL;
        }
        while (!list_empty(&vbiq->active)) {
 
                buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
 }
 static void
 buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
+       struct em28xx_buffer *buf =
+               container_of(vbuf, struct em28xx_buffer, vb);
        struct em28xx_dmaqueue *vidq = &dev->vidq;
        unsigned long flags = 0;
 
 
 #include <linux/kref.h>
 #include <linux/videodev2.h>
 
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ctrls.h>
 /* buffer for one video frame */
 struct em28xx_buffer {
        /* common v4l buffer stuff -- must be first */
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 
        void *mem;
 
  */
 static inline void store_byte(struct go7007_buffer *vb, u8 byte)
 {
-       if (vb && vb->vb.v4l2_planes[0].bytesused < GO7007_BUF_SIZE) {
-               u8 *ptr = vb2_plane_vaddr(&vb->vb, 0);
+       if (vb && vb->vb.vb2_buf.planes[0].bytesused < GO7007_BUF_SIZE) {
+               u8 *ptr = vb2_plane_vaddr(&vb->vb.vb2_buf, 0);
 
-               ptr[vb->vb.v4l2_planes[0].bytesused++] = byte;
+               ptr[vb->vb.vb2_buf.planes[0].bytesused++] = byte;
        }
 }
 
                        .type = V4L2_EVENT_MOTION_DET,
                        .u.motion_det = {
                                .flags = V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ,
-                               .frame_sequence = vb->vb.v4l2_buf.sequence,
+                               .frame_sequence = vb->vb.sequence,
                                .region_mask = motion_regions,
                        },
                };
  */
 static void go7007_motion_regions(struct go7007 *go, struct go7007_buffer *vb)
 {
-       u32 *bytesused = &vb->vb.v4l2_planes[0].bytesused;
+       u32 *bytesused = &vb->vb.vb2_buf.planes[0].bytesused;
        unsigned motion[4] = { 0, 0, 0, 0 };
        u32 motion_regions = 0;
        unsigned stride = (go->width + 7) >> 3;
                go->next_seq++;
                return vb;
        }
-       bytesused = &vb->vb.v4l2_planes[0].bytesused;
+       bytesused = &vb->vb.vb2_buf.planes[0].bytesused;
 
-       vb->vb.v4l2_buf.sequence = go->next_seq++;
+       vb->vb.sequence = go->next_seq++;
        if (vb->modet_active && *bytesused + 216 < GO7007_BUF_SIZE)
                go7007_motion_regions(go, vb);
        else
                go7007_set_motion_regions(go, vb, 0);
 
-       v4l2_get_timestamp(&vb->vb.v4l2_buf.timestamp);
+       v4l2_get_timestamp(&vb->vb.timestamp);
        vb_tmp = vb;
        spin_lock(&go->spinlock);
        list_del(&vb->list);
        if (list_empty(&go->vidq_active))
                vb = NULL;
        else
-               vb = list_first_entry(&go->vidq_active, struct go7007_buffer, list);
+               vb = list_first_entry(&go->vidq_active,
+                               struct go7007_buffer, list);
        go->active_buf = vb;
        spin_unlock(&go->spinlock);
-       vb2_buffer_done(&vb_tmp->vb, VB2_BUF_STATE_DONE);
+       vb2_buffer_done(&vb_tmp->vb.vb2_buf, VB2_BUF_STATE_DONE);
        return vb;
 }
 
        }
 
        for (i = 0; i < length; ++i) {
-               if (vb && vb->vb.v4l2_planes[0].bytesused >= GO7007_BUF_SIZE - 3) {
+               if (vb && vb->vb.vb2_buf.planes[0].bytesused >=
+                               GO7007_BUF_SIZE - 3) {
                        v4l2_info(&go->v4l2_dev, "dropping oversized frame\n");
-                       vb->vb.v4l2_planes[0].bytesused = 0;
+                       vb->vb.vb2_buf.planes[0].bytesused = 0;
                        vb->frame_offset = 0;
                        vb->modet_active = 0;
                        vb = go->active_buf = NULL;
                                        vb = frame_boundary(go, vb);
                                go->seen_frame = buf[i] == frame_start_code;
                                if (vb && go->seen_frame)
-                                       vb->frame_offset = vb->vb.v4l2_planes[0].bytesused;
+                                       vb->frame_offset =
+                                       vb->vb.vb2_buf.planes[0].bytesused;
                        }
                        /* Handle any special chunk types, or just write the
                         * start code to the (potentially new) buffer */
 
 #define        GO7007_BUF_SIZE         (GO7007_BUF_PAGES << PAGE_SHIFT)
 
 struct go7007_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
        unsigned int frame_offset;
        u32 modet_active;
 
 
 static u32 get_frame_type_flag(struct go7007_buffer *vb, int format)
 {
-       u8 *ptr = vb2_plane_vaddr(&vb->vb, 0);
+       u8 *ptr = vb2_plane_vaddr(&vb->vb.vb2_buf, 0);
 
        switch (format) {
        case V4L2_PIX_FMT_MJPEG:
 {
        struct vb2_queue *vq = vb->vb2_queue;
        struct go7007 *go = vb2_get_drv_priv(vq);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct go7007_buffer *go7007_vb =
-               container_of(vb, struct go7007_buffer, vb);
+               container_of(vbuf, struct go7007_buffer, vb);
        unsigned long flags;
 
        spin_lock_irqsave(&go->spinlock, flags);
 
 static int go7007_buf_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct go7007_buffer *go7007_vb =
-               container_of(vb, struct go7007_buffer, vb);
+               container_of(vbuf, struct go7007_buffer, vb);
 
        go7007_vb->modet_active = 0;
        go7007_vb->frame_offset = 0;
-       vb->v4l2_planes[0].bytesused = 0;
+       vb->planes[0].bytesused = 0;
        return 0;
 }
 
 {
        struct vb2_queue *vq = vb->vb2_queue;
        struct go7007 *go = vb2_get_drv_priv(vq);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct go7007_buffer *go7007_vb =
-               container_of(vb, struct go7007_buffer, vb);
+               container_of(vbuf, struct go7007_buffer, vb);
        u32 frame_type_flag = get_frame_type_flag(go7007_vb, go->format);
-       struct v4l2_buffer *buf = &vb->v4l2_buf;
 
-       buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_BFRAME |
+       vbuf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_BFRAME |
                        V4L2_BUF_FLAG_PFRAME);
-       buf->flags |= frame_type_flag;
-       buf->field = V4L2_FIELD_NONE;
+       vbuf->flags |= frame_type_flag;
+       vbuf->field = V4L2_FIELD_NONE;
 }
 
 static int go7007_start_streaming(struct vb2_queue *q, unsigned int count)
 
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>
 
 /* HackRF USB API commands (from HackRF Library) */
 
 /* intermediate buffers with raw data from the USB device */
 struct hackrf_frame_buf {
-       struct vb2_buffer vb;   /* common v4l buffer stuff -- must be first */
+       /* common v4l buffer stuff -- must be first */
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
                }
 
                /* fill framebuffer */
-               ptr = vb2_plane_vaddr(&fbuf->vb, 0);
+               ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
                len = hackrf_convert_stream(dev, ptr, urb->transfer_buffer,
                                urb->actual_length);
-               vb2_set_plane_payload(&fbuf->vb, 0, len);
-               v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp);
-               fbuf->vb.v4l2_buf.sequence = dev->sequence++;
-               vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+               vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, len);
+               v4l2_get_timestamp(&fbuf->vb.timestamp);
+               fbuf->vb.sequence = dev->sequence++;
+               vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
        }
 skip:
        usb_submit_urb(urb, GFP_ATOMIC);
                buf = list_entry(dev->queued_bufs.next,
                                struct hackrf_frame_buf, list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
 }
 
 static void hackrf_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct hackrf_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
        struct hackrf_frame_buf *buf =
-                       container_of(vb, struct hackrf_frame_buf, vb);
+                       container_of(vbuf, struct hackrf_frame_buf, vb);
        unsigned long flags;
 
        spin_lock_irqsave(&dev->queued_bufs_lock, flags);
 
                list_for_each_entry_safe(buf, tmp, &dev->queued_bufs, list) {
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&buf->vb.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                }
        }
 
 
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-event.h>
 #include <linux/usb.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>
 #include <linux/spi/spi.h>
 
 
 /* intermediate buffers with raw data from the USB device */
 struct msi2500_frame_buf {
-       struct vb2_buffer vb;   /* common v4l buffer stuff -- must be first */
+       /* common v4l buffer stuff -- must be first */
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
                }
 
                /* fill framebuffer */
-               ptr = vb2_plane_vaddr(&fbuf->vb, 0);
+               ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
                flen = msi2500_convert_stream(dev, ptr, iso_buf, flen);
-               vb2_set_plane_payload(&fbuf->vb, 0, flen);
-               vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+               vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, flen);
+               vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
        }
 
 handler_end:
                buf = list_entry(dev->queued_bufs.next,
                                 struct msi2500_frame_buf, list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
 }
 
 static void msi2500_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct msi2500_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
-       struct msi2500_frame_buf *buf = container_of(vb,
+       struct msi2500_frame_buf *buf = container_of(vbuf,
                                                     struct msi2500_frame_buf,
                                                     vb);
        unsigned long flags;
 
        /* Check the device has not disconnected between prep and queuing */
        if (unlikely(!dev->udev)) {
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                return;
        }
 
 
                        PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes);"
                                       " discarded.\n", fbuf->filled);
                } else {
-                       fbuf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
-                       fbuf->vb.v4l2_buf.sequence = pdev->vframe_count;
-                       vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+                       fbuf->vb.field = V4L2_FIELD_NONE;
+                       fbuf->vb.sequence = pdev->vframe_count;
+                       vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
                        pdev->fill_buf = NULL;
                        pdev->vsync = 0;
                }
                {
                        PWC_ERROR("Too many ISOC errors, bailing out.\n");
                        if (pdev->fill_buf) {
-                               vb2_buffer_done(&pdev->fill_buf->vb,
+                               vb2_buffer_done(&pdev->fill_buf->vb.vb2_buf,
                                                VB2_BUF_STATE_ERROR);
                                pdev->fill_buf = NULL;
                        }
 
                        if (pdev->vsync == 1) {
                                v4l2_get_timestamp(
-                                       &fbuf->vb.v4l2_buf.timestamp);
+                                       &fbuf->vb.timestamp);
                                pdev->vsync = 2;
                        }
 
                buf = list_entry(pdev->queued_bufs.next, struct pwc_frame_buf,
                                 list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, state);
+               vb2_buffer_done(&buf->vb.vb2_buf, state);
        }
        spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
 }
 
 static int buffer_init(struct vb2_buffer *vb)
 {
-       struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct pwc_frame_buf *buf =
+               container_of(vbuf, struct pwc_frame_buf, vb);
 
        /* need vmalloc since frame buffer > 128K */
        buf->data = vzalloc(PWC_FRAME_SIZE);
 static void buffer_finish(struct vb2_buffer *vb)
 {
        struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
-       struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct pwc_frame_buf *buf =
+               container_of(vbuf, struct pwc_frame_buf, vb);
 
        if (vb->state == VB2_BUF_STATE_DONE) {
                /*
 
 static void buffer_cleanup(struct vb2_buffer *vb)
 {
-       struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct pwc_frame_buf *buf =
+               container_of(vbuf, struct pwc_frame_buf, vb);
 
        vfree(buf->data);
 }
 static void buffer_queue(struct vb2_buffer *vb)
 {
        struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
-       struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct pwc_frame_buf *buf =
+               container_of(vbuf, struct pwc_frame_buf, vb);
        unsigned long flags = 0;
 
        /* Check the device has not disconnected between prep and queuing */
        if (!pdev->udev) {
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
                return;
        }
 
 
        pwc_cleanup_queued_bufs(pdev, VB2_BUF_STATE_ERROR);
        if (pdev->fill_buf)
-               vb2_buffer_done(&pdev->fill_buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&pdev->fill_buf->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
        mutex_unlock(&pdev->v4l2_lock);
 }
 
 
        u16 *src;
        u16 *dsty, *dstu, *dstv;
 
-       image = vb2_plane_vaddr(&fbuf->vb, 0);
+       image = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
 
        yuv = fbuf->data + pdev->frame_header_size;  /* Skip header */
 
                         * determine this using the type of the webcam */
                memcpy(raw_frame->cmd, pdev->cmd_buf, 4);
                memcpy(raw_frame+1, yuv, pdev->frame_size);
-               vb2_set_plane_payload(&fbuf->vb, 0,
+               vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0,
                        pdev->frame_size + sizeof(struct pwc_raw_frame));
                return 0;
        }
 
-       vb2_set_plane_payload(&fbuf->vb, 0,
+       vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0,
                              pdev->width * pdev->height * 3 / 2);
 
        if (pdev->vbandlength == 0) {
 
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-fh.h>
 #include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>
 #ifdef CONFIG_USB_PWC_INPUT_EVDEV
 #include <linux/input.h>
 /* intermediate buffers with raw data from the USB cam */
 struct pwc_frame_buf
 {
-       struct vb2_buffer vb;   /* common v4l buffer stuff -- must be first */
+       /* common v4l buffer stuff -- must be first */
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
        void *data;
        int filled;             /* number of bytes filled */
 
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/usb.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>
 #include <media/v4l2-common.h>
 #include <media/v4l2-device.h>
 /* buffer for one video frame */
 struct s2255_buffer {
        /* common v4l buffer stuff -- must be first */
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
        buf = list_entry(vc->buf_list.next,
                         struct s2255_buffer, list);
        list_del(&buf->list);
-       v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-       buf->vb.v4l2_buf.field = vc->field;
-       buf->vb.v4l2_buf.sequence = vc->frame_count;
+       v4l2_get_timestamp(&buf->vb.timestamp);
+       buf->vb.field = vc->field;
+       buf->vb.sequence = vc->frame_count;
        spin_unlock_irqrestore(&vc->qlock, flags);
 
        s2255_fillbuff(vc, buf, jpgsize);
        /* tell v4l buffer was filled */
-       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
        dprintk(dev, 2, "%s: [buf] [%p]\n", __func__, buf);
 }
 
 {
        int pos = 0;
        const char *tmpbuf;
-       char *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+       char *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
        unsigned long last_frame;
        struct s2255_dev *dev = vc->dev;
 
                        break;
                case V4L2_PIX_FMT_JPEG:
                case V4L2_PIX_FMT_MJPEG:
-                       vb2_set_plane_payload(&buf->vb, 0, jpgsize);
+                       vb2_set_plane_payload(&buf->vb.vb2_buf, 0, jpgsize);
                        memcpy(vbuf, tmpbuf, jpgsize);
                        break;
                case V4L2_PIX_FMT_YUV422P:
 static int buffer_prepare(struct vb2_buffer *vb)
 {
        struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue);
-       struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct s2255_buffer *buf = container_of(vbuf, struct s2255_buffer, vb);
        int w = vc->width;
        int h = vc->height;
        unsigned long size;
                return -EINVAL;
        }
 
-       vb2_set_plane_payload(&buf->vb, 0, size);
+       vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
        return 0;
 }
 
 static void buffer_queue(struct vb2_buffer *vb)
 {
-       struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct s2255_buffer *buf = container_of(vbuf, struct s2255_buffer, vb);
        struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue);
        unsigned long flags = 0;
        dprintk(vc->dev, 1, "%s\n", __func__);
        spin_lock_irqsave(&vc->qlock, flags);
        list_for_each_entry_safe(buf, node, &vc->buf_list, list) {
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                dprintk(vc->dev, 2, "[%p/%d] done\n",
-                       buf, buf->vb.v4l2_buf.index);
+                       buf, buf->vb.vb2_buf.index);
        }
        spin_unlock_irqrestore(&vc->qlock, flags);
 }
 
 {
        unsigned long flags;
        struct stk1160 *dev = vb2_get_drv_priv(vb->vb2_queue);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct stk1160_buffer *buf =
-               container_of(vb, struct stk1160_buffer, vb);
+               container_of(vbuf, struct stk1160_buffer, vb);
 
        spin_lock_irqsave(&dev->buf_lock, flags);
        if (!dev->udev) {
                 * If the device is disconnected return the buffer to userspace
                 * directly. The next QBUF call will fail with -ENODEV.
                 */
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        } else {
 
                buf->mem = vb2_plane_vaddr(vb, 0);
                 * the buffer to userspace directly.
                 */
                if (buf->length < dev->width * dev->height * 2)
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+                       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                else
                        list_add_tail(&buf->list, &dev->avail_bufs);
 
                buf = list_first_entry(&dev->avail_bufs,
                        struct stk1160_buffer, list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                stk1160_dbg("buffer [%p/%d] aborted\n",
-                           buf, buf->vb.v4l2_buf.index);
+                           buf, buf->vb.vb2_buf.index);
        }
 
        /* It's important to release the current buffer */
                buf = dev->isoc_ctl.buf;
                dev->isoc_ctl.buf = NULL;
 
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                stk1160_dbg("buffer [%p/%d] aborted\n",
-                           buf, buf->vb.v4l2_buf.index);
+                           buf, buf->vb.vb2_buf.index);
        }
        spin_unlock_irqrestore(&dev->buf_lock, flags);
 }
 
 {
        struct stk1160_buffer *buf = dev->isoc_ctl.buf;
 
-       buf->vb.v4l2_buf.sequence = dev->sequence++;
-       buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
-       buf->vb.v4l2_buf.bytesused = buf->bytesused;
-       v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+       buf->vb.sequence = dev->sequence++;
+       buf->vb.field = V4L2_FIELD_INTERLACED;
+       buf->vb.vb2_buf.planes[0].bytesused = buf->bytesused;
+       v4l2_get_timestamp(&buf->vb.timestamp);
 
-       vb2_set_plane_payload(&buf->vb, 0, buf->bytesused);
-       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+       vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->bytesused);
+       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
 
        dev->isoc_ctl.buf = NULL;
 }
 
 /* Buffer for one video frame */
 struct stk1160_buffer {
        /* common v4l buffer stuff -- must be first */
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 
        void *mem;
 
 
        /* First available buffer. */
        buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
-       frame = vb2_plane_vaddr(&buf->vb, 0);
+       frame = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
 
        /* Copy the chunk data. */
        usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
 
        /* Last chunk in a frame, signalling an end */
        if (odd && chunk_no == usbtv->n_chunks-1) {
-               int size = vb2_plane_size(&buf->vb, 0);
+               int size = vb2_plane_size(&buf->vb.vb2_buf, 0);
                enum vb2_buffer_state state = usbtv->chunks_done ==
                                                usbtv->n_chunks ?
                                                VB2_BUF_STATE_DONE :
                                                VB2_BUF_STATE_ERROR;
 
-               buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
-               buf->vb.v4l2_buf.sequence = usbtv->sequence++;
-               v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-               vb2_set_plane_payload(&buf->vb, 0, size);
-               vb2_buffer_done(&buf->vb, state);
+               buf->vb.field = V4L2_FIELD_INTERLACED;
+               buf->vb.sequence = usbtv->sequence++;
+               v4l2_get_timestamp(&buf->vb.timestamp);
+               vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+               vb2_buffer_done(&buf->vb.vb2_buf, state);
                list_del(&buf->list);
        }
 
        while (!list_empty(&usbtv->bufs)) {
                struct usbtv_buf *buf = list_first_entry(&usbtv->bufs,
                                                struct usbtv_buf, list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
                list_del(&buf->list);
        }
        spin_unlock_irqrestore(&usbtv->buflock, flags);
 
 static void usbtv_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct usbtv *usbtv = vb2_get_drv_priv(vb->vb2_queue);
-       struct usbtv_buf *buf = container_of(vb, struct usbtv_buf, vb);
+       struct usbtv_buf *buf = container_of(vbuf, struct usbtv_buf, vb);
        unsigned long flags;
 
        if (usbtv->udev == NULL) {
 
 #include <linux/usb.h>
 
 #include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>
 
 /* Hardware. */
 
 /* A single videobuf2 frame buffer. */
 struct usbtv_buf {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
 
 #include <linux/videodev2.h>
 #include <linux/vmalloc.h>
 #include <linux/wait.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>
 
 #include "uvcvideo.h"
                                                          queue);
                list_del(&buf->queue);
                buf->state = state;
-               vb2_buffer_done(&buf->buf, vb2_state);
+               vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
        }
 }
 
 
 static int uvc_buffer_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
-       struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
+       struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
 
-       if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+       if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
                return -EINVAL;
        buf->error = 0;
        buf->mem = vb2_plane_vaddr(vb, 0);
        buf->length = vb2_plane_size(vb, 0);
-       if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+       if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                buf->bytesused = 0;
        else
                buf->bytesused = vb2_get_plane_payload(vb, 0);
 
 static void uvc_buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
-       struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
+       struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
        unsigned long flags;
 
        spin_lock_irqsave(&queue->irqlock, flags);
                 * directly. The next QBUF call will fail with -ENODEV.
                 */
                buf->state = UVC_BUF_STATE_ERROR;
-               vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
        }
 
        spin_unlock_irqrestore(&queue->irqlock, flags);
 
 static void uvc_buffer_finish(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);
-       struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
+       struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
 
        if (vb->state == VB2_BUF_STATE_DONE)
-               uvc_video_clock_update(stream, &vb->v4l2_buf, buf);
+               uvc_video_clock_update(stream, vbuf, buf);
 }
 
 static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
                buf->error = 0;
                buf->state = UVC_BUF_STATE_QUEUED;
                buf->bytesused = 0;
-               vb2_set_plane_payload(&buf->buf, 0, 0);
+               vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
                return buf;
        }
 
        spin_unlock_irqrestore(&queue->irqlock, flags);
 
        buf->state = buf->error ? VB2_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
-       vb2_set_plane_payload(&buf->buf, 0, buf->bytesused);
-       vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
+       vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
+       vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
 
        return nextbuf;
 }
 
  * timestamp of the sliding window to 1s.
  */
 void uvc_video_clock_update(struct uvc_streaming *stream,
-                           struct v4l2_buffer *v4l2_buf,
+                           struct vb2_v4l2_buffer *vbuf,
                            struct uvc_buffer *buf)
 {
        struct uvc_clock *clock = &stream->clock;
                  stream->dev->name,
                  sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
                  y, ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC,
-                 v4l2_buf->timestamp.tv_sec,
-                 (unsigned long)v4l2_buf->timestamp.tv_usec,
+                 vbuf->timestamp.tv_sec,
+                 (unsigned long)vbuf->timestamp.tv_usec,
                  x1, first->host_sof, first->dev_sof,
                  x2, last->host_sof, last->dev_sof, y1, y2);
 
        /* Update the V4L2 buffer. */
-       v4l2_buf->timestamp.tv_sec = ts.tv_sec;
-       v4l2_buf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+       vbuf->timestamp.tv_sec = ts.tv_sec;
+       vbuf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
 
 done:
        spin_unlock_irqrestore(&stream->clock.lock, flags);
 
                uvc_video_get_ts(&ts);
 
-               buf->buf.v4l2_buf.field = V4L2_FIELD_NONE;
-               buf->buf.v4l2_buf.sequence = stream->sequence;
-               buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
-               buf->buf.v4l2_buf.timestamp.tv_usec =
+               buf->buf.field = V4L2_FIELD_NONE;
+               buf->buf.sequence = stream->sequence;
+               buf->buf.timestamp.tv_sec = ts.tv_sec;
+               buf->buf.timestamp.tv_usec =
                        ts.tv_nsec / NSEC_PER_USEC;
 
                /* TODO: Handle PTS and SCR. */
                if (buf->bytesused == stream->queue.buf_used) {
                        stream->queue.buf_used = 0;
                        buf->state = UVC_BUF_STATE_READY;
-                       buf->buf.v4l2_buf.sequence = ++stream->sequence;
+                       buf->buf.sequence = ++stream->sequence;
                        uvc_queue_next_buffer(&stream->queue, buf);
                        stream->last_fid ^= UVC_STREAM_FID;
                }
 
 };
 
 struct uvc_buffer {
-       struct vb2_buffer buf;
+       struct vb2_v4l2_buffer buf;
        struct list_head queue;
 
        enum uvc_buffer_state state;
 extern int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
                __u8 intfnum, __u8 cs, void *data, __u16 size);
 void uvc_video_clock_update(struct uvc_streaming *stream,
-                           struct v4l2_buffer *v4l2_buf,
+                           struct vb2_v4l2_buffer *vbuf,
                            struct uvc_buffer *buf);
 
 /* Status */
 
  *
  * Call from buf_queue(), videobuf_queue_ops callback.
  */
-void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
+               struct vb2_v4l2_buffer *vbuf)
 {
-       struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
+       struct v4l2_m2m_buffer *b = container_of(vbuf,
+                               struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;
 
-       q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
+       q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
        if (!q_ctx)
                return;
 
 
 
 #define log_memop(vb, op)                                              \
        dprintk(2, "call_memop(%p, %d, %s)%s\n",                        \
-               (vb)->vb2_queue, (vb)->v4l2_buf.index, #op,             \
+               (vb)->vb2_queue, (vb)->index, #op,                      \
                (vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
 
 #define call_memop(vb, op, args...)                                    \
 
 #define log_vb_qop(vb, op, args...)                                    \
        dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",                       \
-               (vb)->vb2_queue, (vb)->v4l2_buf.index, #op,             \
+               (vb)->vb2_queue, (vb)->index, #op,                      \
                (vb)->vb2_queue->ops->op ? "" : " (nop)")
 
 #define call_vb_qop(vb, op, args...)                                   \
 
                /* Associate allocator private data with this plane */
                vb->planes[plane].mem_priv = mem_priv;
-               vb->v4l2_planes[plane].length = q->plane_sizes[plane];
+               vb->planes[plane].length = q->plane_sizes[plane];
        }
 
        return 0;
        for (plane = 0; plane < vb->num_planes; ++plane) {
                call_void_memop(vb, put, vb->planes[plane].mem_priv);
                vb->planes[plane].mem_priv = NULL;
-               dprintk(3, "freed plane %d of buffer %d\n", plane,
-                       vb->v4l2_buf.index);
+               dprintk(3, "freed plane %d of buffer %d\n", plane, vb->index);
        }
 }
 
 
        call_void_memop(vb, detach_dmabuf, p->mem_priv);
        dma_buf_put(p->dbuf);
-       memset(p, 0, sizeof(*p));
+       p->mem_priv = NULL;
+       p->dbuf = NULL;
+       p->dbuf_mapped = 0;
 }
 
 /**
                        continue;
 
                for (plane = 0; plane < vb->num_planes; ++plane)
-                       vb->v4l2_planes[plane].length = q->plane_sizes[plane];
+                       vb->planes[plane].length = q->plane_sizes[plane];
        }
 }
 
        unsigned long off;
 
        if (q->num_buffers) {
-               struct v4l2_plane *p;
+               struct vb2_plane *p;
                vb = q->bufs[q->num_buffers - 1];
-               p = &vb->v4l2_planes[vb->num_planes - 1];
-               off = PAGE_ALIGN(p->m.mem_offset + p->length);
+               p = &vb->planes[vb->num_planes - 1];
+               off = PAGE_ALIGN(p->m.offset + p->length);
        } else {
                off = 0;
        }
                        continue;
 
                for (plane = 0; plane < vb->num_planes; ++plane) {
-                       vb->v4l2_planes[plane].m.mem_offset = off;
+                       vb->planes[plane].m.offset = off;
 
                        dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
                                        buffer, plane, off);
 
-                       off += vb->v4l2_planes[plane].length;
+                       off += vb->planes[plane].length;
                        off = PAGE_ALIGN(off);
                }
        }
                        break;
                }
 
-               /* Length stores number of planes for multiplanar buffers */
-               if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
-                       vb->v4l2_buf.length = num_planes;
-
                vb->state = VB2_BUF_STATE_DEQUEUED;
                vb->vb2_queue = q;
                vb->num_planes = num_planes;
-               vb->v4l2_buf.index = q->num_buffers + buffer;
-               vb->v4l2_buf.type = q->type;
-               vb->v4l2_buf.memory = memory;
+               vb->index = q->num_buffers + buffer;
+               vb->type = q->type;
+               vb->memory = memory;
 
                /* Allocate video buffer memory for the MMAP type */
                if (memory == V4L2_MEMORY_MMAP) {
                        length = (b->memory == V4L2_MEMORY_USERPTR ||
                                  b->memory == V4L2_MEMORY_DMABUF)
                               ? b->m.planes[plane].length
-                              : vb->v4l2_planes[plane].length;
+                               : vb->planes[plane].length;
                        bytesused = b->m.planes[plane].bytesused
                                  ? b->m.planes[plane].bytesused : length;
 
                }
        } else {
                length = (b->memory == V4L2_MEMORY_USERPTR)
-                      ? b->length : vb->v4l2_planes[0].length;
-               bytesused = b->bytesused ? b->bytesused : length;
+                       ? b->length : vb->planes[0].length;
 
                if (b->bytesused > length)
                        return -EINVAL;
  */
 static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vb2_queue *q = vb->vb2_queue;
+       unsigned int plane;
 
        /* Copy back data such as timestamp, flags, etc. */
-       memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
-       b->reserved2 = vb->v4l2_buf.reserved2;
-       b->reserved = vb->v4l2_buf.reserved;
+       b->index = vb->index;
+       b->type = vb->type;
+       b->memory = vb->memory;
+       b->bytesused = 0;
+
+       b->flags = vbuf->flags;
+       b->field = vbuf->field;
+       b->timestamp = vbuf->timestamp;
+       b->timecode = vbuf->timecode;
+       b->sequence = vbuf->sequence;
+       b->reserved2 = 0;
+       b->reserved = 0;
 
        if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
                /*
                 * for it. The caller has already verified memory and size.
                 */
                b->length = vb->num_planes;
-               memcpy(b->m.planes, vb->v4l2_planes,
-                       b->length * sizeof(struct v4l2_plane));
+               for (plane = 0; plane < vb->num_planes; ++plane) {
+                       struct v4l2_plane *pdst = &b->m.planes[plane];
+                       struct vb2_plane *psrc = &vb->planes[plane];
+
+                       pdst->bytesused = psrc->bytesused;
+                       pdst->length = psrc->length;
+                       if (q->memory == V4L2_MEMORY_MMAP)
+                               pdst->m.mem_offset = psrc->m.offset;
+                       else if (q->memory == V4L2_MEMORY_USERPTR)
+                               pdst->m.userptr = psrc->m.userptr;
+                       else if (q->memory == V4L2_MEMORY_DMABUF)
+                               pdst->m.fd = psrc->m.fd;
+                       pdst->data_offset = psrc->data_offset;
+                       memset(pdst->reserved, 0, sizeof(pdst->reserved));
+               }
        } else {
                /*
                 * We use length and offset in v4l2_planes array even for
                 * single-planar buffers, but userspace does not.
                 */
-               b->length = vb->v4l2_planes[0].length;
-               b->bytesused = vb->v4l2_planes[0].bytesused;
+               b->length = vb->planes[0].length;
+               b->bytesused = vb->planes[0].bytesused;
                if (q->memory == V4L2_MEMORY_MMAP)
-                       b->m.offset = vb->v4l2_planes[0].m.mem_offset;
+                       b->m.offset = vb->planes[0].m.offset;
                else if (q->memory == V4L2_MEMORY_USERPTR)
-                       b->m.userptr = vb->v4l2_planes[0].m.userptr;
+                       b->m.userptr = vb->planes[0].m.userptr;
                else if (q->memory == V4L2_MEMORY_DMABUF)
-                       b->m.fd = vb->v4l2_planes[0].m.fd;
+                       b->m.fd = vb->planes[0].m.fd;
        }
 
        /*
        vb->cnt_buf_done++;
 #endif
        dprintk(4, "done processing on buffer %d, state: %d\n",
-                       vb->v4l2_buf.index, state);
+                       vb->index, state);
 
        /* sync buffers */
        for (plane = 0; plane < vb->num_planes; ++plane)
  * v4l2_buffer by the userspace. The caller has already verified that struct
  * v4l2_buffer has a valid number of planes.
  */
-static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
-                               struct v4l2_plane *v4l2_planes)
+static void __fill_vb2_buffer(struct vb2_buffer *vb,
+               const struct v4l2_buffer *b, struct vb2_plane *planes)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        unsigned int plane;
 
        if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
                if (b->memory == V4L2_MEMORY_USERPTR) {
                        for (plane = 0; plane < vb->num_planes; ++plane) {
-                               v4l2_planes[plane].m.userptr =
+                               planes[plane].m.userptr =
                                        b->m.planes[plane].m.userptr;
-                               v4l2_planes[plane].length =
+                               planes[plane].length =
                                        b->m.planes[plane].length;
                        }
                }
                if (b->memory == V4L2_MEMORY_DMABUF) {
                        for (plane = 0; plane < vb->num_planes; ++plane) {
-                               v4l2_planes[plane].m.fd =
+                               planes[plane].m.fd =
                                        b->m.planes[plane].m.fd;
-                               v4l2_planes[plane].length =
+                               planes[plane].length =
                                        b->m.planes[plane].length;
                        }
                }
                         * applications working.
                         */
                        for (plane = 0; plane < vb->num_planes; ++plane) {
-                               struct v4l2_plane *pdst = &v4l2_planes[plane];
+                               struct vb2_plane *pdst = &planes[plane];
                                struct v4l2_plane *psrc = &b->m.planes[plane];
 
                                if (psrc->bytesused == 0)
                 * old userspace applications working.
                 */
                if (b->memory == V4L2_MEMORY_USERPTR) {
-                       v4l2_planes[0].m.userptr = b->m.userptr;
-                       v4l2_planes[0].length = b->length;
+                       planes[0].m.userptr = b->m.userptr;
+                       planes[0].length = b->length;
                }
 
                if (b->memory == V4L2_MEMORY_DMABUF) {
-                       v4l2_planes[0].m.fd = b->m.fd;
-                       v4l2_planes[0].length = b->length;
+                       planes[0].m.fd = b->m.fd;
+                       planes[0].length = b->length;
                }
 
                if (V4L2_TYPE_IS_OUTPUT(b->type)) {
                                vb2_warn_zero_bytesused(vb);
 
                        if (vb->vb2_queue->allow_zero_bytesused)
-                               v4l2_planes[0].bytesused = b->bytesused;
+                               planes[0].bytesused = b->bytesused;
                        else
-                               v4l2_planes[0].bytesused = b->bytesused ?
-                                       b->bytesused : v4l2_planes[0].length;
+                               planes[0].bytesused = b->bytesused ?
+                                       b->bytesused : planes[0].length;
                } else
-                       v4l2_planes[0].bytesused = 0;
+                       planes[0].bytesused = 0;
 
        }
 
        /* Zero flags that the vb2 core handles */
-       vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
+       vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
        if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
            V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
                /*
                 * their timestamp and timestamp source flags from the
                 * queue.
                 */
-               vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+               vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
        }
 
        if (V4L2_TYPE_IS_OUTPUT(b->type)) {
                 * The 'field' is valid metadata for this output buffer
                 * and so that needs to be copied here.
                 */
-               vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE;
-               vb->v4l2_buf.field = b->field;
+               vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
+               vbuf->field = b->field;
        } else {
                /* Zero any output buffer flags as this is a capture buffer */
-               vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS;
+               vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
        }
 }
 
  */
 static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 {
-       __fill_vb2_buffer(vb, b, vb->v4l2_planes);
+       __fill_vb2_buffer(vb, b, vb->planes);
        return call_vb_qop(vb, buf_prepare, vb);
 }
 
  */
 static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 {
-       struct v4l2_plane planes[VIDEO_MAX_PLANES];
+       struct vb2_plane planes[VIDEO_MAX_PLANES];
        struct vb2_queue *q = vb->vb2_queue;
        void *mem_priv;
        unsigned int plane;
 
        for (plane = 0; plane < vb->num_planes; ++plane) {
                /* Skip the plane if already verified */
-               if (vb->v4l2_planes[plane].m.userptr &&
-                   vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
-                   && vb->v4l2_planes[plane].length == planes[plane].length)
+               if (vb->planes[plane].m.userptr &&
+                       vb->planes[plane].m.userptr == planes[plane].m.userptr
+                       && vb->planes[plane].length == planes[plane].length)
                        continue;
 
                dprintk(3, "userspace address for plane %d changed, "
                }
 
                vb->planes[plane].mem_priv = NULL;
-               memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
+               vb->planes[plane].bytesused = 0;
+               vb->planes[plane].length = 0;
+               vb->planes[plane].m.userptr = 0;
+               vb->planes[plane].data_offset = 0;
 
                /* Acquire each plane's memory */
                mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
         * Now that everything is in order, copy relevant information
         * provided by userspace.
         */
-       for (plane = 0; plane < vb->num_planes; ++plane)
-               vb->v4l2_planes[plane] = planes[plane];
+       for (plane = 0; plane < vb->num_planes; ++plane) {
+               vb->planes[plane].bytesused = planes[plane].bytesused;
+               vb->planes[plane].length = planes[plane].length;
+               vb->planes[plane].m.userptr = planes[plane].m.userptr;
+               vb->planes[plane].data_offset = planes[plane].data_offset;
+       }
 
        if (reacquired) {
                /*
        /* In case of errors, release planes that were already acquired */
        for (plane = 0; plane < vb->num_planes; ++plane) {
                if (vb->planes[plane].mem_priv)
-                       call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+                       call_void_memop(vb, put_userptr,
+                               vb->planes[plane].mem_priv);
                vb->planes[plane].mem_priv = NULL;
-               vb->v4l2_planes[plane].m.userptr = 0;
-               vb->v4l2_planes[plane].length = 0;
+               vb->planes[plane].m.userptr = 0;
+               vb->planes[plane].length = 0;
        }
 
        return ret;
  */
 static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 {
-       struct v4l2_plane planes[VIDEO_MAX_PLANES];
+       struct vb2_plane planes[VIDEO_MAX_PLANES];
        struct vb2_queue *q = vb->vb2_queue;
        void *mem_priv;
        unsigned int plane;
 
                /* Skip the plane if already verified */
                if (dbuf == vb->planes[plane].dbuf &&
-                   vb->v4l2_planes[plane].length == planes[plane].length) {
+                       vb->planes[plane].length == planes[plane].length) {
                        dma_buf_put(dbuf);
                        continue;
                }
 
                /* Release previously acquired memory if present */
                __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
-               memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
+               vb->planes[plane].bytesused = 0;
+               vb->planes[plane].length = 0;
+               vb->planes[plane].m.fd = 0;
+               vb->planes[plane].data_offset = 0;
 
                /* Acquire each plane's memory */
-               mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
-                       dbuf, planes[plane].length, dma_dir);
+               mem_priv = call_ptr_memop(vb, attach_dmabuf,
+                       q->alloc_ctx[plane], dbuf, planes[plane].length,
+                       dma_dir);
                if (IS_ERR(mem_priv)) {
                        dprintk(1, "failed to attach dmabuf\n");
                        ret = PTR_ERR(mem_priv);
         * Now that everything is in order, copy relevant information
         * provided by userspace.
         */
-       for (plane = 0; plane < vb->num_planes; ++plane)
-               vb->v4l2_planes[plane] = planes[plane];
+       for (plane = 0; plane < vb->num_planes; ++plane) {
+               vb->planes[plane].bytesused = planes[plane].bytesused;
+               vb->planes[plane].length = planes[plane].length;
+               vb->planes[plane].m.fd = planes[plane].m.fd;
+               vb->planes[plane].data_offset = planes[plane].data_offset;
+       }
 
        if (reacquired) {
                /*
 
 static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vb2_queue *q = vb->vb2_queue;
        int ret;
 
        }
 
        vb->state = VB2_BUF_STATE_PREPARING;
-       vb->v4l2_buf.timestamp.tv_sec = 0;
-       vb->v4l2_buf.timestamp.tv_usec = 0;
-       vb->v4l2_buf.sequence = 0;
+       vbuf->timestamp.tv_sec = 0;
+       vbuf->timestamp.tv_usec = 0;
+       vbuf->sequence = 0;
 
        switch (q->memory) {
        case V4L2_MEMORY_MMAP:
                /* Fill buffer information for the userspace */
                __fill_v4l2_buffer(vb, b);
 
-               dprintk(1, "prepare of buffer %d succeeded\n", vb->v4l2_buf.index);
+               dprintk(1, "prepare of buffer %d succeeded\n", vb->index);
        }
        return ret;
 }
        /*
         * If you see this warning, then the driver isn't cleaning up properly
         * after a failed start_streaming(). See the start_streaming()
-        * documentation in videobuf2-v4l2.h for more information how buffers
+        * documentation in videobuf2-core.h for more information on how buffers
         * should be returned to vb2 in start_streaming().
         */
        if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
 {
        int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
        struct vb2_buffer *vb;
+       struct vb2_v4l2_buffer *vbuf;
 
        if (ret)
                return ret;
 
        vb = q->bufs[b->index];
+       vbuf = to_vb2_v4l2_buffer(vb);
 
        switch (vb->state) {
        case VB2_BUF_STATE_DEQUEUED:
                 */
                if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
                    V4L2_BUF_FLAG_TIMESTAMP_COPY)
-                       vb->v4l2_buf.timestamp = b->timestamp;
-               vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
+                       vbuf->timestamp = b->timestamp;
+               vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
                if (b->flags & V4L2_BUF_FLAG_TIMECODE)
-                       vb->v4l2_buf.timecode = b->timecode;
+                       vbuf->timecode = b->timecode;
        }
 
        trace_vb2_qbuf(q, vb);
                        return ret;
        }
 
-       dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
+       dprintk(1, "qbuf of buffer %d succeeded\n", vb->index);
        return 0;
 }
 
                }
 }
 
-static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b,
+               bool nonblocking)
 {
        struct vb2_buffer *vb = NULL;
+       struct vb2_v4l2_buffer *vbuf = NULL;
        int ret;
 
        if (b->type != q->type) {
 
        trace_vb2_dqbuf(q, vb);
 
+       vbuf = to_vb2_v4l2_buffer(vb);
        if (!V4L2_TYPE_IS_OUTPUT(q->type) &&
-           vb->v4l2_buf.flags & V4L2_BUF_FLAG_LAST)
+                       vbuf->flags & V4L2_BUF_FLAG_LAST)
                q->last_buffer_dequeued = true;
        /* go back to dequeued state */
        __vb2_dqbuf(vb);
 
        dprintk(1, "dqbuf of buffer %d, with state %d\n",
-                       vb->v4l2_buf.index, vb->state);
+                       vb->index, vb->state);
 
        return 0;
 }
        /*
         * If you see this warning, then the driver isn't cleaning up properly
         * in stop_streaming(). See the stop_streaming() documentation in
-        * videobuf2-v4l2.h for more information how buffers should be returned
+        * videobuf2-core.h for more information on how buffers should be returned
         * to vb2 in stop_streaming().
         */
        if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
                vb = q->bufs[buffer];
 
                for (plane = 0; plane < vb->num_planes; ++plane) {
-                       if (vb->v4l2_planes[plane].m.mem_offset == off) {
+                       if (vb->planes[plane].m.offset == off) {
                                *_buffer = buffer;
                                *_plane = plane;
                                return 0;
         * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
         * so, we need to do the same here.
         */
-       length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
+       length = PAGE_ALIGN(vb->planes[plane].length);
        if (length < (vma->vm_end - vma->vm_start)) {
                dprintk(1,
                        "MMAP invalid, as it would overflow buffer length\n");
  * responsible of clearing it's content and setting initial values for some
  * required entries before calling this function.
  * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
- * to the struct vb2_queue description in include/media/videobuf2-v4l2.h
+ * to the struct vb2_queue description in include/media/videobuf2-core.h
  * for more information.
  */
 int vb2_queue_init(struct vb2_queue *q)
        init_waitqueue_head(&q->done_wq);
 
        if (q->buf_struct_size == 0)
-               q->buf_struct_size = sizeof(struct vb2_buffer);
+               q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
 
        return 0;
 }
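
A minimal sketch of queue setup under the new layout (my_vb2_ops, my_buffer and
dev are hypothetical driver names, not part of this patch); leaving
buf_struct_size at 0 now makes the core fall back to
sizeof(struct vb2_v4l2_buffer):

        struct vb2_queue *q = &dev->queue;

        q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
        q->drv_priv = dev;
        q->ops = &my_vb2_ops;
        q->mem_ops = &vb2_vmalloc_memops;
        /* driver buffer embeds struct vb2_v4l2_buffer as its first member */
        q->buf_struct_size = sizeof(struct my_buffer);
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        ret = vb2_queue_init(q);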
 
                           struct vpfe_cap_buffer, list);
 
        list_del(&video->next_frm->list);
-       video->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
-       return vb2_dma_contig_plane_dma_addr(&video->next_frm->vb, 0);
+       video->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+       return vb2_dma_contig_plane_dma_addr(&video->next_frm->vb.vb2_buf, 0);
 }
 
 /* schedule the next buffer which is available on dma queue */
                video->cur_frm = video->next_frm;
 
        list_del(&video->next_frm->list);
-       video->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
-       addr = vb2_dma_contig_plane_dma_addr(&video->next_frm->vb, 0);
+       video->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+       addr = vb2_dma_contig_plane_dma_addr(&video->next_frm->vb.vb2_buf, 0);
        video->ops->queue(vpfe_dev, addr);
        video->state = VPFE_VIDEO_BUFFER_QUEUED;
 }
        struct vpfe_device *vpfe_dev = video->vpfe_dev;
        unsigned long addr;
 
-       addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb, 0);
+       addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb.vb2_buf, 0);
        addr += video->field_off;
        video->ops->queue(vpfe_dev, addr);
 }
 {
        struct vpfe_pipeline *pipe = &video->pipe;
 
-       v4l2_get_timestamp(&video->cur_frm->vb.v4l2_buf.timestamp);
-       vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_DONE);
+       v4l2_get_timestamp(&video->cur_frm->vb.timestamp);
+       vb2_buffer_done(&video->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
        if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
                video->cur_frm = video->next_frm;
 }
 
 static void vpfe_buffer_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        /* Get the file handle object and device object */
        struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
        struct vpfe_video_device *video = fh->video;
        struct vpfe_device *vpfe_dev = video->vpfe_dev;
        struct vpfe_pipeline *pipe = &video->pipe;
-       struct vpfe_cap_buffer *buf = container_of(vb,
+       struct vpfe_cap_buffer *buf = container_of(vbuf,
                                struct vpfe_cap_buffer, vb);
        unsigned long flags;
        unsigned long empty;
        /* Remove buffer from the buffer queue */
        list_del(&video->cur_frm->list);
        /* Mark state of the current frame to active */
-       video->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+       video->cur_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
        /* Initialize field_id and started member */
        video->field_id = 0;
-       addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb, 0);
+       addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb.vb2_buf, 0);
        video->ops->queue(vpfe_dev, addr);
        video->state = VPFE_VIDEO_BUFFER_QUEUED;
 
        if (ret) {
                struct vpfe_cap_buffer *buf, *tmp;
 
-               vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_QUEUED);
+               vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_QUEUED);
                list_for_each_entry_safe(buf, tmp, &video->dma_queue, list) {
                        list_del(&buf->list);
-                       vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+                       vb2_buffer_done(&buf->vb.vb2_buf,
+                                       VB2_BUF_STATE_QUEUED);
                }
                goto unlock_out;
        }
 
 static int vpfe_buffer_init(struct vb2_buffer *vb)
 {
-       struct vpfe_cap_buffer *buf = container_of(vb,
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct vpfe_cap_buffer *buf = container_of(vbuf,
                                                   struct vpfe_cap_buffer, vb);
 
        INIT_LIST_HEAD(&buf->list);
 
        /* release all active buffers */
        if (video->cur_frm == video->next_frm) {
-               vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
        } else {
                if (video->cur_frm != NULL)
-                       vb2_buffer_done(&video->cur_frm->vb,
+                       vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
                                        VB2_BUF_STATE_ERROR);
                if (video->next_frm != NULL)
-                       vb2_buffer_done(&video->next_frm->vb,
+                       vb2_buffer_done(&video->next_frm->vb.vb2_buf,
                                        VB2_BUF_STATE_ERROR);
        }
 
                video->next_frm = list_entry(video->dma_queue.next,
                                                struct vpfe_cap_buffer, list);
                list_del(&video->next_frm->list);
-               vb2_buffer_done(&video->next_frm->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&video->next_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
        }
 }
 
 static void vpfe_buf_cleanup(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
        struct vpfe_video_device *video = fh->video;
        struct vpfe_device *vpfe_dev = video->vpfe_dev;
-       struct vpfe_cap_buffer *buf = container_of(vb,
+       struct vpfe_cap_buffer *buf = container_of(vbuf,
                                        struct vpfe_cap_buffer, vb);
 
        v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buf_cleanup\n");
 
 #ifndef _DAVINCI_VPFE_VIDEO_H
 #define _DAVINCI_VPFE_VIDEO_H
 
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-dma-contig.h>
 
 struct vpfe_device;
        container_of(vdev, struct vpfe_video_device, video_dev)
 
 struct vpfe_cap_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
 
 
 static void iss_video_buf_cleanup(struct vb2_buffer *vb)
 {
-       struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct iss_buffer *buffer = container_of(vbuf, struct iss_buffer, vb);
 
        if (buffer->iss_addr)
                buffer->iss_addr = 0;
 
 static int iss_video_buf_prepare(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
-       struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+       struct iss_buffer *buffer = container_of(vbuf, struct iss_buffer, vb);
        struct iss_video *video = vfh->video;
        unsigned long size = vfh->format.fmt.pix.sizeimage;
        dma_addr_t addr;
 
 static void iss_video_buf_queue(struct vb2_buffer *vb)
 {
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
        struct iss_video *video = vfh->video;
-       struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+       struct iss_buffer *buffer = container_of(vbuf, struct iss_buffer, vb);
        struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
        unsigned long flags;
        bool empty;
        list_del(&buf->list);
        spin_unlock_irqrestore(&video->qlock, flags);
 
-       v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+       v4l2_get_timestamp(&buf->vb.timestamp);
 
        /* Do frame number propagation only if this is the output video node.
         * Frame number either comes from the CSI receivers or it gets
         * first, so the input number might lag behind by 1 in some cases.
         */
        if (video == pipe->output && !pipe->do_propagation)
-               buf->vb.v4l2_buf.sequence =
+               buf->vb.sequence =
                        atomic_inc_return(&pipe->frame_number);
        else
-               buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);
+               buf->vb.sequence = atomic_read(&pipe->frame_number);
 
-       vb2_buffer_done(&buf->vb, pipe->error ?
+       vb2_buffer_done(&buf->vb.vb2_buf, pipe->error ?
                        VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
        pipe->error = false;
 
        buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
                               list);
        spin_unlock_irqrestore(&video->qlock, flags);
-       buf->vb.state = VB2_BUF_STATE_ACTIVE;
+       buf->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
        return buf;
 }
 
                buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
                                       list);
                list_del(&buf->list);
-               vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }
 
        vb2_queue_error(video->queue);
 
  */
 struct iss_buffer {
        /* common v4l buffer stuff -- must be first */
-       struct vb2_buffer       vb;
+       struct vb2_v4l2_buffer  vb;
        struct list_head        list;
        dma_addr_t iss_addr;
 };
 
-#define to_iss_buffer(buf)     container_of(buf, struct iss_buffer, buffer)
+#define to_iss_buffer(buf)     container_of(buf, struct iss_buffer, vb)
 
 enum iss_video_dmaqueue_flags {
        /* Set if DMA queue becomes empty when ISS_PIPELINE_STREAM_CONTINUOUS */
 
 static int uvc_buffer_prepare(struct vb2_buffer *vb)
 {
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
-       struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
 
-       if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+       if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
                return -EINVAL;
        buf->state = UVC_BUF_STATE_QUEUED;
        buf->mem = vb2_plane_vaddr(vb, 0);
        buf->length = vb2_plane_size(vb, 0);
-       if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+       if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                buf->bytesused = 0;
        else
                buf->bytesused = vb2_get_plane_payload(vb, 0);
 static void uvc_buffer_queue(struct vb2_buffer *vb)
 {
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
-       struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
        unsigned long flags;
 
        spin_lock_irqsave(&queue->irqlock, flags);
                 * directly. The next QBUF call will fail with -ENODEV.
                 */
                buf->state = UVC_BUF_STATE_ERROR;
-               vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
        }
 
        spin_unlock_irqrestore(&queue->irqlock, flags);
                                       queue);
                list_del(&buf->queue);
                buf->state = UVC_BUF_STATE_ERROR;
-               vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        /* This must be protected by the irqlock spinlock to avoid race
         * conditions between uvc_queue_buffer and the disconnection event that
        if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
             buf->length != buf->bytesused) {
                buf->state = UVC_BUF_STATE_QUEUED;
-               vb2_set_plane_payload(&buf->buf, 0, 0);
+               vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
                return buf;
        }
 
        else
                nextbuf = NULL;
 
-       buf->buf.v4l2_buf.field = V4L2_FIELD_NONE;
-       buf->buf.v4l2_buf.sequence = queue->sequence++;
-       v4l2_get_timestamp(&buf->buf.v4l2_buf.timestamp);
+       buf->buf.field = V4L2_FIELD_NONE;
+       buf->buf.sequence = queue->sequence++;
+       v4l2_get_timestamp(&buf->buf.timestamp);
 
-       vb2_set_plane_payload(&buf->buf, 0, buf->bytesused);
-       vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
+       vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
+       vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
 
        return nextbuf;
 }
 
 };
 
 struct uvc_buffer {
-       struct vb2_buffer buf;
+       struct vb2_v4l2_buffer buf;
        struct list_head queue;
 
        enum uvc_buffer_state state;
 
 #include <linux/videodev2.h>
 #include <media/v4l2-common.h>
 #include <media/v4l2-fh.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-dma-contig.h>
 #include <media/davinci/vpbe_types.h>
 #include <media/davinci/vpbe_osd.h>
 };
 
 struct vpbe_disp_buffer {
-       struct vb2_buffer vb;
+       struct vb2_v4l2_buffer vb;
        struct list_head list;
 };
 
 
 };
 
 struct v4l2_m2m_buffer {
-       struct vb2_buffer       vb;
+       struct vb2_v4l2_buffer  vb;
        struct list_head        list;
 };
 
                         struct v4l2_m2m_ctx *m2m_ctx);
 
 static inline void
-v4l2_m2m_buf_done(struct vb2_buffer *buf, enum vb2_buffer_state state)
+v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
 {
-       vb2_buffer_done(buf, state);
+       vb2_buffer_done(&buf->vb2_buf, state);
 }
 
 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 
 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
 
-void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb);
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
+                       struct vb2_v4l2_buffer *vbuf);
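
With the new prototype, m2m drivers queue the v4l2 wrapper instead of the bare
vb2_buffer. A minimal sketch of a converted buf_queue callback, assuming a
hypothetical driver context my_ctx that embeds a struct v4l2_fh named fh:

        static void my_m2m_buf_queue(struct vb2_buffer *vb)
        {
                struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
                struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

                /* hand the wrapper, not &vbuf->vb2_buf, to the m2m core */
                v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
        }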
 
 /**
  * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
 
        int             (*mmap)(void *buf_priv, struct vm_area_struct *vma);
 };
 
+/**
+ * struct vb2_plane - plane information
+ * @mem_priv:  private data associated with this plane
+ * @dbuf:      dma_buf - shared buffer object
+ * @dbuf_mapped:       flag to show whether dbuf is mapped or not
+ * @bytesused: number of bytes occupied by data in the plane (payload)
+ * @length:    size of this plane (NOT the payload) in bytes
+ * @offset:    when memory in the associated struct vb2_buffer is
+ *             V4L2_MEMORY_MMAP, equals the offset from the start of
+ *             the device memory for this plane (or is a "cookie" that
+ *             should be passed to mmap() called on the video node)
+ * @userptr:   when memory is V4L2_MEMORY_USERPTR, a userspace pointer
+ *             pointing to this plane
+ * @fd:                when memory is V4L2_MEMORY_DMABUF, a userspace file
+ *             descriptor associated with this plane
+ * @data_offset:       offset in the plane to the start of data; usually 0,
+ *             unless there is a header in front of the data
+ *
+ * Should contain enough information to cover all the fields of
+ * struct v4l2_plane in videodev2.h.
+ */
 struct vb2_plane {
        void                    *mem_priv;
        struct dma_buf          *dbuf;
        unsigned int            dbuf_mapped;
+       unsigned int            bytesused;
+       unsigned int            length;
+       union {
+               unsigned int    offset;
+               unsigned long   userptr;
+               int             fd;
+       } m;
+       unsigned int            data_offset;
 };
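
Only the union member matching the queue's memory type is meaningful, mirroring
the m union of struct v4l2_plane. An illustrative helper (my_dump_plane is a
made-up name, not part of this patch) showing how the new fields are read:

        static void my_dump_plane(struct vb2_queue *q, struct vb2_plane *p)
        {
                if (q->memory == V4L2_MEMORY_MMAP)
                        pr_info("mmap offset 0x%x, length %u, used %u\n",
                                p->m.offset, p->length, p->bytesused);
                else if (q->memory == V4L2_MEMORY_USERPTR)
                        pr_info("userptr 0x%lx, length %u\n",
                                p->m.userptr, p->length);
                else if (q->memory == V4L2_MEMORY_DMABUF)
                        pr_info("dmabuf fd %d, length %u\n",
                                p->m.fd, p->length);
        }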
 
 /**
 
 /**
  * struct vb2_buffer - represents a video buffer
- * @v4l2_buf:          struct v4l2_buffer associated with this buffer; can
- *                     be read by the driver and relevant entries can be
- *                     changed by the driver in case of CAPTURE types
- *                     (such as timestamp)
- * @v4l2_planes:       struct v4l2_planes associated with this buffer; can
- *                     be read by the driver and relevant entries can be
- *                     changed by the driver in case of CAPTURE types
- *                     (such as bytesused); NOTE that even for single-planar
- *                     types, the v4l2_planes[0] struct should be used
- *                     instead of v4l2_buf for filling bytesused - drivers
- *                     should use the vb2_set_plane_payload() function for that
  * @vb2_queue:         the queue to which this driver belongs
+ * @index:             id number of the buffer
+ * @type:              buffer type
+ * @memory:            the method by which the actual data is passed
  * @num_planes:                number of planes in the buffer
  *                     on an internal driver queue
+ * @planes:            private per-plane information; do not change
  * @state:             current buffer state; do not change
  * @queued_entry:      entry on the queued buffers list, which holds all
  *                     buffers queued from userspace
  * @done_entry:                entry on the list that stores all buffers ready to
  *                     be dequeued to userspace
- * @planes:            private per-plane information; do not change
  */
 struct vb2_buffer {
-       struct v4l2_buffer      v4l2_buf;
-       struct v4l2_plane       v4l2_planes[VIDEO_MAX_PLANES];
-
        struct vb2_queue        *vb2_queue;
-
+       unsigned int            index;
+       unsigned int            type;
+       unsigned int            memory;
        unsigned int            num_planes;
+       struct vb2_plane        planes[VIDEO_MAX_PLANES];
 
-/* Private: internal use only */
+       /* Private: internal use only */
        enum vb2_buffer_state   state;
 
        struct list_head        queued_entry;
        struct list_head        done_entry;
-
-       struct vb2_plane        planes[VIDEO_MAX_PLANES];
-
 #ifdef CONFIG_VIDEO_ADV_DEBUG
        /*
         * Counters for how often these buffer-related ops are
  * @drv_priv:  driver private data
  * @buf_struct_size: size of the driver-specific buffer structure;
  *             "0" indicates the driver doesn't want to use a custom buffer
- *             structure type, so sizeof(struct vb2_buffer) will is used
+ *             structure type. For example, sizeof(struct vb2_v4l2_buffer)
+ *             will be used for v4l2.
  * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and
  *             V4L2_BUF_FLAG_TSTAMP_SRC_*
  * @gfp_flags: additional gfp flags used when allocating the buffers.
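
In practice a v4l2 driver now embeds struct vb2_v4l2_buffer (not struct
vb2_buffer) as the first member of its per-buffer structure and sizes
buf_struct_size from that. A sketch with a hypothetical my_buffer:

        struct my_buffer {
                struct vb2_v4l2_buffer vb;      /* must be first */
                struct list_head list;
        };

        /* at queue init time */
        q->buf_struct_size = sizeof(struct my_buffer);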
                                 unsigned int plane_no, unsigned long size)
 {
        if (plane_no < vb->num_planes)
-               vb->v4l2_planes[plane_no].bytesused = size;
+               vb->planes[plane_no].bytesused = size;
 }
 
 /**
                                 unsigned int plane_no)
 {
        if (plane_no < vb->num_planes)
-               return vb->v4l2_planes[plane_no].bytesused;
+               return vb->planes[plane_no].bytesused;
        return 0;
 }
 
 vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
 {
        if (plane_no < vb->num_planes)
-               return vb->v4l2_planes[plane_no].length;
+               return vb->planes[plane_no].length;
        return 0;
 }
 
 
 #ifndef _MEDIA_VIDEOBUF2_V4L2_H
 #define _MEDIA_VIDEOBUF2_V4L2_H
 
+#include <linux/videodev2.h>
 #include <media/videobuf2-core.h>
 
+/**
+ * struct vb2_v4l2_buffer - video buffer information for v4l2
+ * @vb2_buf:   embedded struct vb2_buffer
+ * @flags:     buffer informational flags
+ * @field:     enum v4l2_field; field order of the image in the buffer
+ * @timestamp: frame timestamp
+ * @timecode:  frame timecode
+ * @sequence:  sequence count of this frame
+ *
+ * Should contain enough information to cover all the fields of
+ * struct v4l2_buffer in videodev2.h.
+ */
+struct vb2_v4l2_buffer {
+       struct vb2_buffer       vb2_buf;
+
+       __u32                   flags;
+       __u32                   field;
+       struct timeval          timestamp;
+       struct v4l2_timecode    timecode;
+       __u32                   sequence;
+};
+
+/**
+ * to_vb2_v4l2_buffer() - cast struct vb2_buffer * to struct vb2_v4l2_buffer *
+ */
+#define to_vb2_v4l2_buffer(vb) \
+       (container_of(vb, struct vb2_v4l2_buffer, vb2_buf))
+
 #endif /* _MEDIA_VIDEOBUF2_V4L2_H */
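
Callbacks that still receive a plain struct vb2_buffer use to_vb2_v4l2_buffer()
to reach the v4l2 state and container_of() on the result to reach the driver
buffer; completion metadata is set on the wrapper while core calls keep taking
&buf->vb.vb2_buf. A sketch with hypothetical names (my_buffer, my_dev), locking
omitted:

        static void my_buf_queue(struct vb2_buffer *vb)
        {
                struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
                struct my_buffer *buf = container_of(vbuf, struct my_buffer, vb);
                struct my_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

                list_add_tail(&buf->list, &dev->buf_list);
        }

        static void my_frame_done(struct my_dev *dev, struct my_buffer *buf)
        {
                buf->vb.field = V4L2_FIELD_NONE;
                buf->vb.sequence = dev->sequence++;
                v4l2_get_timestamp(&buf->vb.timestamp);
                vb2_set_plane_payload(&buf->vb.vb2_buf, 0, dev->frame_size);
                vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
        }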
 
        ),
 
        TP_fast_assign(
+               struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
                __entry->minor = q->owner ? q->owner->vdev->minor : -1;
                __entry->queued_count = q->queued_count;
                __entry->owned_by_drv_count =
                        atomic_read(&q->owned_by_drv_count);
-               __entry->index = vb->v4l2_buf.index;
-               __entry->type = vb->v4l2_buf.type;
-               __entry->bytesused = vb->v4l2_planes[0].bytesused;
-               __entry->flags = vb->v4l2_buf.flags;
-               __entry->field = vb->v4l2_buf.field;
-               __entry->timestamp = timeval_to_ns(&vb->v4l2_buf.timestamp);
-               __entry->timecode_type = vb->v4l2_buf.timecode.type;
-               __entry->timecode_flags = vb->v4l2_buf.timecode.flags;
-               __entry->timecode_frames = vb->v4l2_buf.timecode.frames;
-               __entry->timecode_seconds = vb->v4l2_buf.timecode.seconds;
-               __entry->timecode_minutes = vb->v4l2_buf.timecode.minutes;
-               __entry->timecode_hours = vb->v4l2_buf.timecode.hours;
-               __entry->timecode_userbits0 = vb->v4l2_buf.timecode.userbits[0];
-               __entry->timecode_userbits1 = vb->v4l2_buf.timecode.userbits[1];
-               __entry->timecode_userbits2 = vb->v4l2_buf.timecode.userbits[2];
-               __entry->timecode_userbits3 = vb->v4l2_buf.timecode.userbits[3];
-               __entry->sequence = vb->v4l2_buf.sequence;
+               __entry->index = vb->index;
+               __entry->type = vb->type;
+               __entry->bytesused = vb->planes[0].bytesused;
+               __entry->flags = vbuf->flags;
+               __entry->field = vbuf->field;
+               __entry->timestamp = timeval_to_ns(&vbuf->timestamp);
+               __entry->timecode_type = vbuf->timecode.type;
+               __entry->timecode_flags = vbuf->timecode.flags;
+               __entry->timecode_frames = vbuf->timecode.frames;
+               __entry->timecode_seconds = vbuf->timecode.seconds;
+               __entry->timecode_minutes = vbuf->timecode.minutes;
+               __entry->timecode_hours = vbuf->timecode.hours;
+               __entry->timecode_userbits0 = vbuf->timecode.userbits[0];
+               __entry->timecode_userbits1 = vbuf->timecode.userbits[1];
+               __entry->timecode_userbits2 = vbuf->timecode.userbits[2];
+               __entry->timecode_userbits3 = vbuf->timecode.userbits[3];
+               __entry->sequence = vbuf->sequence;
        ),
 
        TP_printk("minor = %d, queued = %u, owned_by_drv = %d, index = %u, "