From: Linus Torvalds
Date: Fri, 1 Sep 2023 19:21:32 +0000 (-0700)
Subject: Merge tag 'media/v6.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab...
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=307d59039fb26212a84a9aa6a134a7d2bdea34ca;p=linux.git

Merge tag 'media/v6.6-1' of git://git./linux/kernel/git/mchehab/linux-media

Pull media updates from Mauro Carvalho Chehab:

 - new i2c drivers: ds90ub913, ds90ub953, ds90ub960, dw9719

 - new Intel IVSC MEI drivers

 - some Mediatek platform drivers were moved to a common location

 - the Intel atomisp2 driver is now working with the main ov2680 driver. Due to that, the atomisp2 ov2680 staging one was removed

 - the bttv driver was finally converted to the videobuf2 framework. This was the last upstream driver using the videobuf version 1 core. We'll likely remove the old videobuf framework in 6.7

 - lots of improvements in the atomisp driver: it now works with normal I2C sensors. Several compile-time dependencies to select between ISP2400 and ISP2401 are now resolved at runtime

 - a new ipu-bridge logic was added to work with the IVSC MEI drivers

 - the venus driver gained better support for new VPU versions

 - the v4l core async framework has gained lots of improvements and cleanups

 - lots of other cleanups, improvements and driver fixes

* tag 'media/v6.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media: (358 commits)
  media: ivsc: Add ACPI dependency
  media: bttv: convert to vb2
  media: bttv: use audio defaults for winfast2000
  media: bttv: refactor bttv_set_dma()
  media: bttv: move vbi_skip/vbi_count out of buffer
  media: bttv: remove crop info from bttv_buffer
  media: bttv: remove tvnorm field from bttv_buffer
  media: bttv: remove format field from bttv_buffer
  media: bttv: move do_crop flag out of bttv_fh
  media: bttv: copy vbi_fmt from bttv_fh
  media: bttv: copy vid fmt/width/height from fh
  media: bttv: radio use v4l2_fh instead of bttv_fh
  media: bttv: replace BUG with WARN_ON
  media: bttv: use video_drvdata to get bttv
  media: i2c: rdacm21: Fix uninitialized value
  media: coda: Remove duplicated include
  media: vivid: fix the racy dev->radio_tx_rds_owner
  media: i2c: ccs: Check rules is non-NULL
  media: i2c: ds90ub960: Fix PLL config for 1200 MHz CSI rate
  media: i2c: ds90ub953: Fix use of uninitialized variables
  ...
---

307d59039fb26212a84a9aa6a134a7d2bdea34ca
diff --cc drivers/media/dvb-frontends/mn88443x.c
index db2921c736af9,bbdd0389ffa36..7a58f53ab9996
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@@ -8,9 -8,9 +8,9 @@@
  #include
  #include
  #include
- #include
+ #include
  #include
 -#include
 +#include
  #include "mn88443x.h"
diff --cc drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
index 0000000000000,8e44a051edda2..04948d3eb011a
mode 000000,100644..100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
@@@ -1,0 -1,1458 +1,1460 @@@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen + * Tiffany Lin + */ + + #include + #include + #include + #include + + #include "mtk_vcodec_enc.h" + #include "venc_drv_if.h" + + #define MTK_VENC_MIN_W 160U + #define MTK_VENC_MIN_H 128U + #define MTK_VENC_HD_MAX_W 1920U + #define MTK_VENC_HD_MAX_H 1088U + #define MTK_VENC_4K_MAX_W 3840U + #define MTK_VENC_4K_MAX_H 2176U + + #define DFT_CFG_WIDTH MTK_VENC_MIN_W + #define DFT_CFG_HEIGHT MTK_VENC_MIN_H + #define MTK_MAX_CTRLS_HINT 20 + + #define MTK_DEFAULT_FRAMERATE_NUM 1001 + #define MTK_DEFAULT_FRAMERATE_DENOM 30000 + #define MTK_VENC_4K_CAPABILITY_ENABLE BIT(0) + + static void mtk_venc_worker(struct work_struct *work); + + static const struct v4l2_frmsize_stepwise mtk_venc_hd_framesizes = { + MTK_VENC_MIN_W, MTK_VENC_HD_MAX_W, 16, + MTK_VENC_MIN_H, MTK_VENC_HD_MAX_H, 16, + }; + + static const struct v4l2_frmsize_stepwise mtk_venc_4k_framesizes = { + MTK_VENC_MIN_W, MTK_VENC_4K_MAX_W, 16, + MTK_VENC_MIN_H, MTK_VENC_4K_MAX_H, 16, + }; + + static int vidioc_venc_s_ctrl(struct v4l2_ctrl *ctrl) + { + struct mtk_vcodec_enc_ctx *ctx = ctrl_to_enc_ctx(ctrl); + struct mtk_enc_params *p = &ctx->enc_params; + int ret = 0; + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_BITRATE_MODE val= %d", ctrl->val); + if (ctrl->val != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) { + mtk_v4l2_venc_err(ctx, "Unsupported bitrate mode =%d", ctrl->val); + ret = -EINVAL; + } + break; + case V4L2_CID_MPEG_VIDEO_BITRATE: + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_BITRATE val = %d", ctrl->val); + p->bitrate = ctrl->val; + ctx->param_change |= MTK_ENCODE_PARAM_BITRATE; + break; + case V4L2_CID_MPEG_VIDEO_B_FRAMES: + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_B_FRAMES val = %d", ctrl->val); + p->num_b_frame = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE val = %d", + ctrl->val); + p->rc_frame = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_H264_MAX_QP val = %d", ctrl->val); + p->h264_max_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEADER_MODE: + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_HEADER_MODE val = %d", ctrl->val); + p->seq_hdr_mode = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE val = %d", ctrl->val); + p->rc_mb = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_H264_PROFILE val = %d", ctrl->val); + p->h264_profile = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_H264_LEVEL val = %d", ctrl->val); + p->h264_level = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_H264_I_PERIOD val = %d", ctrl->val); + p->intra_period = ctrl->val; + ctx->param_change |= MTK_ENCODE_PARAM_INTRA_PERIOD; + break; + case V4L2_CID_MPEG_VIDEO_GOP_SIZE: + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_GOP_SIZE val = %d", ctrl->val); + p->gop_size = ctrl->val; + ctx->param_change |= MTK_ENCODE_PARAM_GOP_SIZE; + break; + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: + /* + * FIXME - what vp8 profiles are actually supported? + * The ctrl is added (with only profile 0 supported) for now. 
+ */ + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_VP8_PROFILE val = %d", ctrl->val); + break; + case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: + mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME"); + p->force_intra = 1; + ctx->param_change |= MTK_ENCODE_PARAM_FORCE_INTRA; + break; + default: + ret = -EINVAL; + break; + } + + return ret; + } + + static const struct v4l2_ctrl_ops mtk_vcodec_enc_ctrl_ops = { + .s_ctrl = vidioc_venc_s_ctrl, + }; + + static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, + const struct mtk_video_fmt *formats, + size_t num_formats) + { + if (f->index >= num_formats) + return -EINVAL; + + f->pixelformat = formats[f->index].fourcc; + + return 0; + } + + static const struct mtk_video_fmt * + mtk_venc_find_format(u32 fourcc, const struct mtk_vcodec_enc_pdata *pdata) + { + const struct mtk_video_fmt *fmt; + unsigned int k; + + for (k = 0; k < pdata->num_capture_formats; k++) { + fmt = &pdata->capture_formats[k]; + if (fmt->fourcc == fourcc) + return fmt; + } + + for (k = 0; k < pdata->num_output_formats; k++) { + fmt = &pdata->output_formats[k]; + if (fmt->fourcc == fourcc) + return fmt; + } + + return NULL; + } + + static int vidioc_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) + { + const struct mtk_video_fmt *fmt; + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(fh); + + if (fsize->index != 0) + return -EINVAL; + + fmt = mtk_venc_find_format(fsize->pixel_format, + ctx->dev->venc_pdata); + if (!fmt) + return -EINVAL; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + + if (ctx->dev->enc_capability & MTK_VENC_4K_CAPABILITY_ENABLE) + fsize->stepwise = mtk_venc_4k_framesizes; + else + fsize->stepwise = mtk_venc_hd_framesizes; + + return 0; + } + + static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_fmtdesc *f) + { + const struct mtk_vcodec_enc_pdata *pdata = + fh_to_enc_ctx(priv)->dev->venc_pdata; + + return vidioc_enum_fmt(f, pdata->capture_formats, + pdata->num_capture_formats); + } + + static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, + struct v4l2_fmtdesc *f) + { + const struct mtk_vcodec_enc_pdata *pdata = + fh_to_enc_ctx(priv)->dev->venc_pdata; + + return vidioc_enum_fmt(f, pdata->output_formats, + pdata->num_output_formats); + } + + static int mtk_vcodec_enc_get_chip_name(void *priv) + { + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + struct device *dev = &ctx->dev->plat_dev->dev; + + if (of_device_is_compatible(dev->of_node, "mediatek,mt8173-vcodec-enc")) + return 8173; + else if (of_device_is_compatible(dev->of_node, "mediatek,mt8183-vcodec-enc")) + return 8183; + else if (of_device_is_compatible(dev->of_node, "mediatek,mt8192-vcodec-enc")) + return 8192; + else if (of_device_is_compatible(dev->of_node, "mediatek,mt8195-vcodec-enc")) + return 8195; + else if (of_device_is_compatible(dev->of_node, "mediatek,mt8188-vcodec-enc")) + return 8188; + else + return 8173; + } + + static int vidioc_venc_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) + { + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + struct device *dev = &ctx->dev->plat_dev->dev; + int platform_name = mtk_vcodec_enc_get_chip_name(priv); + + strscpy(cap->driver, dev->driver->name, sizeof(cap->driver)); + snprintf(cap->card, sizeof(cap->card), "MT%d video encoder", platform_name); + + return 0; + } + + static int vidioc_venc_s_parm(struct file *file, void *priv, + struct v4l2_streamparm *a) + { + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + struct v4l2_fract 
*timeperframe = &a->parm.output.timeperframe; + + if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + return -EINVAL; + + if (timeperframe->numerator == 0 || timeperframe->denominator == 0) { + timeperframe->numerator = MTK_DEFAULT_FRAMERATE_NUM; + timeperframe->denominator = MTK_DEFAULT_FRAMERATE_DENOM; + } + + ctx->enc_params.framerate_num = timeperframe->denominator; + ctx->enc_params.framerate_denom = timeperframe->numerator; + ctx->param_change |= MTK_ENCODE_PARAM_FRAMERATE; + + a->parm.output.capability = V4L2_CAP_TIMEPERFRAME; + + return 0; + } + + static int vidioc_venc_g_parm(struct file *file, void *priv, + struct v4l2_streamparm *a) + { + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + + if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + return -EINVAL; + + a->parm.output.capability = V4L2_CAP_TIMEPERFRAME; + a->parm.output.timeperframe.denominator = + ctx->enc_params.framerate_num; + a->parm.output.timeperframe.numerator = + ctx->enc_params.framerate_denom; + + return 0; + } + + static struct mtk_q_data *mtk_venc_get_q_data(struct mtk_vcodec_enc_ctx *ctx, + enum v4l2_buf_type type) + { + if (V4L2_TYPE_IS_OUTPUT(type)) + return &ctx->q_data[MTK_Q_DATA_SRC]; + + return &ctx->q_data[MTK_Q_DATA_DST]; + } + + static void vidioc_try_fmt_cap(struct v4l2_format *f) + { + f->fmt.pix_mp.field = V4L2_FIELD_NONE; + f->fmt.pix_mp.num_planes = 1; + f->fmt.pix_mp.plane_fmt[0].bytesperline = 0; + f->fmt.pix_mp.flags = 0; + } + + /* V4L2 specification suggests the driver corrects the format struct if any of + * the dimensions is unsupported + */ + static int vidioc_try_fmt_out(struct mtk_vcodec_enc_ctx *ctx, struct v4l2_format *f, + const struct mtk_video_fmt *fmt) + { + struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; + int tmp_w, tmp_h; + unsigned int max_width, max_height; + + pix_fmt_mp->field = V4L2_FIELD_NONE; + + if (ctx->dev->enc_capability & MTK_VENC_4K_CAPABILITY_ENABLE) { + max_width = MTK_VENC_4K_MAX_W; + max_height = MTK_VENC_4K_MAX_H; + } else { + max_width = MTK_VENC_HD_MAX_W; + max_height = MTK_VENC_HD_MAX_H; + } + + pix_fmt_mp->height = clamp(pix_fmt_mp->height, MTK_VENC_MIN_H, max_height); + pix_fmt_mp->width = clamp(pix_fmt_mp->width, MTK_VENC_MIN_W, max_width); + + /* find next closer width align 16, heign align 32, size align + * 64 rectangle + */ + tmp_w = pix_fmt_mp->width; + tmp_h = pix_fmt_mp->height; + v4l_bound_align_image(&pix_fmt_mp->width, + MTK_VENC_MIN_W, + max_width, 4, + &pix_fmt_mp->height, + MTK_VENC_MIN_H, + max_height, 5, 6); + + if (pix_fmt_mp->width < tmp_w && (pix_fmt_mp->width + 16) <= max_width) + pix_fmt_mp->width += 16; + if (pix_fmt_mp->height < tmp_h && (pix_fmt_mp->height + 32) <= max_height) + pix_fmt_mp->height += 32; + + mtk_v4l2_venc_dbg(0, ctx, + "before resize wxh=%dx%d, after resize wxh=%dx%d, sizeimage=%d %d", + tmp_w, tmp_h, pix_fmt_mp->width, + pix_fmt_mp->height, + pix_fmt_mp->plane_fmt[0].sizeimage, + pix_fmt_mp->plane_fmt[1].sizeimage); + + pix_fmt_mp->num_planes = fmt->num_planes; + pix_fmt_mp->plane_fmt[0].sizeimage = + pix_fmt_mp->width * pix_fmt_mp->height + + ((ALIGN(pix_fmt_mp->width, 16) * 2) * 16); + pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width; + + if (pix_fmt_mp->num_planes == 2) { + pix_fmt_mp->plane_fmt[1].sizeimage = + (pix_fmt_mp->width * pix_fmt_mp->height) / 2 + + (ALIGN(pix_fmt_mp->width, 16) * 16); + pix_fmt_mp->plane_fmt[2].sizeimage = 0; + pix_fmt_mp->plane_fmt[1].bytesperline = + pix_fmt_mp->width; + pix_fmt_mp->plane_fmt[2].bytesperline = 0; + } else if (pix_fmt_mp->num_planes == 
3) { + pix_fmt_mp->plane_fmt[1].sizeimage = + pix_fmt_mp->plane_fmt[2].sizeimage = + (pix_fmt_mp->width * pix_fmt_mp->height) / 4 + + ((ALIGN(pix_fmt_mp->width, 16) / 2) * 16); + pix_fmt_mp->plane_fmt[1].bytesperline = + pix_fmt_mp->plane_fmt[2].bytesperline = + pix_fmt_mp->width / 2; + } + + pix_fmt_mp->flags = 0; + + return 0; + } + + static void mtk_venc_set_param(struct mtk_vcodec_enc_ctx *ctx, + struct venc_enc_param *param) + { + struct mtk_q_data *q_data_src = &ctx->q_data[MTK_Q_DATA_SRC]; + struct mtk_enc_params *enc_params = &ctx->enc_params; + + switch (q_data_src->fmt->fourcc) { + case V4L2_PIX_FMT_YUV420M: + param->input_yuv_fmt = VENC_YUV_FORMAT_I420; + break; + case V4L2_PIX_FMT_YVU420M: + param->input_yuv_fmt = VENC_YUV_FORMAT_YV12; + break; + case V4L2_PIX_FMT_NV12M: + param->input_yuv_fmt = VENC_YUV_FORMAT_NV12; + break; + case V4L2_PIX_FMT_NV21M: + param->input_yuv_fmt = VENC_YUV_FORMAT_NV21; + break; + default: + mtk_v4l2_venc_err(ctx, "Unsupported fourcc =%d", q_data_src->fmt->fourcc); + break; + } + param->h264_profile = enc_params->h264_profile; + param->h264_level = enc_params->h264_level; + + /* Config visible resolution */ + param->width = q_data_src->visible_width; + param->height = q_data_src->visible_height; + /* Config coded resolution */ + param->buf_width = q_data_src->coded_width; + param->buf_height = q_data_src->coded_height; + param->frm_rate = enc_params->framerate_num / + enc_params->framerate_denom; + param->intra_period = enc_params->intra_period; + param->gop_size = enc_params->gop_size; + param->bitrate = enc_params->bitrate; + + mtk_v4l2_venc_dbg(0, ctx, + "fmt 0x%x, P/L %d/%d w/h %d/%d buf %d/%d fps/bps %d/%d gop %d i_per %d", + param->input_yuv_fmt, param->h264_profile, + param->h264_level, param->width, param->height, + param->buf_width, param->buf_height, + param->frm_rate, param->bitrate, + param->gop_size, param->intra_period); + } + + static int vidioc_venc_s_fmt_cap(struct file *file, void *priv, + struct v4l2_format *f) + { + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata; + struct vb2_queue *vq; + struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type); + int i, ret; + const struct mtk_video_fmt *fmt; + + vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); + if (!vq) { + mtk_v4l2_venc_err(ctx, "fail to get vq"); + return -EINVAL; + } + + if (vb2_is_busy(vq)) { + mtk_v4l2_venc_err(ctx, "queue busy"); + return -EBUSY; + } + + fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata); + if (!fmt) { + fmt = &ctx->dev->venc_pdata->capture_formats[0]; + f->fmt.pix.pixelformat = fmt->fourcc; + } + + q_data->fmt = fmt; + vidioc_try_fmt_cap(f); + + q_data->coded_width = f->fmt.pix_mp.width; + q_data->coded_height = f->fmt.pix_mp.height; + q_data->field = f->fmt.pix_mp.field; + + for (i = 0; i < f->fmt.pix_mp.num_planes; i++) { + struct v4l2_plane_pix_format *plane_fmt; + + plane_fmt = &f->fmt.pix_mp.plane_fmt[i]; + q_data->bytesperline[i] = plane_fmt->bytesperline; + q_data->sizeimage[i] = plane_fmt->sizeimage; + } + + if (ctx->state == MTK_STATE_FREE) { + ret = venc_if_init(ctx, q_data->fmt->fourcc); + if (ret) { + mtk_v4l2_venc_err(ctx, "venc_if_init failed=%d, codec type=%x", + ret, q_data->fmt->fourcc); + return -EBUSY; + } + ctx->state = MTK_STATE_INIT; + } + + return 0; + } + + static int vidioc_venc_s_fmt_out(struct file *file, void *priv, + struct v4l2_format *f) + { + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + const struct mtk_vcodec_enc_pdata *pdata = 
ctx->dev->venc_pdata; + struct vb2_queue *vq; + struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type); + int ret, i; + const struct mtk_video_fmt *fmt; + + vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); + if (!vq) { + mtk_v4l2_venc_err(ctx, "fail to get vq"); + return -EINVAL; + } + + if (vb2_is_busy(vq)) { + mtk_v4l2_venc_err(ctx, "queue busy"); + return -EBUSY; + } + + fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata); + if (!fmt) { + fmt = &ctx->dev->venc_pdata->output_formats[0]; + f->fmt.pix.pixelformat = fmt->fourcc; + } + + ret = vidioc_try_fmt_out(ctx, f, fmt); + if (ret) + return ret; + + q_data->fmt = fmt; + q_data->visible_width = f->fmt.pix_mp.width; + q_data->visible_height = f->fmt.pix_mp.height; + q_data->coded_width = f->fmt.pix_mp.width; + q_data->coded_height = f->fmt.pix_mp.height; + + q_data->field = f->fmt.pix_mp.field; + ctx->colorspace = f->fmt.pix_mp.colorspace; + ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc; + ctx->quantization = f->fmt.pix_mp.quantization; + ctx->xfer_func = f->fmt.pix_mp.xfer_func; + + for (i = 0; i < f->fmt.pix_mp.num_planes; i++) { + struct v4l2_plane_pix_format *plane_fmt; + + plane_fmt = &f->fmt.pix_mp.plane_fmt[i]; + q_data->bytesperline[i] = plane_fmt->bytesperline; + q_data->sizeimage[i] = plane_fmt->sizeimage; + } + + return 0; + } + + static int vidioc_venc_g_fmt(struct file *file, void *priv, + struct v4l2_format *f) + { + struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + struct vb2_queue *vq; + struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type); + int i; + + vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); + if (!vq) + return -EINVAL; + + + pix->width = q_data->coded_width; + pix->height = q_data->coded_height; + pix->pixelformat = q_data->fmt->fourcc; + pix->field = q_data->field; + pix->num_planes = q_data->fmt->num_planes; + for (i = 0; i < pix->num_planes; i++) { + pix->plane_fmt[i].bytesperline = q_data->bytesperline[i]; + pix->plane_fmt[i].sizeimage = q_data->sizeimage[i]; + } + + pix->flags = 0; + pix->colorspace = ctx->colorspace; + pix->ycbcr_enc = ctx->ycbcr_enc; + pix->quantization = ctx->quantization; + pix->xfer_func = ctx->xfer_func; + + return 0; + } + + static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv, + struct v4l2_format *f) + { + const struct mtk_video_fmt *fmt; + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata; + + fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata); + if (!fmt) { + fmt = &ctx->dev->venc_pdata->capture_formats[0]; + f->fmt.pix.pixelformat = fmt->fourcc; + } + f->fmt.pix_mp.colorspace = ctx->colorspace; + f->fmt.pix_mp.ycbcr_enc = ctx->ycbcr_enc; + f->fmt.pix_mp.quantization = ctx->quantization; + f->fmt.pix_mp.xfer_func = ctx->xfer_func; + + vidioc_try_fmt_cap(f); + + return 0; + } + + static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv, + struct v4l2_format *f) + { + const struct mtk_video_fmt *fmt; + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata; + + fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata); + if (!fmt) { + fmt = &ctx->dev->venc_pdata->output_formats[0]; + f->fmt.pix.pixelformat = fmt->fourcc; + } + if (!f->fmt.pix_mp.colorspace) { + f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709; + f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT; + 
f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT; + } + + return vidioc_try_fmt_out(ctx, f, fmt); + } + + static int vidioc_venc_g_selection(struct file *file, void *priv, + struct v4l2_selection *s) + { + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, s->type); + + if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + + switch (s->target) { + case V4L2_SEL_TGT_CROP_DEFAULT: + case V4L2_SEL_TGT_CROP_BOUNDS: + s->r.top = 0; + s->r.left = 0; + s->r.width = q_data->coded_width; + s->r.height = q_data->coded_height; + break; + case V4L2_SEL_TGT_CROP: + s->r.top = 0; + s->r.left = 0; + s->r.width = q_data->visible_width; + s->r.height = q_data->visible_height; + break; + default: + return -EINVAL; + } + + return 0; + } + + static int vidioc_venc_s_selection(struct file *file, void *priv, + struct v4l2_selection *s) + { + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, s->type); + + if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + + switch (s->target) { + case V4L2_SEL_TGT_CROP: + /* Only support crop from (0,0) */ + s->r.top = 0; + s->r.left = 0; + s->r.width = min(s->r.width, q_data->coded_width); + s->r.height = min(s->r.height, q_data->coded_height); + q_data->visible_width = s->r.width; + q_data->visible_height = s->r.height; + break; + default: + return -EINVAL; + } + return 0; + } + + static int vidioc_venc_qbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) + { + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + + if (ctx->state == MTK_STATE_ABORT) { + mtk_v4l2_venc_err(ctx, "[%d] Call on QBUF after unrecoverable error", + ctx->id); + return -EIO; + } + + return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); + } + + static int vidioc_venc_dqbuf(struct file *file, void *priv, + struct v4l2_buffer *buf) + { + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + int ret; + + if (ctx->state == MTK_STATE_ABORT) { + mtk_v4l2_venc_err(ctx, "[%d] Call on QBUF after unrecoverable error", + ctx->id); + return -EIO; + } + + ret = v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); + if (ret) + return ret; + + /* + * Complete flush if the user dequeued the 0-payload LAST buffer. + * We check the payload because a buffer with the LAST flag can also + * be seen during resolution changes. If we happen to be flushing at + * that time, the last buffer before the resolution changes could be + * misinterpreted for the buffer generated by the flush and terminate + * it earlier than we want. + */ + if (!V4L2_TYPE_IS_OUTPUT(buf->type) && + buf->flags & V4L2_BUF_FLAG_LAST && + buf->m.planes[0].bytesused == 0 && + ctx->is_flushing) { + /* + * Last CAPTURE buffer is dequeued, we can allow another flush + * to take place. 
+ */ + ctx->is_flushing = false; + } + + return 0; + } + + static int vidioc_encoder_cmd(struct file *file, void *priv, + struct v4l2_encoder_cmd *cmd) + { + struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv); + struct vb2_queue *src_vq, *dst_vq; + int ret; + + if (ctx->state == MTK_STATE_ABORT) { + mtk_v4l2_venc_err(ctx, "[%d] Call to CMD after unrecoverable error", + ctx->id); + return -EIO; + } + + ret = v4l2_m2m_ioctl_try_encoder_cmd(file, priv, cmd); + if (ret) + return ret; + + /* Calling START or STOP is invalid if a flush is in progress */ + if (ctx->is_flushing) + return -EBUSY; + + mtk_v4l2_venc_dbg(1, ctx, "encoder cmd=%u", cmd->cmd); + + dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); + switch (cmd->cmd) { + case V4L2_ENC_CMD_STOP: + src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + if (!vb2_is_streaming(src_vq)) { + mtk_v4l2_venc_dbg(1, ctx, "Output stream is off. No need to flush."); + return 0; + } + if (!vb2_is_streaming(dst_vq)) { + mtk_v4l2_venc_dbg(1, ctx, "Capture stream is off. No need to flush."); + return 0; + } + ctx->is_flushing = true; + v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf.vb); + v4l2_m2m_try_schedule(ctx->m2m_ctx); + break; + + case V4L2_ENC_CMD_START: + vb2_clear_last_buffer_dequeued(dst_vq); + break; + + default: + return -EINVAL; + } + + return 0; + } + + const struct v4l2_ioctl_ops mtk_venc_ioctl_ops = { + .vidioc_streamon = v4l2_m2m_ioctl_streamon, + .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, + + .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, + .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, + .vidioc_qbuf = vidioc_venc_qbuf, + .vidioc_dqbuf = vidioc_venc_dqbuf, + + .vidioc_querycap = vidioc_venc_querycap, + .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, + .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out, + .vidioc_enum_framesizes = vidioc_enum_framesizes, + + .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane, + .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane, + .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + + .vidioc_s_parm = vidioc_venc_s_parm, + .vidioc_g_parm = vidioc_venc_g_parm, + .vidioc_s_fmt_vid_cap_mplane = vidioc_venc_s_fmt_cap, + .vidioc_s_fmt_vid_out_mplane = vidioc_venc_s_fmt_out, + + .vidioc_g_fmt_vid_cap_mplane = vidioc_venc_g_fmt, + .vidioc_g_fmt_vid_out_mplane = vidioc_venc_g_fmt, + + .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, + .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, + + .vidioc_g_selection = vidioc_venc_g_selection, + .vidioc_s_selection = vidioc_venc_s_selection, + + .vidioc_encoder_cmd = vidioc_encoder_cmd, + .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd, + }; + + static int vb2ops_venc_queue_setup(struct vb2_queue *vq, + unsigned int *nbuffers, + unsigned int *nplanes, + unsigned int sizes[], + struct device *alloc_devs[]) + { + struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(vq); + struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, vq->type); + unsigned int i; + + if (q_data == NULL) + return -EINVAL; + + if (*nplanes) { ++ if (*nplanes != q_data->fmt->num_planes) ++ return -EINVAL; + for (i = 0; i < *nplanes; i++) + if (sizes[i] < q_data->sizeimage[i]) + return -EINVAL; + } else { + *nplanes = q_data->fmt->num_planes; + for (i = 0; i < *nplanes; i++) + sizes[i] = q_data->sizeimage[i]; + } + + return 0; + } + + static int vb2ops_venc_buf_prepare(struct vb2_buffer *vb) + { + struct 
mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, vb->vb2_queue->type); + int i; + + for (i = 0; i < q_data->fmt->num_planes; i++) { + if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) { + mtk_v4l2_venc_err(ctx, "data will not fit into plane %d (%lu < %d)", + i, vb2_plane_size(vb, i), q_data->sizeimage[i]); + return -EINVAL; + } + } + + return 0; + } + + static void vb2ops_venc_buf_queue(struct vb2_buffer *vb) + { + struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vb2_v4l2 = + container_of(vb, struct vb2_v4l2_buffer, vb2_buf); + + struct mtk_video_enc_buf *mtk_buf = + container_of(vb2_v4l2, struct mtk_video_enc_buf, + m2m_buf.vb); + + if ((vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) && + (ctx->param_change != MTK_ENCODE_PARAM_NONE)) { + mtk_v4l2_venc_dbg(1, ctx, "[%d] Before id=%d encode parameter change %x", + ctx->id, vb2_v4l2->vb2_buf.index, ctx->param_change); + mtk_buf->param_change = ctx->param_change; + mtk_buf->enc_params = ctx->enc_params; + ctx->param_change = MTK_ENCODE_PARAM_NONE; + } + + v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb)); + } + + static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count) + { + struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(q); + struct venc_enc_param param; + int ret, pm_ret; + int i; + + /* Once state turn into MTK_STATE_ABORT, we need stop_streaming + * to clear it + */ + if ((ctx->state == MTK_STATE_ABORT) || (ctx->state == MTK_STATE_FREE)) { + ret = -EIO; + goto err_start_stream; + } + + /* Do the initialization when both start_streaming have been called */ + if (V4L2_TYPE_IS_OUTPUT(q->type)) { + if (!vb2_start_streaming_called(&ctx->m2m_ctx->cap_q_ctx.q)) + return 0; + } else { + if (!vb2_start_streaming_called(&ctx->m2m_ctx->out_q_ctx.q)) + return 0; + } + + ret = pm_runtime_resume_and_get(&ctx->dev->plat_dev->dev); + if (ret < 0) { + mtk_v4l2_venc_err(ctx, "pm_runtime_resume_and_get fail %d", ret); + goto err_start_stream; + } + + mtk_venc_set_param(ctx, ¶m); + ret = venc_if_set_param(ctx, VENC_SET_PARAM_ENC, ¶m); + if (ret) { + mtk_v4l2_venc_err(ctx, "venc_if_set_param failed=%d", ret); + ctx->state = MTK_STATE_ABORT; + goto err_set_param; + } + ctx->param_change = MTK_ENCODE_PARAM_NONE; + + if ((ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_H264) && + (ctx->enc_params.seq_hdr_mode != + V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE)) { + ret = venc_if_set_param(ctx, + VENC_SET_PARAM_PREPEND_HEADER, + NULL); + if (ret) { + mtk_v4l2_venc_err(ctx, "venc_if_set_param failed=%d", ret); + ctx->state = MTK_STATE_ABORT; + goto err_set_param; + } + ctx->state = MTK_STATE_HEADER; + } + + return 0; + + err_set_param: + pm_ret = pm_runtime_put(&ctx->dev->plat_dev->dev); + if (pm_ret < 0) + mtk_v4l2_venc_err(ctx, "pm_runtime_put fail %d", pm_ret); + + err_start_stream: + for (i = 0; i < q->num_buffers; ++i) { + struct vb2_buffer *buf = vb2_get_buffer(q, i); + + /* + * FIXME: This check is not needed as only active buffers + * can be marked as done. 
+ */ + if (buf && buf->state == VB2_BUF_STATE_ACTIVE) { + mtk_v4l2_venc_dbg(0, ctx, "[%d] id=%d, type=%d, %d->VB2_BUF_STATE_QUEUED", + ctx->id, i, q->type, (int)buf->state); + v4l2_m2m_buf_done(to_vb2_v4l2_buffer(buf), + VB2_BUF_STATE_QUEUED); + } + } + + return ret; + } + + static void vb2ops_venc_stop_streaming(struct vb2_queue *q) + { + struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(q); + struct vb2_v4l2_buffer *src_buf, *dst_buf; + int ret; + + mtk_v4l2_venc_dbg(2, ctx, "[%d]-> type=%d", ctx->id, q->type); + + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) { + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); + } + /* STREAMOFF on the CAPTURE queue completes any ongoing flush */ + if (ctx->is_flushing) { + struct v4l2_m2m_buffer *b, *n; + + mtk_v4l2_venc_dbg(1, ctx, "STREAMOFF called while flushing"); + /* + * STREAMOFF could be called before the flush buffer is + * dequeued. Check whether empty flush buf is still in + * queue before removing it. + */ + v4l2_m2m_for_each_src_buf_safe(ctx->m2m_ctx, b, n) { + if (b == &ctx->empty_flush_buf) { + v4l2_m2m_src_buf_remove_by_buf(ctx->m2m_ctx, &b->vb); + break; + } + } + ctx->is_flushing = false; + } + } else { + while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) { + if (src_buf != &ctx->empty_flush_buf.vb) + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); + } + if (ctx->is_flushing) { + /* + * If we are in the middle of a flush, put the flush + * buffer back into the queue so the next CAPTURE + * buffer gets returned with the LAST flag set. + */ + v4l2_m2m_buf_queue(ctx->m2m_ctx, + &ctx->empty_flush_buf.vb); + } + } + + if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q)) || + (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q))) { + mtk_v4l2_venc_dbg(1, ctx, "[%d]-> q type %d out=%d cap=%d", + ctx->id, q->type, + vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q), + vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q)); + return; + } + + /* Release the encoder if both streams are stopped. 
*/ + ret = venc_if_deinit(ctx); + if (ret) + mtk_v4l2_venc_err(ctx, "venc_if_deinit failed=%d", ret); + + ret = pm_runtime_put(&ctx->dev->plat_dev->dev); + if (ret < 0) + mtk_v4l2_venc_err(ctx, "pm_runtime_put fail %d", ret); + + ctx->state = MTK_STATE_FREE; + } + + static int vb2ops_venc_buf_out_validate(struct vb2_buffer *vb) + { + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + + vbuf->field = V4L2_FIELD_NONE; + return 0; + } + + static const struct vb2_ops mtk_venc_vb2_ops = { + .queue_setup = vb2ops_venc_queue_setup, + .buf_out_validate = vb2ops_venc_buf_out_validate, + .buf_prepare = vb2ops_venc_buf_prepare, + .buf_queue = vb2ops_venc_buf_queue, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .start_streaming = vb2ops_venc_start_streaming, + .stop_streaming = vb2ops_venc_stop_streaming, + }; + + static int mtk_venc_encode_header(void *priv) + { + struct mtk_vcodec_enc_ctx *ctx = priv; + int ret; + struct vb2_v4l2_buffer *src_buf, *dst_buf; + struct mtk_vcodec_mem bs_buf; + struct venc_done_result enc_result; + + dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); + if (!dst_buf) { + mtk_v4l2_venc_dbg(1, ctx, "No dst buffer"); + return -EINVAL; + } + + bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0); + bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); + bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length; + + mtk_v4l2_venc_dbg(1, ctx, + "[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu", + ctx->id, dst_buf->vb2_buf.index, bs_buf.va, + (u64)bs_buf.dma_addr, bs_buf.size); + + ret = venc_if_encode(ctx, + VENC_START_OPT_ENCODE_SEQUENCE_HEADER, + NULL, &bs_buf, &enc_result); + + if (ret) { + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0); + ctx->state = MTK_STATE_ABORT; + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); + mtk_v4l2_venc_err(ctx, "venc_if_encode failed=%d", ret); + return -EINVAL; + } + src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); + if (src_buf) { + dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp; + dst_buf->timecode = src_buf->timecode; + } else { + mtk_v4l2_venc_err(ctx, "No timestamp for the header buffer."); + } + + ctx->state = MTK_STATE_HEADER; + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, enc_result.bs_size); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE); + + return 0; + } + + static int mtk_venc_param_change(struct mtk_vcodec_enc_ctx *ctx) + { + struct venc_enc_param enc_prm; + struct vb2_v4l2_buffer *vb2_v4l2 = v4l2_m2m_next_src_buf(ctx->m2m_ctx); + struct mtk_video_enc_buf *mtk_buf; + int ret = 0; + + /* Don't upcast the empty flush buffer */ + if (vb2_v4l2 == &ctx->empty_flush_buf.vb) + return 0; + + mtk_buf = container_of(vb2_v4l2, struct mtk_video_enc_buf, m2m_buf.vb); + + memset(&enc_prm, 0, sizeof(enc_prm)); + if (mtk_buf->param_change == MTK_ENCODE_PARAM_NONE) + return 0; + + if (mtk_buf->param_change & MTK_ENCODE_PARAM_BITRATE) { + enc_prm.bitrate = mtk_buf->enc_params.bitrate; + mtk_v4l2_venc_dbg(1, ctx, "[%d] id=%d, change param br=%d", + ctx->id, vb2_v4l2->vb2_buf.index, enc_prm.bitrate); + ret |= venc_if_set_param(ctx, + VENC_SET_PARAM_ADJUST_BITRATE, + &enc_prm); + } + if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_FRAMERATE) { + enc_prm.frm_rate = mtk_buf->enc_params.framerate_num / + mtk_buf->enc_params.framerate_denom; + mtk_v4l2_venc_dbg(1, ctx, "[%d] id=%d, change param fr=%d", + ctx->id, vb2_v4l2->vb2_buf.index, enc_prm.frm_rate); + ret |= venc_if_set_param(ctx, + VENC_SET_PARAM_ADJUST_FRAMERATE, + &enc_prm); + } + if (!ret && mtk_buf->param_change & 
MTK_ENCODE_PARAM_GOP_SIZE) { + enc_prm.gop_size = mtk_buf->enc_params.gop_size; + mtk_v4l2_venc_dbg(1, ctx, "change param intra period=%d", enc_prm.gop_size); + ret |= venc_if_set_param(ctx, + VENC_SET_PARAM_GOP_SIZE, + &enc_prm); + } + if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_FORCE_INTRA) { + mtk_v4l2_venc_dbg(1, ctx, "[%d] id=%d, change param force I=%d", + ctx->id, vb2_v4l2->vb2_buf.index, + mtk_buf->enc_params.force_intra); + if (mtk_buf->enc_params.force_intra) + ret |= venc_if_set_param(ctx, + VENC_SET_PARAM_FORCE_INTRA, + NULL); + } + + mtk_buf->param_change = MTK_ENCODE_PARAM_NONE; + + if (ret) { + ctx->state = MTK_STATE_ABORT; + mtk_v4l2_venc_err(ctx, "venc_if_set_param %d failed=%d", + mtk_buf->param_change, ret); + return -1; + } + + return 0; + } + + /* + * v4l2_m2m_streamoff() holds dev_mutex and waits mtk_venc_worker() + * to call v4l2_m2m_job_finish(). + * If mtk_venc_worker() tries to acquire dev_mutex, it will deadlock. + * So this function must not try to acquire dev->dev_mutex. + * This means v4l2 ioctls and mtk_venc_worker() can run at the same time. + * mtk_venc_worker() should be carefully implemented to avoid bugs. + */ + static void mtk_venc_worker(struct work_struct *work) + { + struct mtk_vcodec_enc_ctx *ctx = container_of(work, struct mtk_vcodec_enc_ctx, + encode_work); + struct vb2_v4l2_buffer *src_buf, *dst_buf; + struct venc_frm_buf frm_buf; + struct mtk_vcodec_mem bs_buf; + struct venc_done_result enc_result; + int ret, i; + + /* check dst_buf, dst_buf may be removed in device_run + * to stored encdoe header so we need check dst_buf and + * call job_finish here to prevent recursion + */ + dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); + if (!dst_buf) { + v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx); + return; + } + + src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + + /* + * If we see the flush buffer, send an empty buffer with the LAST flag + * to the client. is_flushing will be reset at the time the buffer + * is dequeued. 
+ */ + if (src_buf == &ctx->empty_flush_buf.vb) { + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0); + dst_buf->flags |= V4L2_BUF_FLAG_LAST; + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE); + v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx); + return; + } + + memset(&frm_buf, 0, sizeof(frm_buf)); + for (i = 0; i < src_buf->vb2_buf.num_planes ; i++) { + frm_buf.fb_addr[i].dma_addr = + vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, i); + frm_buf.fb_addr[i].size = + (size_t)src_buf->vb2_buf.planes[i].length; + } + bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0); + bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); + bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length; + + mtk_v4l2_venc_dbg(2, ctx, + "Framebuf PA=%llx Size=0x%zx;PA=0x%llx Size=0x%zx;PA=0x%llx Size=%zu", + (u64)frm_buf.fb_addr[0].dma_addr, frm_buf.fb_addr[0].size, + (u64)frm_buf.fb_addr[1].dma_addr, frm_buf.fb_addr[1].size, + (u64)frm_buf.fb_addr[2].dma_addr, frm_buf.fb_addr[2].size); + + ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME, + &frm_buf, &bs_buf, &enc_result); + + dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp; + dst_buf->timecode = src_buf->timecode; + + if (enc_result.is_key_frm) + dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME; + + if (ret) { + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); + mtk_v4l2_venc_err(ctx, "venc_if_encode failed=%d", ret); + } else { + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, enc_result.bs_size); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE); + mtk_v4l2_venc_dbg(2, ctx, "venc_if_encode bs size=%d", + enc_result.bs_size); + } + + v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx); + + mtk_v4l2_venc_dbg(1, ctx, "<=== src_buf[%d] dst_buf[%d] venc_if_encode ret=%d Size=%u===>", + src_buf->vb2_buf.index, dst_buf->vb2_buf.index, ret, enc_result.bs_size); + } + + static void m2mops_venc_device_run(void *priv) + { + struct mtk_vcodec_enc_ctx *ctx = priv; + + if ((ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_H264) && + (ctx->state != MTK_STATE_HEADER)) { + /* encode h264 sps/pps header */ + mtk_venc_encode_header(ctx); + queue_work(ctx->dev->encode_workqueue, &ctx->encode_work); + return; + } + + mtk_venc_param_change(ctx); + queue_work(ctx->dev->encode_workqueue, &ctx->encode_work); + } + + static int m2mops_venc_job_ready(void *m2m_priv) + { + struct mtk_vcodec_enc_ctx *ctx = m2m_priv; + + if (ctx->state == MTK_STATE_ABORT || ctx->state == MTK_STATE_FREE) { + mtk_v4l2_venc_dbg(3, ctx, "[%d]Not ready: state=0x%x.", ctx->id, ctx->state); + return 0; + } + + return 1; + } + + static void m2mops_venc_job_abort(void *priv) + { + struct mtk_vcodec_enc_ctx *ctx = priv; + + ctx->state = MTK_STATE_ABORT; + } + + const struct v4l2_m2m_ops mtk_venc_m2m_ops = { + .device_run = m2mops_venc_device_run, + .job_ready = m2mops_venc_job_ready, + .job_abort = m2mops_venc_job_abort, + }; + + void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_enc_ctx *ctx) + { + struct mtk_q_data *q_data; + + ctx->m2m_ctx->q_lock = &ctx->q_mutex; + ctx->fh.m2m_ctx = ctx->m2m_ctx; + ctx->fh.ctrl_handler = &ctx->ctrl_hdl; + INIT_WORK(&ctx->encode_work, mtk_venc_worker); + + ctx->colorspace = V4L2_COLORSPACE_REC709; + ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + ctx->quantization = V4L2_QUANTIZATION_DEFAULT; + ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT; + + q_data = &ctx->q_data[MTK_Q_DATA_SRC]; + memset(q_data, 0, 
sizeof(struct mtk_q_data)); + q_data->visible_width = DFT_CFG_WIDTH; + q_data->visible_height = DFT_CFG_HEIGHT; + q_data->coded_width = DFT_CFG_WIDTH; + q_data->coded_height = DFT_CFG_HEIGHT; + q_data->field = V4L2_FIELD_NONE; + + q_data->fmt = &ctx->dev->venc_pdata->output_formats[0]; + + v4l_bound_align_image(&q_data->coded_width, + MTK_VENC_MIN_W, + MTK_VENC_HD_MAX_W, 4, + &q_data->coded_height, + MTK_VENC_MIN_H, + MTK_VENC_HD_MAX_H, 5, 6); + + if (q_data->coded_width < DFT_CFG_WIDTH && + (q_data->coded_width + 16) <= MTK_VENC_HD_MAX_W) + q_data->coded_width += 16; + if (q_data->coded_height < DFT_CFG_HEIGHT && + (q_data->coded_height + 32) <= MTK_VENC_HD_MAX_H) + q_data->coded_height += 32; + + q_data->sizeimage[0] = + q_data->coded_width * q_data->coded_height+ + ((ALIGN(q_data->coded_width, 16) * 2) * 16); + q_data->bytesperline[0] = q_data->coded_width; + q_data->sizeimage[1] = + (q_data->coded_width * q_data->coded_height) / 2 + + (ALIGN(q_data->coded_width, 16) * 16); + q_data->bytesperline[1] = q_data->coded_width; + + q_data = &ctx->q_data[MTK_Q_DATA_DST]; + memset(q_data, 0, sizeof(struct mtk_q_data)); + q_data->coded_width = DFT_CFG_WIDTH; + q_data->coded_height = DFT_CFG_HEIGHT; + q_data->fmt = &ctx->dev->venc_pdata->capture_formats[0]; + q_data->field = V4L2_FIELD_NONE; + ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] = + DFT_CFG_WIDTH * DFT_CFG_HEIGHT; + ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] = 0; + + ctx->enc_params.framerate_num = MTK_DEFAULT_FRAMERATE_NUM; + ctx->enc_params.framerate_denom = MTK_DEFAULT_FRAMERATE_DENOM; + } + + int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_enc_ctx *ctx) + { + const struct v4l2_ctrl_ops *ops = &mtk_vcodec_enc_ctrl_ops; + struct v4l2_ctrl_handler *handler = &ctx->ctrl_hdl; + u8 h264_max_level; + + if (ctx->dev->enc_capability & MTK_VENC_4K_CAPABILITY_ENABLE) + h264_max_level = V4L2_MPEG_VIDEO_H264_LEVEL_5_1; + else + h264_max_level = V4L2_MPEG_VIDEO_H264_LEVEL_4_2; + + v4l2_ctrl_handler_init(handler, MTK_MAX_CTRLS_HINT); + + v4l2_ctrl_new_std(handler, ops, V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, + 1, 1, 1, 1); + v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_BITRATE, + ctx->dev->venc_pdata->min_bitrate, + ctx->dev->venc_pdata->max_bitrate, 1, 4000000); + v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_B_FRAMES, + 0, 2, 1, 0); + v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, + 0, 1, 1, 1); + v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_H264_MAX_QP, + 0, 51, 1, 51); + v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, + 0, 65535, 1, 0); + v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_GOP_SIZE, + 0, 65535, 1, 0); + v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE, + 0, 1, 1, 0); + v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME, + 0, 0, 0, 0); + v4l2_ctrl_new_std_menu(handler, ops, + V4L2_CID_MPEG_VIDEO_HEADER_MODE, + V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME, + 0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE); + v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, + ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)), + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH); + v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, + h264_max_level, + 0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0); + v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_VP8_PROFILE, + V4L2_MPEG_VIDEO_VP8_PROFILE_0, 0, 
V4L2_MPEG_VIDEO_VP8_PROFILE_0); + v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_BITRATE_MODE, + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, + ~(1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR), + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR); + + + if (handler->error) { + mtk_v4l2_venc_err(ctx, "Init control handler fail %d", handler->error); + return handler->error; + } + + v4l2_ctrl_handler_setup(&ctx->ctrl_hdl); + + return 0; + } + + int mtk_vcodec_enc_queue_init(void *priv, struct vb2_queue *src_vq, + struct vb2_queue *dst_vq) + { + struct mtk_vcodec_enc_ctx *ctx = priv; + int ret; + + /* Note: VB2_USERPTR works with dma-contig because mt8173 + * support iommu + * https://patchwork.kernel.org/patch/8335461/ + * https://patchwork.kernel.org/patch/7596181/ + */ + src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR; + src_vq->drv_priv = ctx; + src_vq->buf_struct_size = sizeof(struct mtk_video_enc_buf); + src_vq->ops = &mtk_venc_vb2_ops; + src_vq->mem_ops = &vb2_dma_contig_memops; + src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + src_vq->lock = &ctx->q_mutex; + src_vq->dev = &ctx->dev->plat_dev->dev; + + ret = vb2_queue_init(src_vq); + if (ret) + return ret; + + dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + dst_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR; + dst_vq->drv_priv = ctx; + dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); + dst_vq->ops = &mtk_venc_vb2_ops; + dst_vq->mem_ops = &vb2_dma_contig_memops; + dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + dst_vq->lock = &ctx->q_mutex; + dst_vq->dev = &ctx->dev->plat_dev->dev; + + return vb2_queue_init(dst_vq); + } + + int mtk_venc_unlock(struct mtk_vcodec_enc_ctx *ctx) + { + struct mtk_vcodec_enc_dev *dev = ctx->dev; + + mutex_unlock(&dev->enc_mutex); + return 0; + } + + int mtk_venc_lock(struct mtk_vcodec_enc_ctx *ctx) + { + struct mtk_vcodec_enc_dev *dev = ctx->dev; + + mutex_lock(&dev->enc_mutex); + return 0; + } + + void mtk_vcodec_enc_release(struct mtk_vcodec_enc_ctx *ctx) + { + int ret = venc_if_deinit(ctx); + + if (ret) + mtk_v4l2_venc_err(ctx, "venc_if_deinit failed=%d", ret); + + ctx->state = MTK_STATE_FREE; + } diff --cc drivers/media/platform/nxp/imx7-media-csi.c index 791bde67f439c,ffe372d321b6b..15049c6aab372 --- a/drivers/media/platform/nxp/imx7-media-csi.c +++ b/drivers/media/platform/nxp/imx7-media-csi.c @@@ -9,11 -9,9 +9,11 @@@ #include #include #include +#include #include +#include #include - #include + #include #include #include #include diff --cc drivers/media/platform/nxp/imx8mq-mipi-csi2.c index 0000000000000,39f7e86ad5312..ed048f73c982b mode 000000,100644..100644 --- a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c +++ b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c @@@ -1,0 -1,966 +1,965 @@@ + // SPDX-License-Identifier: GPL-2.0 + /* + * NXP i.MX8MQ SoC series MIPI-CSI2 receiver driver + * + * Copyright (C) 2021 Purism SPC + */ + + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include -#include + #include + #include + #include + #include + #include + #include + + #include + #include + #include + #include + #include + + #define MIPI_CSI2_DRIVER_NAME "imx8mq-mipi-csi2" + #define MIPI_CSI2_SUBDEV_NAME MIPI_CSI2_DRIVER_NAME + + #define MIPI_CSI2_PAD_SINK 0 + #define MIPI_CSI2_PAD_SOURCE 1 + #define MIPI_CSI2_PADS_NUM 2 + + #define MIPI_CSI2_DEF_PIX_WIDTH 640 + #define MIPI_CSI2_DEF_PIX_HEIGHT 480 + + /* Register map definition */ + + /* i.MX8MQ CSI-2 controller CSR */ 
+ #define CSI2RX_CFG_NUM_LANES 0x100 + #define CSI2RX_CFG_DISABLE_DATA_LANES 0x104 + #define CSI2RX_BIT_ERR 0x108 + #define CSI2RX_IRQ_STATUS 0x10c + #define CSI2RX_IRQ_MASK 0x110 + #define CSI2RX_IRQ_MASK_ALL 0x1ff + #define CSI2RX_IRQ_MASK_ULPS_STATUS_CHANGE 0x8 + #define CSI2RX_ULPS_STATUS 0x114 + #define CSI2RX_PPI_ERRSOT_HS 0x118 + #define CSI2RX_PPI_ERRSOTSYNC_HS 0x11c + #define CSI2RX_PPI_ERRESC 0x120 + #define CSI2RX_PPI_ERRSYNCESC 0x124 + #define CSI2RX_PPI_ERRCONTROL 0x128 + #define CSI2RX_CFG_DISABLE_PAYLOAD_0 0x12c + #define CSI2RX_CFG_VID_VC_IGNORE 0x180 + #define CSI2RX_CFG_VID_VC 0x184 + #define CSI2RX_CFG_VID_P_FIFO_SEND_LEVEL 0x188 + #define CSI2RX_CFG_DISABLE_PAYLOAD_1 0x130 + + enum { + ST_POWERED = 1, + ST_STREAMING = 2, + ST_SUSPENDED = 4, + }; + + enum imx8mq_mipi_csi_clk { + CSI2_CLK_CORE, + CSI2_CLK_ESC, + CSI2_CLK_UI, + CSI2_NUM_CLKS, + }; + + static const char * const imx8mq_mipi_csi_clk_id[CSI2_NUM_CLKS] = { + [CSI2_CLK_CORE] = "core", + [CSI2_CLK_ESC] = "esc", + [CSI2_CLK_UI] = "ui", + }; + + #define CSI2_NUM_CLKS ARRAY_SIZE(imx8mq_mipi_csi_clk_id) + + #define GPR_CSI2_1_RX_ENABLE BIT(13) + #define GPR_CSI2_1_VID_INTFC_ENB BIT(12) + #define GPR_CSI2_1_HSEL BIT(10) + #define GPR_CSI2_1_CONT_CLK_MODE BIT(8) + #define GPR_CSI2_1_S_PRG_RXHS_SETTLE(x) (((x) & 0x3f) << 2) + + /* + * The send level configures the number of entries that must accumulate in + * the Pixel FIFO before the data will be transferred to the video output. + * The exact value needed for this configuration is dependent on the rate at + * which the sensor transfers data to the CSI-2 Controller and the user + * video clock. + * + * The calculation is the classical rate-in rate-out type of problem: If the + * video bandwidth is 10% faster than the incoming mipi data and the video + * line length is 500 pixels, then the fifo should be allowed to fill + * 10% of the line length or 50 pixels. If the gap data is ok, then the level + * can be set to 16 and ignored. + */ + #define CSI2RX_SEND_LEVEL 64 + + struct csi_state { + struct device *dev; + void __iomem *regs; + struct clk_bulk_data clks[CSI2_NUM_CLKS]; + struct reset_control *rst; + struct regulator *mipi_phy_regulator; + + struct v4l2_subdev sd; + struct media_pad pads[MIPI_CSI2_PADS_NUM]; + struct v4l2_async_notifier notifier; + struct v4l2_subdev *src_sd; + + struct v4l2_mbus_config_mipi_csi2 bus; + + struct mutex lock; /* Protect state */ + u32 state; + + struct regmap *phy_gpr; + u8 phy_gpr_reg; + + struct icc_path *icc_path; + s32 icc_path_bw; + }; + + /* ----------------------------------------------------------------------------- + * Format helpers + */ + + struct csi2_pix_format { + u32 code; + u8 width; + }; + + static const struct csi2_pix_format imx8mq_mipi_csi_formats[] = { + /* RAW (Bayer and greyscale) formats. 
*/ + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .width = 8, + }, { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .width = 8, + }, { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .width = 8, + }, { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .width = 8, + }, { + .code = MEDIA_BUS_FMT_Y8_1X8, + .width = 8, + }, { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .width = 10, + }, { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .width = 10, + }, { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .width = 10, + }, { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .width = 10, + }, { + .code = MEDIA_BUS_FMT_Y10_1X10, + .width = 10, + }, { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .width = 12, + }, { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .width = 12, + }, { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .width = 12, + }, { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .width = 12, + }, { + .code = MEDIA_BUS_FMT_Y12_1X12, + .width = 12, + }, { + .code = MEDIA_BUS_FMT_SBGGR14_1X14, + .width = 14, + }, { + .code = MEDIA_BUS_FMT_SGBRG14_1X14, + .width = 14, + }, { + .code = MEDIA_BUS_FMT_SGRBG14_1X14, + .width = 14, + }, { + .code = MEDIA_BUS_FMT_SRGGB14_1X14, + .width = 14, + }, + /* YUV formats */ + { + .code = MEDIA_BUS_FMT_YUYV8_1X16, + .width = 16, + }, { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .width = 16, + } + }; + + static const struct csi2_pix_format *find_csi2_format(u32 code) + { + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(imx8mq_mipi_csi_formats); i++) + if (code == imx8mq_mipi_csi_formats[i].code) + return &imx8mq_mipi_csi_formats[i]; + return NULL; + } + + /* ----------------------------------------------------------------------------- + * Hardware configuration + */ + + static inline void imx8mq_mipi_csi_write(struct csi_state *state, u32 reg, u32 val) + { + writel(val, state->regs + reg); + } + + static int imx8mq_mipi_csi_sw_reset(struct csi_state *state) + { + int ret; + + /* + * these are most likely self-clearing reset bits. to make it + * more clear, the reset-imx7 driver should implement the + * .reset() operation. + */ + ret = reset_control_assert(state->rst); + if (ret < 0) { + dev_err(state->dev, "Failed to assert resets: %d\n", ret); + return ret; + } + + return 0; + } + + static void imx8mq_mipi_csi_set_params(struct csi_state *state) + { + int lanes = state->bus.num_data_lanes; + + imx8mq_mipi_csi_write(state, CSI2RX_CFG_NUM_LANES, lanes - 1); + imx8mq_mipi_csi_write(state, CSI2RX_CFG_DISABLE_DATA_LANES, + (0xf << lanes) & 0xf); + imx8mq_mipi_csi_write(state, CSI2RX_IRQ_MASK, CSI2RX_IRQ_MASK_ALL); + /* + * 0x180 bit 0 controls the Virtual Channel behaviour: when set the + * interface ignores the Virtual Channel (VC) field in received packets; + * when cleared it causes the interface to only accept packets whose VC + * matches the value to which VC is set at offset 0x184. 
+ */ + imx8mq_mipi_csi_write(state, CSI2RX_CFG_VID_VC_IGNORE, 1); + imx8mq_mipi_csi_write(state, CSI2RX_CFG_VID_P_FIFO_SEND_LEVEL, + CSI2RX_SEND_LEVEL); + } + + static int imx8mq_mipi_csi_clk_enable(struct csi_state *state) + { + return clk_bulk_prepare_enable(CSI2_NUM_CLKS, state->clks); + } + + static void imx8mq_mipi_csi_clk_disable(struct csi_state *state) + { + clk_bulk_disable_unprepare(CSI2_NUM_CLKS, state->clks); + } + + static int imx8mq_mipi_csi_clk_get(struct csi_state *state) + { + unsigned int i; + + for (i = 0; i < CSI2_NUM_CLKS; i++) + state->clks[i].id = imx8mq_mipi_csi_clk_id[i]; + + return devm_clk_bulk_get(state->dev, CSI2_NUM_CLKS, state->clks); + } + + static int imx8mq_mipi_csi_calc_hs_settle(struct csi_state *state, + struct v4l2_subdev_state *sd_state, + u32 *hs_settle) + { + s64 link_freq; + u32 lane_rate; + unsigned long esc_clk_rate; + u32 min_ths_settle, max_ths_settle, ths_settle_ns, esc_clk_period_ns; + const struct v4l2_mbus_framefmt *fmt; + const struct csi2_pix_format *csi2_fmt; + + /* Calculate the line rate from the pixel rate. */ + + fmt = v4l2_subdev_get_pad_format(&state->sd, sd_state, MIPI_CSI2_PAD_SINK); + csi2_fmt = find_csi2_format(fmt->code); + + link_freq = v4l2_get_link_freq(state->src_sd->ctrl_handler, + csi2_fmt->width, + state->bus.num_data_lanes * 2); + if (link_freq < 0) { + dev_err(state->dev, "Unable to obtain link frequency: %d\n", + (int)link_freq); + return link_freq; + } + + lane_rate = link_freq * 2; + if (lane_rate < 80000000 || lane_rate > 1500000000) { + dev_dbg(state->dev, "Out-of-bound lane rate %u\n", lane_rate); + return -EINVAL; + } + + /* + * The D-PHY specification requires Ths-settle to be in the range + * 85ns + 6*UI to 140ns + 10*UI, with the unit interval UI being half + * the clock period. + * + * The Ths-settle value is expressed in the hardware as a multiple of + * the Esc clock period: + * + * Ths-settle = (PRG_RXHS_SETTLE + 1) * Tperiod of RxClkInEsc + * + * Due to the one cycle inaccuracy introduced by rounding, the + * documentation recommends picking a value away from the boundaries. + * Let's pick the average. 
+ */ + esc_clk_rate = clk_get_rate(state->clks[CSI2_CLK_ESC].clk); + if (!esc_clk_rate) { + dev_err(state->dev, "Could not get esc clock rate.\n"); + return -EINVAL; + } + + dev_dbg(state->dev, "esc clk rate: %lu\n", esc_clk_rate); + esc_clk_period_ns = 1000000000 / esc_clk_rate; + + min_ths_settle = 85 + 6 * 1000000 / (lane_rate / 1000); + max_ths_settle = 140 + 10 * 1000000 / (lane_rate / 1000); + ths_settle_ns = (min_ths_settle + max_ths_settle) / 2; + + *hs_settle = ths_settle_ns / esc_clk_period_ns - 1; + + dev_dbg(state->dev, "lane rate %u Ths_settle %u hs_settle %u\n", + lane_rate, ths_settle_ns, *hs_settle); + + return 0; + } + + static int imx8mq_mipi_csi_start_stream(struct csi_state *state, + struct v4l2_subdev_state *sd_state) + { + int ret; + u32 hs_settle = 0; + + ret = imx8mq_mipi_csi_sw_reset(state); + if (ret) + return ret; + + imx8mq_mipi_csi_set_params(state); + ret = imx8mq_mipi_csi_calc_hs_settle(state, sd_state, &hs_settle); + if (ret) + return ret; + + regmap_update_bits(state->phy_gpr, + state->phy_gpr_reg, + 0x3fff, + GPR_CSI2_1_RX_ENABLE | + GPR_CSI2_1_VID_INTFC_ENB | + GPR_CSI2_1_HSEL | + GPR_CSI2_1_CONT_CLK_MODE | + GPR_CSI2_1_S_PRG_RXHS_SETTLE(hs_settle)); + + return 0; + } + + static void imx8mq_mipi_csi_stop_stream(struct csi_state *state) + { + imx8mq_mipi_csi_write(state, CSI2RX_CFG_DISABLE_DATA_LANES, 0xf); + } + + /* ----------------------------------------------------------------------------- + * V4L2 subdev operations + */ + + static struct csi_state *mipi_sd_to_csi2_state(struct v4l2_subdev *sdev) + { + return container_of(sdev, struct csi_state, sd); + } + + static int imx8mq_mipi_csi_s_stream(struct v4l2_subdev *sd, int enable) + { + struct csi_state *state = mipi_sd_to_csi2_state(sd); + struct v4l2_subdev_state *sd_state; + int ret = 0; + + if (enable) { + ret = pm_runtime_resume_and_get(state->dev); + if (ret < 0) + return ret; + } + + mutex_lock(&state->lock); + + if (enable) { + if (state->state & ST_SUSPENDED) { + ret = -EBUSY; + goto unlock; + } + + sd_state = v4l2_subdev_lock_and_get_active_state(sd); + ret = imx8mq_mipi_csi_start_stream(state, sd_state); + v4l2_subdev_unlock_state(sd_state); + + if (ret < 0) + goto unlock; + + ret = v4l2_subdev_call(state->src_sd, video, s_stream, 1); + if (ret < 0) + goto unlock; + + state->state |= ST_STREAMING; + } else { + v4l2_subdev_call(state->src_sd, video, s_stream, 0); + imx8mq_mipi_csi_stop_stream(state); + state->state &= ~ST_STREAMING; + } + + unlock: + mutex_unlock(&state->lock); + + if (!enable || ret < 0) + pm_runtime_put(state->dev); + + return ret; + } + + static int imx8mq_mipi_csi_init_cfg(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state) + { + struct v4l2_mbus_framefmt *fmt_sink; + struct v4l2_mbus_framefmt *fmt_source; + + fmt_sink = v4l2_subdev_get_pad_format(sd, sd_state, MIPI_CSI2_PAD_SINK); + fmt_source = v4l2_subdev_get_pad_format(sd, sd_state, MIPI_CSI2_PAD_SOURCE); + + fmt_sink->code = MEDIA_BUS_FMT_SGBRG10_1X10; + fmt_sink->width = MIPI_CSI2_DEF_PIX_WIDTH; + fmt_sink->height = MIPI_CSI2_DEF_PIX_HEIGHT; + fmt_sink->field = V4L2_FIELD_NONE; + + fmt_sink->colorspace = V4L2_COLORSPACE_RAW; + fmt_sink->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt_sink->colorspace); + fmt_sink->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt_sink->colorspace); + fmt_sink->quantization = + V4L2_MAP_QUANTIZATION_DEFAULT(false, fmt_sink->colorspace, + fmt_sink->ycbcr_enc); + + *fmt_source = *fmt_sink; + + return 0; + } + + static int imx8mq_mipi_csi_enum_mbus_code(struct v4l2_subdev *sd, + struct 
+ 					   struct v4l2_subdev_state *sd_state,
+ 					   struct v4l2_subdev_mbus_code_enum *code)
+ {
+ 	/*
+ 	 * We can't transcode in any way, the source format is identical
+ 	 * to the sink format.
+ 	 */
+ 	if (code->pad == MIPI_CSI2_PAD_SOURCE) {
+ 		struct v4l2_mbus_framefmt *fmt;
+
+ 		if (code->index > 0)
+ 			return -EINVAL;
+
+ 		fmt = v4l2_subdev_get_pad_format(sd, sd_state, code->pad);
+ 		code->code = fmt->code;
+ 		return 0;
+ 	}
+
+ 	if (code->pad != MIPI_CSI2_PAD_SINK)
+ 		return -EINVAL;
+
+ 	if (code->index >= ARRAY_SIZE(imx8mq_mipi_csi_formats))
+ 		return -EINVAL;
+
+ 	code->code = imx8mq_mipi_csi_formats[code->index].code;
+
+ 	return 0;
+ }
+
+ static int imx8mq_mipi_csi_set_fmt(struct v4l2_subdev *sd,
+ 				    struct v4l2_subdev_state *sd_state,
+ 				    struct v4l2_subdev_format *sdformat)
+ {
+ 	const struct csi2_pix_format *csi2_fmt;
+ 	struct v4l2_mbus_framefmt *fmt;
+
+ 	/*
+ 	 * The device can't transcode in any way, the source format can't be
+ 	 * modified.
+ 	 */
+ 	if (sdformat->pad == MIPI_CSI2_PAD_SOURCE)
+ 		return v4l2_subdev_get_fmt(sd, sd_state, sdformat);
+
+ 	if (sdformat->pad != MIPI_CSI2_PAD_SINK)
+ 		return -EINVAL;
+
+ 	csi2_fmt = find_csi2_format(sdformat->format.code);
+ 	if (!csi2_fmt)
+ 		csi2_fmt = &imx8mq_mipi_csi_formats[0];
+
+ 	fmt = v4l2_subdev_get_pad_format(sd, sd_state, sdformat->pad);
+
+ 	fmt->code = csi2_fmt->code;
+ 	fmt->width = sdformat->format.width;
+ 	fmt->height = sdformat->format.height;
+
+ 	sdformat->format = *fmt;
+
+ 	/* Propagate the format from sink to source. */
+ 	fmt = v4l2_subdev_get_pad_format(sd, sd_state, MIPI_CSI2_PAD_SOURCE);
+ 	*fmt = sdformat->format;
+
+ 	return 0;
+ }
+
+ static const struct v4l2_subdev_video_ops imx8mq_mipi_csi_video_ops = {
+ 	.s_stream = imx8mq_mipi_csi_s_stream,
+ };
+
+ static const struct v4l2_subdev_pad_ops imx8mq_mipi_csi_pad_ops = {
+ 	.init_cfg = imx8mq_mipi_csi_init_cfg,
+ 	.enum_mbus_code = imx8mq_mipi_csi_enum_mbus_code,
+ 	.get_fmt = v4l2_subdev_get_fmt,
+ 	.set_fmt = imx8mq_mipi_csi_set_fmt,
+ };
+
+ static const struct v4l2_subdev_ops imx8mq_mipi_csi_subdev_ops = {
+ 	.video = &imx8mq_mipi_csi_video_ops,
+ 	.pad = &imx8mq_mipi_csi_pad_ops,
+ };
+
+ /* -----------------------------------------------------------------------------
+  * Media entity operations
+  */
+
+ static const struct media_entity_operations imx8mq_mipi_csi_entity_ops = {
+ 	.link_validate = v4l2_subdev_link_validate,
+ 	.get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
+ };
+
+ /* -----------------------------------------------------------------------------
+  * Async subdev notifier
+  */
+
+ static struct csi_state *
+ mipi_notifier_to_csi2_state(struct v4l2_async_notifier *n)
+ {
+ 	return container_of(n, struct csi_state, notifier);
+ }
+
+ static int imx8mq_mipi_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ 					 struct v4l2_subdev *sd,
+ 					 struct v4l2_async_connection *asd)
+ {
+ 	struct csi_state *state = mipi_notifier_to_csi2_state(notifier);
+ 	struct media_pad *sink = &state->sd.entity.pads[MIPI_CSI2_PAD_SINK];
+
+ 	state->src_sd = sd;
+
+ 	return v4l2_create_fwnode_links_to_pad(sd, sink, MEDIA_LNK_FL_ENABLED |
+ 					       MEDIA_LNK_FL_IMMUTABLE);
+ }
+
+ static const struct v4l2_async_notifier_operations imx8mq_mipi_csi_notify_ops = {
+ 	.bound = imx8mq_mipi_csi_notify_bound,
+ };
+
+ static int imx8mq_mipi_csi_async_register(struct csi_state *state)
+ {
+ 	struct v4l2_fwnode_endpoint vep = {
+ 		.bus_type = V4L2_MBUS_CSI2_DPHY,
+ 	};
+ 	struct v4l2_async_connection *asd;
+ 	struct fwnode_handle *ep;
+ 	unsigned int i;
+ 	int ret;
+
+ 	v4l2_async_subdev_nf_init(&state->notifier, &state->sd);
+
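+ 	/*
+ 	 * Look up the sink endpoint (port 0) of this receiver, parse it as a
+ 	 * D-PHY CSI-2 bus and reject lane remapping, which the IP does not
+ 	 * support.
+ 	 */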
+ 	ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(state->dev), 0, 0,
+ 					     FWNODE_GRAPH_ENDPOINT_NEXT);
+ 	if (!ep)
+ 		return -ENOTCONN;
+
+ 	ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ 	if (ret)
+ 		goto err_parse;
+
+ 	for (i = 0; i < vep.bus.mipi_csi2.num_data_lanes; ++i) {
+ 		if (vep.bus.mipi_csi2.data_lanes[i] != i + 1) {
+ 			dev_err(state->dev,
+ 				"data lanes reordering is not supported");
+ 			ret = -EINVAL;
+ 			goto err_parse;
+ 		}
+ 	}
+
+ 	state->bus = vep.bus.mipi_csi2;
+
+ 	dev_dbg(state->dev, "data lanes: %d flags: 0x%08x\n",
+ 		state->bus.num_data_lanes,
+ 		state->bus.flags);
+
+ 	asd = v4l2_async_nf_add_fwnode_remote(&state->notifier, ep,
+ 					      struct v4l2_async_connection);
+ 	if (IS_ERR(asd)) {
+ 		ret = PTR_ERR(asd);
+ 		goto err_parse;
+ 	}
+
+ 	fwnode_handle_put(ep);
+
+ 	state->notifier.ops = &imx8mq_mipi_csi_notify_ops;
+
+ 	ret = v4l2_async_nf_register(&state->notifier);
+ 	if (ret)
+ 		return ret;
+
+ 	return v4l2_async_register_subdev(&state->sd);
+
+ err_parse:
+ 	fwnode_handle_put(ep);
+
+ 	return ret;
+ }
+
+ /* -----------------------------------------------------------------------------
+  * Suspend/resume
+  */
+
+ static void imx8mq_mipi_csi_pm_suspend(struct device *dev)
+ {
+ 	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ 	struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ 	mutex_lock(&state->lock);
+
+ 	if (state->state & ST_POWERED) {
+ 		imx8mq_mipi_csi_stop_stream(state);
+ 		imx8mq_mipi_csi_clk_disable(state);
+ 		state->state &= ~ST_POWERED;
+ 	}
+
+ 	mutex_unlock(&state->lock);
+ }
+
+ static int imx8mq_mipi_csi_pm_resume(struct device *dev)
+ {
+ 	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ 	struct csi_state *state = mipi_sd_to_csi2_state(sd);
+ 	struct v4l2_subdev_state *sd_state;
+ 	int ret = 0;
+
+ 	mutex_lock(&state->lock);
+
+ 	if (!(state->state & ST_POWERED)) {
+ 		state->state |= ST_POWERED;
+ 		ret = imx8mq_mipi_csi_clk_enable(state);
+ 	}
+ 	if (state->state & ST_STREAMING) {
+ 		sd_state = v4l2_subdev_lock_and_get_active_state(sd);
+ 		ret = imx8mq_mipi_csi_start_stream(state, sd_state);
+ 		v4l2_subdev_unlock_state(sd_state);
+ 		if (ret)
+ 			goto unlock;
+ 	}
+
+ 	state->state &= ~ST_SUSPENDED;
+
+ unlock:
+ 	mutex_unlock(&state->lock);
+
+ 	return ret ? -EAGAIN : 0;
+ }
+
+ static int __maybe_unused imx8mq_mipi_csi_suspend(struct device *dev)
+ {
+ 	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ 	struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ 	imx8mq_mipi_csi_pm_suspend(dev);
+
+ 	state->state |= ST_SUSPENDED;
+
+ 	return 0;
+ }
+
+ static int __maybe_unused imx8mq_mipi_csi_resume(struct device *dev)
+ {
+ 	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ 	struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ 	if (!(state->state & ST_SUSPENDED))
+ 		return 0;
+
+ 	return imx8mq_mipi_csi_pm_resume(dev);
+ }
+
+ static int __maybe_unused imx8mq_mipi_csi_runtime_suspend(struct device *dev)
+ {
+ 	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ 	struct csi_state *state = mipi_sd_to_csi2_state(sd);
+ 	int ret;
+
+ 	imx8mq_mipi_csi_pm_suspend(dev);
+
+ 	ret = icc_set_bw(state->icc_path, 0, 0);
+ 	if (ret)
+ 		dev_err(dev, "icc_set_bw failed with %d\n", ret);
+
+ 	return ret;
+ }
+
+ static int __maybe_unused imx8mq_mipi_csi_runtime_resume(struct device *dev)
+ {
+ 	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ 	struct csi_state *state = mipi_sd_to_csi2_state(sd);
+ 	int ret;
+
+ 	ret = icc_set_bw(state->icc_path, 0, state->icc_path_bw);
+ 	if (ret) {
+ 		dev_err(dev, "icc_set_bw failed with %d\n", ret);
+ 		return ret;
+ 	}
+
+ 	return imx8mq_mipi_csi_pm_resume(dev);
+ }
+
+ static const struct dev_pm_ops imx8mq_mipi_csi_pm_ops = {
+ 	SET_RUNTIME_PM_OPS(imx8mq_mipi_csi_runtime_suspend,
+ 			   imx8mq_mipi_csi_runtime_resume,
+ 			   NULL)
+ 	SET_SYSTEM_SLEEP_PM_OPS(imx8mq_mipi_csi_suspend, imx8mq_mipi_csi_resume)
+ };
+
+ /* -----------------------------------------------------------------------------
+  * Probe/remove & platform driver
+  */
+
+ static int imx8mq_mipi_csi_subdev_init(struct csi_state *state)
+ {
+ 	struct v4l2_subdev *sd = &state->sd;
+ 	int ret;
+
+ 	v4l2_subdev_init(sd, &imx8mq_mipi_csi_subdev_ops);
+ 	sd->owner = THIS_MODULE;
+ 	snprintf(sd->name, sizeof(sd->name), "%s %s",
+ 		 MIPI_CSI2_SUBDEV_NAME, dev_name(state->dev));
+
+ 	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ 	sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ 	sd->entity.ops = &imx8mq_mipi_csi_entity_ops;
+
+ 	sd->dev = state->dev;
+
+ 	state->pads[MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ 					      | MEDIA_PAD_FL_MUST_CONNECT;
+ 	state->pads[MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE
+ 						| MEDIA_PAD_FL_MUST_CONNECT;
+ 	ret = media_entity_pads_init(&sd->entity, MIPI_CSI2_PADS_NUM,
+ 				     state->pads);
+ 	if (ret)
+ 		return ret;
+
+ 	ret = v4l2_subdev_init_finalize(sd);
+ 	if (ret) {
+ 		media_entity_cleanup(&sd->entity);
+ 		return ret;
+ 	}
+
+ 	return 0;
+ }
+
+ static void imx8mq_mipi_csi_release_icc(struct platform_device *pdev)
+ {
+ 	struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev);
+ 	struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ 	icc_put(state->icc_path);
+ }
+
+ static int imx8mq_mipi_csi_init_icc(struct platform_device *pdev)
+ {
+ 	struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev);
+ 	struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ 	/* Optional interconnect request */
+ 	state->icc_path = of_icc_get(&pdev->dev, "dram");
+ 	if (IS_ERR_OR_NULL(state->icc_path))
+ 		return PTR_ERR_OR_ZERO(state->icc_path);
+
+ 	state->icc_path_bw = MBps_to_icc(700);
+
+ 	return 0;
+ }
+
+ static int imx8mq_mipi_csi_parse_dt(struct csi_state *state)
+ {
+ 	struct device *dev = state->dev;
+ 	struct device_node *np = state->dev->of_node;
+ 	struct device_node *node;
+ 	phandle ph;
+ 	u32 out_val[2];
+ 	int ret = 0;
+
+ 	state->rst = devm_reset_control_array_get_exclusive(dev);
+ 	if (IS_ERR(state->rst)) {
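+ 		/*
+ 		 * state->rst holds an ERR_PTR() here; the %pe specifier below
+ 		 * prints it as a symbolic error name (e.g. -EPROBE_DEFER).
+ 		 */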
dev_err(dev, "Failed to get reset: %pe\n", state->rst); + return PTR_ERR(state->rst); + } + + ret = of_property_read_u32_array(np, "fsl,mipi-phy-gpr", out_val, + ARRAY_SIZE(out_val)); + if (ret) { + dev_err(dev, "no fsl,mipi-phy-gpr property found: %d\n", ret); + return ret; + } + + ph = *out_val; + + node = of_find_node_by_phandle(ph); + if (!node) { + dev_err(dev, "Error finding node by phandle\n"); + return -ENODEV; + } + state->phy_gpr = syscon_node_to_regmap(node); + of_node_put(node); + if (IS_ERR(state->phy_gpr)) { + dev_err(dev, "failed to get gpr regmap: %pe\n", state->phy_gpr); + return PTR_ERR(state->phy_gpr); + } + + state->phy_gpr_reg = out_val[1]; + dev_dbg(dev, "phy gpr register set to 0x%x\n", state->phy_gpr_reg); + + return ret; + } + + static int imx8mq_mipi_csi_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; + struct csi_state *state; + int ret; + + state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + state->dev = dev; + + ret = imx8mq_mipi_csi_parse_dt(state); + if (ret < 0) { + dev_err(dev, "Failed to parse device tree: %d\n", ret); + return ret; + } + + /* Acquire resources. */ + state->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(state->regs)) + return PTR_ERR(state->regs); + + ret = imx8mq_mipi_csi_clk_get(state); + if (ret < 0) + return ret; + + platform_set_drvdata(pdev, &state->sd); + + mutex_init(&state->lock); + + ret = imx8mq_mipi_csi_subdev_init(state); + if (ret < 0) + goto mutex; + + ret = imx8mq_mipi_csi_init_icc(pdev); + if (ret) + goto mutex; + + /* Enable runtime PM. */ + pm_runtime_enable(dev); + if (!pm_runtime_enabled(dev)) { + ret = imx8mq_mipi_csi_runtime_resume(dev); + if (ret < 0) + goto icc; + } + + ret = imx8mq_mipi_csi_async_register(state); + if (ret < 0) + goto cleanup; + + return 0; + + cleanup: + pm_runtime_disable(&pdev->dev); + imx8mq_mipi_csi_runtime_suspend(&pdev->dev); + + media_entity_cleanup(&state->sd.entity); + v4l2_subdev_cleanup(&state->sd); + v4l2_async_nf_unregister(&state->notifier); + v4l2_async_nf_cleanup(&state->notifier); + v4l2_async_unregister_subdev(&state->sd); + icc: + imx8mq_mipi_csi_release_icc(pdev); + mutex: + mutex_destroy(&state->lock); + + return ret; + } + + static void imx8mq_mipi_csi_remove(struct platform_device *pdev) + { + struct v4l2_subdev *sd = platform_get_drvdata(pdev); + struct csi_state *state = mipi_sd_to_csi2_state(sd); + + v4l2_async_nf_unregister(&state->notifier); + v4l2_async_nf_cleanup(&state->notifier); + v4l2_async_unregister_subdev(&state->sd); + + pm_runtime_disable(&pdev->dev); + imx8mq_mipi_csi_runtime_suspend(&pdev->dev); + media_entity_cleanup(&state->sd.entity); + v4l2_subdev_cleanup(&state->sd); + mutex_destroy(&state->lock); + pm_runtime_set_suspended(&pdev->dev); + imx8mq_mipi_csi_release_icc(pdev); + } + + static const struct of_device_id imx8mq_mipi_csi_of_match[] = { + { .compatible = "fsl,imx8mq-mipi-csi2", }, + { /* sentinel */ }, + }; + MODULE_DEVICE_TABLE(of, imx8mq_mipi_csi_of_match); + + static struct platform_driver imx8mq_mipi_csi_driver = { + .probe = imx8mq_mipi_csi_probe, + .remove_new = imx8mq_mipi_csi_remove, + .driver = { + .of_match_table = imx8mq_mipi_csi_of_match, + .name = MIPI_CSI2_DRIVER_NAME, + .pm = &imx8mq_mipi_csi_pm_ops, + }, + }; + + module_platform_driver(imx8mq_mipi_csi_driver); + + MODULE_DESCRIPTION("i.MX8MQ MIPI CSI-2 receiver driver"); + MODULE_AUTHOR("Martin Kepplinger "); + MODULE_LICENSE("GPL v2"); + MODULE_ALIAS("platform:imx8mq-mipi-csi2");