1 // SPDX-License-Identifier: GPL-2.0
3 * Hantro VPU codec driver
5 * Copyright (C) 2018 Collabora, Ltd.
6 * Copyright 2018 Google LLC.
7 * Tomasz Figa <tfiga@chromium.org>
9 * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
10 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
13 #include <linux/clk.h>
14 #include <linux/module.h>
16 #include <linux/platform_device.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/slab.h>
20 #include <linux/videodev2.h>
21 #include <linux/workqueue.h>
22 #include <media/v4l2-event.h>
23 #include <media/v4l2-mem2mem.h>
24 #include <media/videobuf2-core.h>
25 #include <media/videobuf2-vmalloc.h>
27 #include "hantro_v4l2.h"
29 #include "hantro_hw.h"
31 #define DRIVER_NAME "hantro-vpu"
/* Module-level debug verbosity; writable at runtime via sysfs (mode 0644). */
34 module_param_named(debug, hantro_debug, int, 0644);
35 MODULE_PARM_DESC(debug,
36 "Debug level - higher value produces more verbose messages");
38 void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
40 struct v4l2_ctrl *ctrl;
42 ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, id);
43 return ctrl ? ctrl->p_cur.p : NULL;
46 dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
48 struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
49 struct vb2_buffer *buf;
52 index = vb2_find_timestamp(q, ts, 0);
55 buf = vb2_get_buffer(q, index);
56 return hantro_get_dec_buf_addr(ctx, buf);
/*
 * hantro_job_finish() - complete the currently running m2m job.
 * @vpu:    device instance
 * @ctx:    context whose job is finishing
 * @result: buffer state to report to userspace (DONE or ERROR)
 *
 * Releases the PM/clock references taken when the job started, stamps the
 * source/destination buffers with sequence numbers and hands them back to
 * the m2m framework so the next job can be scheduled.
 */
59 static void hantro_job_finish(struct hantro_dev *vpu,
60 struct hantro_ctx *ctx,
61 enum vb2_buffer_state result)
63 struct vb2_v4l2_buffer *src, *dst;
/* Drop the runtime-PM and clock references acquired in device_run(). */
65 pm_runtime_mark_last_busy(vpu->dev);
66 pm_runtime_put_autosuspend(vpu->dev);
67 clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
69 src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
70 dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
/* Per-context sequence counters for OUTPUT and CAPTURE queues. */
77 src->sequence = ctx->sequence_out++;
78 dst->sequence = ctx->sequence_cap++;
/* Mark both buffers done with @result and finish the m2m job. */
80 v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
/*
 * hantro_irq_done() - common bottom half for codec interrupts.
 * @vpu:    device instance
 * @result: buffer state determined by the IRQ handler
 *
 * Called from the variant IRQ handlers once the hardware signals
 * completion. Cancels the watchdog; if cancellation succeeds the job is
 * finished here, otherwise the watchdog already owns job completion.
 */
84 void hantro_irq_done(struct hantro_dev *vpu,
85 enum vb2_buffer_state result)
87 struct hantro_ctx *ctx =
88 v4l2_m2m_get_curr_priv(vpu->m2m_dev);
91 * If cancel_delayed_work returns false
92 * the timeout expired. The watchdog is running,
93 * and will take care of finishing the job.
95 if (cancel_delayed_work(&vpu->watchdog_work)) {
/* Give the codec a chance to post-process results before completion. */
96 if (result == VB2_BUF_STATE_DONE && ctx->codec_ops->done)
97 ctx->codec_ops->done(ctx);
98 hantro_job_finish(vpu, ctx, result);
/*
 * hantro_watchdog() - delayed-work handler for hung frame processing.
 * @work: embedded delayed work in struct hantro_dev
 *
 * Fires when the hardware fails to raise a completion interrupt in time;
 * resets the codec and fails the current job with VB2_BUF_STATE_ERROR.
 */
102 void hantro_watchdog(struct work_struct *work)
104 struct hantro_dev *vpu;
105 struct hantro_ctx *ctx;
107 vpu = container_of(to_delayed_work(work),
108 struct hantro_dev, watchdog_work);
109 ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
111 vpu_err("frame processing timed out!\n");
/* Reset hardware state before reporting the job as failed. */
112 ctx->codec_ops->reset(ctx);
113 hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
/*
 * hantro_start_prepare_run() - per-job setup before programming hardware.
 * @ctx: context about to run
 *
 * Applies the controls attached to the source buffer's media request, and
 * for decoders enables or disables the post-processor depending on the
 * selected capture format.
 */
117 void hantro_start_prepare_run(struct hantro_ctx *ctx)
119 struct vb2_v4l2_buffer *src_buf;
/* Apply request-attached control values for this frame. */
121 src_buf = hantro_get_src_buf(ctx);
122 v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
125 if (!ctx->is_encoder) {
126 if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
127 hantro_postproc_enable(ctx);
129 hantro_postproc_disable(ctx);
/*
 * hantro_end_prepare_run() - per-job teardown after programming hardware.
 * @ctx: context that was just programmed
 *
 * Completes the media request bound to the source buffer and arms the
 * watchdog so a silent hardware hang is detected within 2 seconds.
 */
133 void hantro_end_prepare_run(struct hantro_ctx *ctx)
135 struct vb2_v4l2_buffer *src_buf;
137 src_buf = hantro_get_src_buf(ctx);
138 v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
141 /* Kick the watchdog. */
142 schedule_delayed_work(&ctx->dev->watchdog_work,
143 msecs_to_jiffies(2000));
/*
 * device_run() - m2m framework callback that starts one codec job.
 * @priv: the hantro_ctx registered at v4l2_m2m_ctx_init() time
 *
 * Enables clocks, takes a runtime-PM reference, copies buffer metadata
 * from source to destination and hands control to the codec-specific
 * run() hook. On failure the job is finished with VB2_BUF_STATE_ERROR.
 */
146 static void device_run(void *priv)
148 struct hantro_ctx *ctx = priv;
149 struct vb2_v4l2_buffer *src, *dst;
152 src = hantro_get_src_buf(ctx);
153 dst = hantro_get_dst_buf(ctx);
155 ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
/*
 * NOTE(review): pm_runtime_get_sync() raises the usage count even when it
 * fails; the error path (elided from this view) should do a
 * pm_runtime_put_noidle() or use pm_runtime_resume_and_get() to avoid
 * leaking a PM reference - verify against the full file.
 */
158 ret = pm_runtime_get_sync(ctx->dev->dev);
/* Propagate timestamps/flags from the OUTPUT to the CAPTURE buffer. */
162 v4l2_m2m_buf_copy_metadata(src, dst, true);
164 ctx->codec_ops->run(ctx);
/* Error path: fail the job so the m2m core can schedule the next one. */
168 hantro_job_finish(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
/* mem2mem framework hooks; device_run() is invoked per schedulable job. */
171 static struct v4l2_m2m_ops vpu_m2m_ops = {
172 .device_run = device_run,
/*
 * queue_init() - configure the OUTPUT and CAPTURE vb2 queues for a context.
 * @priv:   the hantro_ctx passed to v4l2_m2m_ctx_init()
 * @src_vq: OUTPUT (bitstream/raw input) queue
 * @dst_vq: CAPTURE (result) queue
 *
 * The source queue always uses dma-contig memory and supports media
 * requests. The destination queue uses vmalloc memory for encoders (CPU
 * assembles the JPEG stream) and dma-contig otherwise.
 */
176 queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
178 struct hantro_ctx *ctx = priv;
181 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
182 src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
183 src_vq->drv_priv = ctx;
184 src_vq->ops = &hantro_queue_ops;
185 src_vq->mem_ops = &vb2_dma_contig_memops;
188 * Driver does mostly sequential access, so sacrifice TLB efficiency
189 * for faster allocation. Also, no CPU access on the source queue,
190 * so no kernel mapping needed.
192 src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
193 DMA_ATTR_NO_KERNEL_MAPPING;
194 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
195 src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
196 src_vq->lock = &ctx->dev->vpu_mutex;
197 src_vq->dev = ctx->dev->v4l2_dev.dev;
/* OUTPUT buffers may arrive via media requests (stateless decoding). */
198 src_vq->supports_requests = true;
200 ret = vb2_queue_init(src_vq);
205 * When encoding, the CAPTURE queue doesn't need dma memory,
206 * as the CPU needs to create the JPEG frames, from the
207 * hardware-produced JPEG payload.
209 * For the DMA destination buffer, we use a bounce buffer.
211 if (ctx->is_encoder) {
212 dst_vq->mem_ops = &vb2_vmalloc_memops;
/* Decoder path: hardware may both read (references) and write buffers. */
214 dst_vq->bidirectional = true;
215 dst_vq->mem_ops = &vb2_dma_contig_memops;
216 dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
217 DMA_ATTR_NO_KERNEL_MAPPING;
220 dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
221 dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
222 dst_vq->drv_priv = ctx;
223 dst_vq->ops = &hantro_queue_ops;
224 dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
225 dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
226 dst_vq->lock = &ctx->dev->vpu_mutex;
227 dst_vq->dev = ctx->dev->v4l2_dev.dev;
229 return vb2_queue_init(dst_vq);
/*
 * hantro_try_ctrl() - validate control values against hardware limits.
 * @ctrl: control being set
 *
 * Rejects H.264 SPS parameters this hardware cannot decode: chroma
 * formats beyond 4:2:0, mismatched luma/chroma bit depths, and any bit
 * depth other than 8.
 */
232 static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
234 if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_SPS) {
235 const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
237 if (sps->chroma_format_idc > 1)
238 /* Only 4:0:0 and 4:2:0 are supported */
240 if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
241 /* Luma and chroma bit depth mismatch */
243 if (sps->bit_depth_luma_minus8 != 0)
244 /* Only 8-bit is supported */
/*
 * hantro_jpeg_s_ctrl() - apply JPEG-encoder control changes.
 * @ctrl: control being set
 *
 * Caches the requested JPEG compression quality on the context; the
 * encoder job setup reads ctx->jpeg_quality when building tables.
 */
250 static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
252 struct hantro_ctx *ctx;
254 ctx = container_of(ctrl->handler,
255 struct hantro_ctx, ctrl_handler);
257 vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
260 case V4L2_CID_JPEG_COMPRESSION_QUALITY:
261 ctx->jpeg_quality = ctrl->val;
/* Generic control ops: only validation (try_ctrl), no set hook. */
270 static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
271 .try_ctrl = hantro_try_ctrl,
/* JPEG-specific control ops: applies compression quality via s_ctrl. */
274 static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = {
275 .s_ctrl = hantro_jpeg_s_ctrl,
/*
 * Table of all controls the driver can expose; each entry is gated by the
 * codec mask so a context only registers the controls its codec needs
 * (see hantro_ctrls_setup()).
 */
278 static const struct hantro_ctrl controls[] = {
280 .codec = HANTRO_JPEG_ENCODER,
282 .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
287 .ops = &hantro_jpeg_ctrl_ops,
290 .codec = HANTRO_MPEG2_DECODER,
292 .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
295 .codec = HANTRO_MPEG2_DECODER,
297 .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
300 .codec = HANTRO_VP8_DECODER,
302 .id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER,
305 .codec = HANTRO_H264_DECODER,
307 .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS,
310 .codec = HANTRO_H264_DECODER,
312 .id = V4L2_CID_MPEG_VIDEO_H264_SPS,
/* SPS goes through hantro_try_ctrl() to reject unsupported streams. */
313 .ops = &hantro_ctrl_ops,
316 .codec = HANTRO_H264_DECODER,
318 .id = V4L2_CID_MPEG_VIDEO_H264_PPS,
321 .codec = HANTRO_H264_DECODER,
323 .id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX,
/* Hardware is frame-based only: min == max == def pins the menu. */
326 .codec = HANTRO_H264_DECODER,
328 .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE,
329 .min = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
330 .def = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
331 .max = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
/* Annex-B start codes are the only accepted bitstream framing. */
334 .codec = HANTRO_H264_DECODER,
336 .id = V4L2_CID_MPEG_VIDEO_H264_START_CODE,
337 .min = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
338 .def = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
339 .max = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
342 .codec = HANTRO_H264_DECODER,
344 .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
345 .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
346 .max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
348 BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
349 .def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
/*
 * hantro_ctrls_setup() - register the controls a context's codec needs.
 * @vpu: device instance
 * @ctx: context whose control handler is initialized
 *
 * Walks the controls[] table, registering only entries whose codec mask
 * intersects the allowed codecs for this function (encoder/decoder).
 * Frees the handler and returns its error code on failure.
 */
355 static int hantro_ctrls_setup(struct hantro_dev *vpu,
356 struct hantro_ctx *ctx,
359 int i, num_ctrls = ARRAY_SIZE(controls);
361 v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);
363 for (i = 0; i < num_ctrls; i++) {
/* Skip controls not applicable to this codec function. */
364 if (!(allowed_codecs & controls[i].codec))
367 v4l2_ctrl_new_custom(&ctx->ctrl_handler,
368 &controls[i].cfg, NULL);
369 if (ctx->ctrl_handler.error) {
370 vpu_err("Adding control (%d) failed %d\n",
372 ctx->ctrl_handler.error);
373 v4l2_ctrl_handler_free(&ctx->ctrl_handler);
374 return ctx->ctrl_handler.error;
/* Push initial control values to the driver (s_ctrl callbacks). */
377 return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
381 * V4L2 file operations.
/*
 * hantro_open() - file open handler; creates a per-open codec context.
 * @filp: file being opened
 *
 * Allocates a hantro_ctx, selects the allowed codec set based on whether
 * the opened video device is the encoder or decoder function, sets up the
 * m2m context, file handle, default formats and controls.
 */
384 static int hantro_open(struct file *filp)
386 struct hantro_dev *vpu = video_drvdata(filp);
387 struct video_device *vdev = video_devdata(filp);
388 struct hantro_func *func = hantro_vdev_to_func(vdev);
389 struct hantro_ctx *ctx;
390 int allowed_codecs, ret;
393 * We do not need any extra locking here, because we operate only
394 * on local data here, except reading few fields from dev, which
395 * do not change through device's lifetime (which is guaranteed by
396 * reference on module from open()) and V4L2 internal objects (such
397 * as vdev and ctx->fh), which have proper locking done in respective
398 * helper functions used here.
401 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
/* Restrict the context to the codecs of the opened function. */
406 if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
407 allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
408 ctx->is_encoder = true;
409 } else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
410 allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
411 ctx->is_encoder = false;
417 ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
418 if (IS_ERR(ctx->fh.m2m_ctx)) {
419 ret = PTR_ERR(ctx->fh.m2m_ctx);
423 v4l2_fh_init(&ctx->fh, vdev);
424 filp->private_data = &ctx->fh;
425 v4l2_fh_add(&ctx->fh);
/* Initialize default OUTPUT/CAPTURE formats for this context. */
427 hantro_reset_fmts(ctx);
429 ret = hantro_ctrls_setup(vpu, ctx, allowed_codecs);
431 vpu_err("Failed to set up controls\n");
434 ctx->fh.ctrl_handler = &ctx->ctrl_handler;
/* Error unwind (labels elided from this view). */
439 v4l2_fh_del(&ctx->fh);
440 v4l2_fh_exit(&ctx->fh);
/*
 * hantro_release() - file release handler; tears down the context.
 * @filp: file being closed
 *
 * Releases the m2m context (which stops any streaming), detaches the file
 * handle and frees the control handler.
 */
446 static int hantro_release(struct file *filp)
448 struct hantro_ctx *ctx =
449 container_of(filp->private_data, struct hantro_ctx, fh);
452 * No need for extra locking because this was the last reference
455 v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
456 v4l2_fh_del(&ctx->fh);
457 v4l2_fh_exit(&ctx->fh);
458 v4l2_ctrl_handler_free(&ctx->ctrl_handler);
/* V4L2 file operations; m2m helpers handle poll and mmap. */
464 static const struct v4l2_file_operations hantro_fops = {
465 .owner = THIS_MODULE,
467 .release = hantro_release,
468 .poll = v4l2_m2m_fop_poll,
469 .unlocked_ioctl = video_ioctl2,
470 .mmap = v4l2_m2m_fop_mmap,
/* DT match table; .data points at the per-SoC hantro_variant descriptor. */
473 static const struct of_device_id of_hantro_match[] = {
474 #ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
475 { .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
476 { .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
477 { .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
479 #ifdef CONFIG_VIDEO_HANTRO_IMX8M
480 { .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
484 MODULE_DEVICE_TABLE(of, of_hantro_match);
/*
 * hantro_register_entity() - create and register one media entity.
 * @mdev:        media device to register with
 * @entity:      entity to initialize
 * @entity_name: suffix appended to the video device name
 * @pads:        pad array for the entity
 * @num_pads:    number of pads
 * @function:    media entity function (MEDIA_ENT_F_*)
 * @vdev:        video device the entity name/devnode info derives from
 */
486 static int hantro_register_entity(struct media_device *mdev,
487 struct media_entity *entity,
488 const char *entity_name,
489 struct media_pad *pads, int num_pads,
490 int function, struct video_device *vdev)
495 entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
/* I/O entities carry the video devnode's major/minor numbers. */
496 if (function == MEDIA_ENT_F_IO_V4L) {
497 entity->info.dev.major = VIDEO_MAJOR;
498 entity->info.dev.minor = vdev->minor;
/* Entity name is "<vdev name>-<entity_name>", devm-managed. */
501 name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
507 entity->function = function;
509 ret = media_entity_pads_init(entity, num_pads, pads);
513 ret = media_device_register_entity(mdev, entity);
/*
 * hantro_attach_func() - build the media-controller topology for one
 * codec function (encoder or decoder).
 * @vpu:  device instance
 * @func: function (video device + entities) being attached
 *
 * Creates three entities (source I/O, processing, sink I/O), links them
 * source -> proc -> sink, creates the V4L interface devnode and links
 * both I/O entities to it. Error paths unwind in reverse creation order
 * (labels elided from this view).
 */
520 static int hantro_attach_func(struct hantro_dev *vpu,
521 struct hantro_func *func)
523 struct media_device *mdev = &vpu->mdev;
524 struct media_link *link;
527 /* Create the three encoder entities with their pads */
528 func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
529 ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
530 &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
/* Processing entity has a sink pad (in) and a source pad (out). */
535 func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
536 func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
537 ret = hantro_register_entity(mdev, &func->proc, "proc",
538 func->proc_pads, 2, func->id,
541 goto err_rel_entity0;
543 func->sink_pad.flags = MEDIA_PAD_FL_SINK;
544 ret = hantro_register_entity(mdev, &func->sink, "sink",
545 &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
548 goto err_rel_entity1;
550 /* Connect the three entities */
551 ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
552 MEDIA_LNK_FL_IMMUTABLE |
553 MEDIA_LNK_FL_ENABLED);
555 goto err_rel_entity2;
557 ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
558 MEDIA_LNK_FL_IMMUTABLE |
559 MEDIA_LNK_FL_ENABLED);
563 /* Create video interface */
564 func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
567 if (!func->intf_devnode) {
572 /* Connect the two DMA engines to the interface */
573 link = media_create_intf_link(&func->vdev.entity,
574 &func->intf_devnode->intf,
575 MEDIA_LNK_FL_IMMUTABLE |
576 MEDIA_LNK_FL_ENABLED);
582 link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
583 MEDIA_LNK_FL_IMMUTABLE |
584 MEDIA_LNK_FL_ENABLED);
/* Unwind: remove devnode, links and entities in reverse order. */
592 media_devnode_remove(func->intf_devnode);
595 media_entity_remove_links(&func->sink);
598 media_entity_remove_links(&func->proc);
599 media_entity_remove_links(&func->vdev.entity);
602 media_device_unregister_entity(&func->sink);
605 media_device_unregister_entity(&func->proc);
608 media_device_unregister_entity(&func->vdev.entity);
/*
 * hantro_detach_func() - tear down the topology built by
 * hantro_attach_func(): devnode first, then links, then entities.
 * @func: function being detached
 */
612 static void hantro_detach_func(struct hantro_func *func)
614 media_devnode_remove(func->intf_devnode);
615 media_entity_remove_links(&func->sink);
616 media_entity_remove_links(&func->proc);
617 media_entity_remove_links(&func->vdev.entity);
618 media_device_unregister_entity(&func->sink);
619 media_device_unregister_entity(&func->proc);
620 media_device_unregister_entity(&func->vdev.entity);
/*
 * hantro_add_func() - create and register one video device function.
 * @vpu:    device instance
 * @funcid: MEDIA_ENT_F_PROC_VIDEO_ENCODER or _DECODER
 *
 * Allocates the hantro_func, configures and registers its video device,
 * and attaches it to the media controller. The device name combines the
 * DT compatible string with an "enc"/"dec" suffix.
 */
623 static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
625 const struct of_device_id *match;
626 struct hantro_func *func;
627 struct video_device *vfd;
630 match = of_match_node(of_hantro_match, vpu->dev->of_node);
631 func = devm_kzalloc(vpu->dev, sizeof(*func), GFP_KERNEL);
633 v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
640 vfd->fops = &hantro_fops;
641 vfd->release = video_device_release_empty;
642 vfd->lock = &vpu->vpu_mutex;
643 vfd->v4l2_dev = &vpu->v4l2_dev;
644 vfd->vfl_dir = VFL_DIR_M2M;
645 vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
646 vfd->ioctl_ops = &hantro_ioctl_ops;
647 snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
648 funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");
650 if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
655 video_set_drvdata(vfd, vpu);
657 ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
659 v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
663 ret = hantro_attach_func(vpu, func);
665 v4l2_err(&vpu->v4l2_dev,
666 "Failed to attach functionality to the media device\n");
670 v4l2_info(&vpu->v4l2_dev, "registered %s as /dev/video%d\n", vfd->name,
/* Error path: undo the video device registration. */
676 video_unregister_device(vfd);
/* Register the encoder function, but only if the variant has encoder formats. */
680 static int hantro_add_enc_func(struct hantro_dev *vpu)
682 if (!vpu->variant->enc_fmts)
685 return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
/* Register the decoder function, but only if the variant has decoder formats. */
688 static int hantro_add_dec_func(struct hantro_dev *vpu)
690 if (!vpu->variant->dec_fmts)
693 return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
/*
 * hantro_remove_func() - detach and unregister one codec function.
 * @vpu:    device instance
 * @funcid: selects the encoder or decoder function to remove
 */
696 static void hantro_remove_func(struct hantro_dev *vpu,
699 struct hantro_func *func;
701 if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
/* Detach media topology before unregistering the video device. */
709 hantro_detach_func(func);
710 video_unregister_device(&func->vdev);
/* Convenience wrapper: remove the encoder function. */
713 static void hantro_remove_enc_func(struct hantro_dev *vpu)
715 hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
/* Convenience wrapper: remove the decoder function. */
718 static void hantro_remove_dec_func(struct hantro_dev *vpu)
720 hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
/* Media-request hooks: standard vb2 validation and m2m request queueing. */
723 static const struct media_device_ops hantro_m2m_media_ops = {
724 .req_validate = vb2_request_validate,
725 .req_queue = v4l2_m2m_request_queue,
/*
 * hantro_probe() - platform driver probe.
 * @pdev: platform device
 *
 * Sets up clocks, register mappings, DMA masks and IRQs from the matched
 * variant descriptor, then registers the V4L2/m2m/media-controller stack
 * and the encoder/decoder video devices. Error paths unwind in reverse
 * (labels elided from this view).
 */
728 static int hantro_probe(struct platform_device *pdev)
730 const struct of_device_id *match;
731 struct hantro_dev *vpu;
732 struct resource *res;
736 vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
740 vpu->dev = &pdev->dev;
742 mutex_init(&vpu->vpu_mutex);
743 spin_lock_init(&vpu->irqlock);
745 match = of_match_node(of_hantro_match, pdev->dev.of_node);
746 vpu->variant = match->data;
748 INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);
/* Acquire the variant's named clocks as one bulk set. */
750 vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
751 sizeof(*vpu->clocks), GFP_KERNEL);
755 for (i = 0; i < vpu->variant->num_clocks; i++)
756 vpu->clocks[i].id = vpu->variant->clk_names[i];
757 ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
/* Map the register banks; variants without names use resource 0. */
762 num_bases = vpu->variant->num_regs ?: 1;
763 vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
764 sizeof(*vpu->reg_bases), GFP_KERNEL);
768 for (i = 0; i < num_bases; i++) {
769 res = vpu->variant->reg_names ?
770 platform_get_resource_byname(vpu->pdev, IORESOURCE_MEM,
771 vpu->variant->reg_names[i]) :
772 platform_get_resource(vpu->pdev, IORESOURCE_MEM, 0);
773 vpu->reg_bases[i] = devm_ioremap_resource(vpu->dev, res);
774 if (IS_ERR(vpu->reg_bases[i]))
775 return PTR_ERR(vpu->reg_bases[i]);
/* Encoder/decoder register windows are offsets into base 0. */
777 vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
778 vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;
780 ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
782 dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
785 vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
/* Request each variant IRQ that has a handler attached. */
787 for (i = 0; i < vpu->variant->num_irqs; i++) {
788 const char *irq_name = vpu->variant->irqs[i].name;
791 if (!vpu->variant->irqs[i].handler)
794 irq = platform_get_irq_byname(vpu->pdev, irq_name);
798 ret = devm_request_irq(vpu->dev, irq,
799 vpu->variant->irqs[i].handler, 0,
800 dev_name(vpu->dev), vpu);
802 dev_err(vpu->dev, "Could not request %s IRQ.\n",
/* Variant-specific hardware initialization hook. */
808 ret = vpu->variant->init(vpu);
810 dev_err(&pdev->dev, "Failed to init VPU hardware\n");
/* Runtime PM with a short (100 ms) autosuspend delay. */
814 pm_runtime_set_autosuspend_delay(vpu->dev, 100);
815 pm_runtime_use_autosuspend(vpu->dev);
816 pm_runtime_enable(vpu->dev);
818 ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
820 dev_err(&pdev->dev, "Failed to prepare clocks\n");
824 ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
826 dev_err(&pdev->dev, "Failed to register v4l2 device\n");
827 goto err_clk_unprepare;
829 platform_set_drvdata(pdev, vpu);
831 vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
832 if (IS_ERR(vpu->m2m_dev)) {
833 v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
834 ret = PTR_ERR(vpu->m2m_dev);
838 vpu->mdev.dev = vpu->dev;
839 strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
/*
 * NOTE(review): the size bound below is sizeof(vpu->mdev.model) while
 * the destination is mdev.bus_info - this should almost certainly be
 * sizeof(vpu->mdev.bus_info); verify against struct media_device.
 */
840 strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
841 sizeof(vpu->mdev.model));
842 media_device_init(&vpu->mdev);
843 vpu->mdev.ops = &hantro_m2m_media_ops;
844 vpu->v4l2_dev.mdev = &vpu->mdev;
846 ret = hantro_add_enc_func(vpu);
848 dev_err(&pdev->dev, "Failed to register encoder\n");
852 ret = hantro_add_dec_func(vpu);
854 dev_err(&pdev->dev, "Failed to register decoder\n");
855 goto err_rm_enc_func;
858 ret = media_device_register(&vpu->mdev);
860 v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
861 goto err_rm_dec_func;
/* Error unwind in reverse order of setup (labels elided from view). */
867 hantro_remove_dec_func(vpu);
869 hantro_remove_enc_func(vpu);
871 media_device_cleanup(&vpu->mdev);
872 v4l2_m2m_release(vpu->m2m_dev);
874 v4l2_device_unregister(&vpu->v4l2_dev);
876 clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
877 pm_runtime_dont_use_autosuspend(vpu->dev);
878 pm_runtime_disable(vpu->dev);
/*
 * hantro_remove() - platform driver remove; mirrors probe teardown.
 * @pdev: platform device being removed
 */
882 static int hantro_remove(struct platform_device *pdev)
884 struct hantro_dev *vpu = platform_get_drvdata(pdev);
886 v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
/* Unregister in reverse order of probe-time registration. */
888 media_device_unregister(&vpu->mdev);
889 hantro_remove_dec_func(vpu);
890 hantro_remove_enc_func(vpu);
891 media_device_cleanup(&vpu->mdev);
892 v4l2_m2m_release(vpu->m2m_dev);
893 v4l2_device_unregister(&vpu->v4l2_dev);
894 clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
895 pm_runtime_dont_use_autosuspend(vpu->dev);
896 pm_runtime_disable(vpu->dev);
/*
 * hantro_runtime_resume() - runtime-PM resume; defers to the variant's
 * hook when one is provided.
 * @dev: device being resumed
 */
901 static int hantro_runtime_resume(struct device *dev)
903 struct hantro_dev *vpu = dev_get_drvdata(dev);
905 if (vpu->variant->runtime_resume)
906 return vpu->variant->runtime_resume(vpu);
/* System sleep is forced through runtime PM; only resume is customized. */
912 static const struct dev_pm_ops hantro_pm_ops = {
913 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
914 pm_runtime_force_resume)
915 SET_RUNTIME_PM_OPS(NULL, hantro_runtime_resume, NULL)
/* Platform driver glue: probe/remove, DT matching, PM operations. */
918 static struct platform_driver hantro_driver = {
919 .probe = hantro_probe,
920 .remove = hantro_remove,
923 .of_match_table = of_match_ptr(of_hantro_match),
924 .pm = &hantro_pm_ops,
/* Standard module boilerplate: registration, license and authorship. */
927 module_platform_driver(hantro_driver);
929 MODULE_LICENSE("GPL v2");
930 MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
931 MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
932 MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
933 MODULE_DESCRIPTION("Hantro VPU codec driver");