/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/reservation.h>

#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>

#include "uapi/drm/vc4_drm.h"
/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this enum.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};
/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	/* Tracks the number of users of the perfmon, when this counter reaches
	 * zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[0];
};
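
/* Illustrative sketch, not code from this driver: because "counters" is a
 * flexible array member, a perfmon with N active counters would be allocated
 * as a single block, along the lines of:
 *
 *	perfmon = kzalloc(sizeof(*perfmon) + N * sizeof(u64), GFP_KERNEL);
 *	refcount_set(&perfmon->refcnt, 1);
 *	perfmon->ncounters = N;
 */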

struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;
	struct vc4_dsi *dsi1;
	struct vc4_vec *vec;

	struct vc4_hang_state *hang_state;
	/* The kernel-space BO cache.  Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;
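
	/* Illustrative sketch (assumes a hypothetical lookup helper in
	 * vc4_bo.c): an allocation of a given page count can reuse a
	 * cached BO in O(1) by popping the matching size bucket:
	 *
	 *	struct list_head *bucket =
	 *		&vc4->bo_cache.size_list[num_pages];
	 *
	 *	if (!list_empty(bucket)) {
	 *		bo = list_first_entry(bucket, struct vc4_bo, size_head);
	 *		list_del(&bo->unref_head);
	 *		list_del(&bo->size_head);
	 *	}
	 */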
	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;
	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		struct mutex lock;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
	} purgeable;
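
	/* Illustrative sketch: vc4_bo_add_to_purgeable_pool() would update
	 * the pool and its stats under the pool lock, roughly:
	 *
	 *	mutex_lock(&vc4->purgeable.lock);
	 *	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	 *	vc4->purgeable.num++;
	 *	vc4->purgeable.size += bo->base.base.size;
	 *	mutex_unlock(&vc4->purgeable.lock);
	 */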
	uint64_t dma_fence_context;
	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;
	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;
	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner.  The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;
	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering.  The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;
	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;
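
	/* Illustrative sketch: the IRQ handler and the ioctl paths both
	 * bracket job-list and seqno updates with job_lock; retiring a
	 * finished job might look roughly like:
	 *
	 *	spin_lock_irqsave(&vc4->job_lock, irqflags);
	 *	list_move_tail(&exec->head, &vc4->job_done_list);
	 *	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	 *	schedule_work(&vc4->job_done_work);
	 */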
	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;
	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;
	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations.  This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;
	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;
	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;
	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;
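
	/* Illustrative sketch, not the exact vc4_v3d.c code: a free bin
	 * slot can be claimed by scanning the inverted usage bitmask:
	 *
	 *	int slot = ffs(~vc4->bin_alloc_used) - 1;
	 *
	 *	if (slot >= 0)
	 *		vc4->bin_alloc_used |= BIT(slot);
	 */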
	struct work_struct overflow_mem_work;
	int power_refcount;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;
	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;
	struct semaphore async_modeset;
};
static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}
struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;
	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list.
	 */
	struct list_head unref_head;
	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;
	/* List entry for the BO's position in vc4_dev->bo_cache.size_list. */
	struct list_head size_head;
	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;
	/* Normally (resv == &_resv) except for imported BOs. */
	struct reservation_object *resv;
	struct reservation_object _resv;
	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;
	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;
	/* Store purgeable/purged state here. */
	u32 madv;
	struct mutex madv_lock;
};
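
/* Illustrative sketch of the madv contract (roughly what
 * vc4_bo_inc_usecnt() has to check): paths that must pin a BO's memory
 * take madv_lock and refuse buffers marked DONTNEED or already purged:
 *
 *	mutex_lock(&bo->madv_lock);
 *	if (bo->madv == VC4_MADV_WILLNEED)
 *		refcount_inc(&bo->usecnt);
 *	else
 *		ret = -EINVAL;
 *	mutex_unlock(&bo->madv_lock);
 */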
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}
struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};
static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return (struct vc4_fence *)fence;
}
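
/* Illustrative sketch: the dma_fence .signaled() hook reduces to a seqno
 * comparison against the device's completed counter, roughly as vc4_fence.c
 * implements it:
 *
 *	static bool vc4_fence_signaled(struct dma_fence *fence)
 *	{
 *		struct vc4_fence *f = to_vc4_fence(fence);
 *		struct vc4_dev *vc4 = to_vc4_dev(f->dev);
 *
 *		return vc4->finished_seqno >= f->seqno;
 *	}
 */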
struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};
struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
};
struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;
	/* Memory manager for CRTCs to allocate space in the display
	 * list.  Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;
};
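
/* Illustrative sketch: display-list space is carved out of dlist_mm with
 * the generic drm_mm allocator, under mm_lock ("node" is a caller-provided
 * struct drm_mm_node):
 *
 *	spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
 *	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &node, dlist_count);
 *	spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
 */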
struct vc4_plane {
	struct drm_plane base;
};
static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}
enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};
struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list. */
	u32 dlist_count; /* Number of used dwords in the display list. */
	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;
	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;
	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];
	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;
	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;
};
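
/* Illustrative sketch: at flush time the precomputed dlist is copied into
 * HVS memory one word at a time, which is essentially what
 * vc4_plane_write_dlist() declared below does:
 *
 *	for (i = 0; i < vc4_state->dlist_count; i++)
 *		writel(vc4_state->dlist[i], &dlist[i]);
 */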
static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return (struct vc4_plane_state *)state;
}
enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};
struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
};
static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}
#define V3D_READ(offset) readl(vc4->v3d->regs + (offset))
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + (offset))
#define HVS_READ(offset) readl(vc4->hvs->regs + (offset))
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + (offset))
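
/* Example usage (illustrative; the register names come from vc4_regs.h and
 * a local "vc4" pointer is assumed to be in scope):
 *
 *	u32 ident = V3D_READ(V3D_IDENT0);
 *
 *	HVS_WRITE(SCALER_DISPCTRL, dispctrl);
 */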
struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;
	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;
	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;
	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;
	/* List of BOs that are being written by the RCL.  Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;
	/* Pointers for our position in vc4->job_list */
	struct list_head head;
	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;
	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];
	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;
	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at.  It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;
	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;
	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;
	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;
	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;
	/* Pointers to the shader recs.  The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;
	/* Pointers to the uniform data.  These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;
	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;
};
/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;
};
static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}
static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}
static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
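
/* Illustrative sketch: submission paths peek the head of a queue, roughly
 * as vc4_submit_next_bin_job() in vc4_gem.c does (submit_cl() stands in
 * for the hardware kick-off helper there):
 *
 *	struct vc4_exec_info *exec = vc4_first_bin_job(vc4);
 *
 *	if (!exec)
 *		return;
 *	submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
 */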
/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * sampling parameters, to be relocated and validated by the kernel when
 * they're loaded by the GPU.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};
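
/* Illustrative sketch (validate_tex_param() is a hypothetical helper):
 * validation can skip parameters that were never provided by relying on
 * the ~0 sentinel described above:
 *
 *	for (i = 0; i < 4; i++) {
 *		if (sample->p_offset[i] == ~0)
 *			continue;
 *		validate_tex_param(exec, sample, i);
 *	}
 */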
/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;
};
/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep()) {				\
			msleep(W);					\
		} else {						\
			cpu_relax();					\
		}							\
	}								\
	ret__;								\
})
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
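
/* Example (illustrative): poll a condition for up to 1000ms, sleeping 1ms
 * between checks when the context allows it:
 *
 *	if (wait_for(list_empty(&vc4->bin_job_list), 1000))
 *		DRM_ERROR("timed out waiting for bin jobs to drain\n");
 */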
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
			     bool in_vblank_irq, int *vpos, int *hpos,
			     ktime_t *stime, ktime_t *etime,
			     const struct drm_display_mode *mode);
/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;
/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);
/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);
/* vc4_validate_shaders.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);