1 /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2015-2018 Etnaviv Project */
6 #ifndef __ETNAVIV_GPU_H__
7 #define __ETNAVIV_GPU_H__
9 #include "etnaviv_cmdbuf.h"
10 #include "etnaviv_gem.h"
11 #include "etnaviv_drv.h"
13 struct etnaviv_gem_submit;
14 struct etnaviv_vram_mapping;
/*
 * Identification and capability data for one Vivante GPU core, filled in
 * at probe time — either read back from hardware registers or taken from
 * the hardware database (see etnaviv_fill_identity_from_hwdb()).
 *
 * NOTE(review): this view of the file elides several members of this
 * struct; only comments are touched here.
 */
16 struct etnaviv_chip_identity {
23 /* Supported feature fields. */
26 /* Supported minor feature fields. */
40 /* Number of streams supported. */
43 /* Total number of temporary registers per thread. */
46 /* Maximum number of threads. */
49 /* Number of shader cores. */
50 u32 shader_core_count;
52 /* Size of the vertex cache. */
53 u32 vertex_cache_size;
55 /* Number of entries in the vertex output buffer. */
56 u32 vertex_output_buffer_size;
58 /* Number of pixel pipes. */
61 /* Number of instructions. */
62 u32 instruction_count;
64 /* Number of constants. */
70 /* Number of varyings */
/*
 * NOTE(review): presumably selects how the GPU's secure state is driven
 * (enumerators are elided in this view) — confirm against the full file.
 */
74 enum etnaviv_sec_mode {
/*
 * Per-event-slot bookkeeping (one of these per entry in the GPU's
 * event[] array): the fence and submit associated with a hardware
 * event, plus an optional callback invoked for 'sync point' events.
 */
80 struct etnaviv_event {
81 	struct dma_fence *fence;
82 	struct etnaviv_gem_submit *submit;
84 	void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
87 struct etnaviv_cmdbuf_suballoc;
/* Number of event slots tracked per GPU (sizes event_bitmap and event[]). */
91 #define ETNA_NR_EVENTS 30
/* DRM device this GPU core is registered with. */
94 struct drm_device *drm;
/* Thermal cooling device — NOTE(review): presumably used to throttle
 * GPU frequency under thermal pressure; confirm in the .c file. */
95 struct thermal_cooling_device *cooling;
/* Hardware identification/capabilities, see struct etnaviv_chip_identity. */
98 struct etnaviv_chip_identity identity;
99 enum etnaviv_sec_mode sec_mode;
/* Driver workqueue (used e.g. for the sync point worker below). */
100 struct workqueue_struct *wq;
/* DRM GPU scheduler instance for this core. */
101 struct drm_gpu_scheduler sched;
105 struct etnaviv_vram_mapping cmdbuf_mapping;
/* Ring/command buffer owned by the kernel driver. */
106 struct etnaviv_cmdbuf buffer;
109 /* bus base address of memory */
112 /* event management: */
113 DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
114 struct etnaviv_event event[ETNA_NR_EVENTS];
/* Signalled when an event slot becomes free again. */
115 struct completion event_free;
/* Protects the event bitmap/array above. */
116 spinlock_t event_spinlock;
120 /* Fencing support */
121 struct mutex fence_lock;
/* ID allocator mapping fence IDs to dma_fences. */
122 struct idr fence_idr;
/* Woken when fences complete, see etnaviv_gpu_wait_fence_interruptible(). */
125 wait_queue_head_t fence_event;
127 spinlock_t fence_spinlock;
129 /* worker for handling 'sync' points: */
130 struct work_struct sync_point_work;
131 int sync_point_event;
/* Last FE DMA address sampled — NOTE(review): presumably used to detect
 * lack of progress in hang checking; confirm against the scheduler code. */
134 u32 hangcheck_dma_addr;
/* GPU MMU context and a sequence counter for its flushes. */
139 struct etnaviv_iommu *mmu;
140 unsigned int flush_seq;
/* Core and shader clocks. */
145 struct clk *clk_core;
146 struct clk *clk_shader;
/* Frequency scaling state — base_rate_* look like the unscaled clock
 * rates and freq_scale the current divider; TODO confirm semantics. */
148 unsigned int freq_scale;
149 unsigned long base_rate_core;
150 unsigned long base_rate_shader;
/*
 * gpu_write() - write a 32-bit value to a GPU MMIO register.
 * @gpu:  GPU core; its register file is mapped at gpu->mmio
 * @reg:  byte offset of the register within the MMIO region
 * @data: value to write
 */
153 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
155 	writel(data, gpu->mmio + reg);
/*
 * gpu_read() - read a 32-bit GPU MMIO register.
 * @gpu: GPU core; its register file is mapped at gpu->mmio
 * @reg: byte offset of the register within the MMIO region
 *
 * Return: the register's current value.
 */
158 static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
160 	return readl(gpu->mmio + reg);
/* Query a single GPU parameter value (used by the GET_PARAM ioctl path). */
163 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
/* One-time hardware init; etnaviv_fill_identity_from_hwdb() returns true
 * if the identity struct could be filled from the hardware database. */
165 int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
166 bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);
168 #ifdef CONFIG_DEBUG_FS
169 int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
/* Hang recovery and retirement of completed submits. */
172 void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
173 void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
/*
 * Wait helpers. A NULL timeout is not shown here — semantics TBD from the
 * .c file. NOTE(review): struct timespec is not y2038-safe; a 64-bit time
 * representation would be preferable — confirm against the UAPI contract
 * before changing, as these prototypes are part of the driver interface.
 */
174 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
175 	u32 fence, struct timespec *timeout);
176 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
177 	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
/* Submit a job; returns the out-fence for the submission. */
178 struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
/* Runtime-PM style reference counting on the GPU. */
179 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
180 void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
181 int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
/* Kick the front-end DMA engine at @address with @prefetch words. */
182 void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);
184 extern struct platform_driver etnaviv_gpu_driver;
186 #endif /* __ETNAVIV_GPU_H__ */