/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
 */

#ifndef __LINUX_HOST1X_H
#define __LINUX_HOST1X_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/spinlock.h>
#include <linux/types.h>
enum host1x_class {
	HOST1X_CLASS_HOST1X = 0x1,
	HOST1X_CLASS_GR2D = 0x51,
	HOST1X_CLASS_GR2D_SB = 0x52,
	HOST1X_CLASS_VIC = 0x5D,
	HOST1X_CLASS_GR3D = 0x60,
	HOST1X_CLASS_NVDEC = 0xF0,
	HOST1X_CLASS_NVDEC1 = 0xF5,
};

struct host1x;
struct host1x_client;
struct iommu_group;

u64 host1x_get_dma_mask(struct host1x *host1x);
/**
 * struct host1x_bo_cache - host1x buffer object cache
 * @mappings: list of mappings
 * @lock: synchronizes accesses to the list of mappings
 */
struct host1x_bo_cache {
	struct list_head mappings;
	struct mutex lock;
};

static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
{
	INIT_LIST_HEAD(&cache->mappings);
	mutex_init(&cache->lock);
}

static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
{
	/* XXX warn if not empty? */
	mutex_destroy(&cache->lock);
}
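/*
 * Example (illustrative sketch, not part of the API): a buffer object cache
 * is embedded in a containing structure (struct host1x_client below embeds
 * one as its cache member) and is simply initialized before first use and
 * destroyed again on teardown:
 *
 *	struct host1x_bo_cache cache;
 *
 *	host1x_bo_cache_init(&cache);
 *	...
 *	host1x_bo_cache_destroy(&cache);
 */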
/**
 * struct host1x_client_ops - host1x client operations
 * @early_init: host1x client early initialization code
 * @init: host1x client initialization code
 * @exit: host1x client tear down code
 * @late_exit: host1x client late tear down code
 * @suspend: host1x client suspend code
 * @resume: host1x client resume code
 */
struct host1x_client_ops {
	int (*early_init)(struct host1x_client *client);
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
	int (*late_exit)(struct host1x_client *client);
	int (*suspend)(struct host1x_client *client);
	int (*resume)(struct host1x_client *client);
};
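/*
 * Example (illustrative sketch): a client driver provides a static ops table
 * with the callbacks it needs; callbacks that are not needed can typically be
 * left out. The "foo" names are hypothetical.
 *
 *	static int foo_client_init(struct host1x_client *client)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_client_exit(struct host1x_client *client)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct host1x_client_ops foo_client_ops = {
 *		.init = foo_client_init,
 *		.exit = foo_client_exit,
 *	};
 */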
/**
 * struct host1x_client - host1x client structure
 * @list: list node for the host1x client
 * @host: pointer to struct device representing the host1x controller
 * @dev: pointer to struct device backing this host1x client
 * @group: IOMMU group that this client is a member of
 * @ops: host1x client operations
 * @class: host1x class represented by this client
 * @channel: host1x channel associated with this client
 * @syncpts: array of syncpoints requested for this client
 * @num_syncpts: number of syncpoints requested for this client
 * @parent: pointer to parent structure
 * @usecount: reference count for this structure
 * @lock: mutex for mutually exclusive concurrency
 * @cache: host1x buffer object cache
 */
struct host1x_client {
	struct list_head list;
	struct device *host;
	struct device *dev;
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;

	struct host1x_client *parent;
	unsigned int usecount;
	struct mutex lock;

	struct host1x_bo_cache cache;
};
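/*
 * Example (illustrative sketch): a subdevice driver usually embeds a
 * struct host1x_client in its private data and fills in the backing device,
 * operations and class before registering it. The "foo" names are
 * hypothetical and foo_client_ops is the table from the previous example.
 *
 *	struct foo {
 *		struct host1x_client client;
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo *foo;
 *
 *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		foo->client.ops = &foo_client_ops;
 *		foo->client.dev = &pdev->dev;
 *		foo->client.class = HOST1X_CLASS_VIC;
 *
 *		return host1x_client_register(&foo->client);
 *	}
 */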
/*
 * host1x buffer objects
 */

struct host1x_bo;
struct sg_table;

struct host1x_bo_mapping {
	struct kref ref;
	struct dma_buf_attachment *attach;
	enum dma_data_direction direction;
	struct list_head list;
	struct host1x_bo *bo;
	struct sg_table *sgt;
	unsigned int chunks;
	struct device *dev;
	dma_addr_t phys;
	size_t size;

	struct host1x_bo_cache *cache;
	struct list_head entry;
};

static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
{
	return container_of(ref, struct host1x_bo_mapping, ref);
}
struct host1x_bo_ops {
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	void (*put)(struct host1x_bo *bo);
	struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
					 enum dma_data_direction dir);
	void (*unpin)(struct host1x_bo_mapping *map);
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};

struct host1x_bo {
	const struct host1x_bo_ops *ops;
	struct list_head mappings;
	spinlock_t lock;
};

static inline void host1x_bo_init(struct host1x_bo *bo,
				  const struct host1x_bo_ops *ops)
{
	INIT_LIST_HEAD(&bo->mappings);
	spin_lock_init(&bo->lock);
	bo->ops = ops;
}

static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
{
	return bo->ops->get(bo);
}

static inline void host1x_bo_put(struct host1x_bo *bo)
{
	bo->ops->put(bo);
}

struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache);
void host1x_bo_unpin(struct host1x_bo_mapping *map);

static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
	return bo->ops->mmap(bo);
}

static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
{
	bo->ops->munmap(bo, addr);
}
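/*
 * Example (illustrative sketch): pinning a buffer object for DMA to a client
 * device and mapping it into the kernel. The "client" and "bo" variables are
 * assumed to exist in the caller and error handling is abbreviated.
 *
 *	struct host1x_bo_mapping *map;
 *	void *virt;
 *
 *	map = host1x_bo_pin(client->dev, bo, DMA_TO_DEVICE, &client->cache);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	virt = host1x_bo_mmap(bo);
 *	...
 *	host1x_bo_munmap(bo, virt);
 *	host1x_bo_unpin(map);
 */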
/*
 * host1x syncpoints
 */

#define HOST1X_SYNCPT_CLIENT_MANAGED	(1 << 0)
#define HOST1X_SYNCPT_HAS_BASE		(1 << 1)

struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;

struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value);
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags);
void host1x_syncpt_put(struct host1x_syncpt *sp);
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name);

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);

void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id);

struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);
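/*
 * Example (illustrative sketch): a client requests a syncpoint, claims the
 * threshold that its work will reach and then waits for that threshold. The
 * "client" variable is assumed to exist in the caller.
 *
 *	struct host1x_syncpt *sp;
 *	u32 value, threshold;
 *	int err;
 *
 *	sp = host1x_syncpt_request(client, 0);
 *	if (!sp)
 *		return -ENOMEM;
 *
 *	threshold = host1x_syncpt_incr_max(sp, 1);
 *
 *	... program the engine (or call host1x_syncpt_incr()) so that the
 *	syncpoint is incremented once ...
 *
 *	err = host1x_syncpt_wait(sp, threshold, msecs_to_jiffies(100), &value);
 *	...
 *	host1x_syncpt_put(sp);
 */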
/*
 * host1x channel
 */

struct host1x_channel;
struct host1x_job;

struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
/*
 * host1x job
 */

#define HOST1X_RELOC_READ	(1 << 0)
#define HOST1X_RELOC_WRITE	(1 << 1)

struct host1x_reloc {
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} cmdbuf;
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} target;
	unsigned long shift;
	unsigned long flags;
};
struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct host1x_channel *channel;

	/* Client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_cmd *cmds;
	unsigned int num_cmds;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Syncpoint, number of increments and end value related to the submit */
	struct host1x_syncpt *syncpt;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Completion waiter ref */
	void *waiter;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Job has timed out and should be released */
	bool cancelled;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;

	/* Fast-forward syncpoint increments on job timeout */
	bool syncpt_recovery;

	/* Callback called when job is freed */
	void (*release)(struct host1x_job *job);
	void *user_data;

	/* Whether the host1x-side firewall should be run for this job or not */
	bool enable_firewall;
};
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset);
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
			 bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
void host1x_job_unpin(struct host1x_job *job);
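/*
 * Example (illustrative sketch): a client usually requests a channel once,
 * e.g. at initialization time via host1x_channel_request(), and then builds
 * and submits jobs on it. The "client", "channel", "bo", "num_words" and
 * "syncpt" variables are assumed to exist in the caller and error handling
 * is abbreviated.
 *
 *	struct host1x_job *job;
 *	int err;
 *
 *	job = host1x_job_alloc(channel, 1, 0, false);
 *	if (!job)
 *		return -ENOMEM;
 *
 *	job->syncpt = host1x_syncpt_get(syncpt);
 *	job->syncpt_incrs = 1;
 *
 *	host1x_job_add_gather(job, bo, num_words, 0);
 *
 *	err = host1x_job_pin(job, client->dev);
 *	if (err)
 *		goto put;
 *
 *	err = host1x_job_submit(job);
 *	if (err)
 *		host1x_job_unpin(job);
 *
 *   put:
 *	host1x_job_put(job);
 *	return err;
 *
 * The channel reference is dropped with host1x_channel_put() once the client
 * no longer needs it.
 */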
/*
 * subdevice probe infrastructure
 */

struct host1x_device;
/**
 * struct host1x_driver - host1x logical device driver
 * @driver: core driver
 * @subdevs: table of OF device IDs matching subdevices for this driver
 * @list: list node for the driver
 * @probe: called when the host1x logical device is probed
 * @remove: called when the host1x logical device is removed
 * @shutdown: called when the host1x logical device is shut down
 */
struct host1x_driver {
	struct device_driver driver;

	const struct of_device_id *subdevs;
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};

static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}

int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);

#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)
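/*
 * Example (illustrative sketch): a logical device driver lists the OF
 * compatible strings of the subdevices it binds against and registers
 * itself, typically from module init code. The "foo" names and the
 * compatible string are hypothetical; foo_host1x_probe()/foo_host1x_remove()
 * are assumed to match the prototypes above.
 *
 *	static const struct of_device_id foo_subdevs[] = {
 *		{ .compatible = "vendor,foo-engine" },
 *		{ }
 *	};
 *
 *	static struct host1x_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *		.subdevs = foo_subdevs,
 *		.probe = foo_host1x_probe,
 *		.remove = foo_host1x_remove,
 *	};
 *
 *	host1x_driver_register(&foo_driver);
 *	...
 *	host1x_driver_unregister(&foo_driver);
 */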
struct host1x_device {
	struct host1x_driver *driver;
	struct list_head list;
	struct device dev;

	struct mutex subdevs_lock;
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};

static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}

int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);
void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
void host1x_client_exit(struct host1x_client *client);

#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})

int __host1x_client_register(struct host1x_client *client);

/*
 * Note that this wrapper calls __host1x_client_init() for compatibility
 * with existing callers. Callers that want to separately initialize and
 * register a host1x client must first initialize using either of the
 * __host1x_client_init() or host1x_client_init() functions and then use
 * the low-level __host1x_client_register() function to avoid the client
 * getting reinitialized.
 */
#define host1x_client_register(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
		__host1x_client_register(client);	\
	})

int host1x_client_unregister(struct host1x_client *client);
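/*
 * Example (illustrative sketch): the common case simply uses the
 * host1x_client_register() wrapper, while a driver that needs to initialize
 * the client early and register it later splits the two steps as described
 * above:
 *
 *	host1x_client_init(&foo->client);
 *	...
 *	err = __host1x_client_register(&foo->client);
 *	if (err < 0)
 *		host1x_client_exit(&foo->client);
 *
 * The "foo" structure is hypothetical; see the struct host1x_client example
 * above.
 */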
int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);

struct tegra_mipi_device;

struct tegra_mipi_device *tegra_mipi_request(struct device *device,
					     struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);

#endif /* __LINUX_HOST1X_H */