// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */
#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"
10 void msm_submitqueue_destroy(struct kref *kref)
12 struct msm_gpu_submitqueue *queue = container_of(kref,
13 struct msm_gpu_submitqueue, ref);
18 struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
21 struct msm_gpu_submitqueue *entry;
26 read_lock(&ctx->queuelock);
28 list_for_each_entry(entry, &ctx->submitqueues, node) {
29 if (entry->id == id) {
30 kref_get(&entry->ref);
31 read_unlock(&ctx->queuelock);
37 read_unlock(&ctx->queuelock);
41 void msm_submitqueue_close(struct msm_file_private *ctx)
43 struct msm_gpu_submitqueue *entry, *tmp;
49 * No lock needed in close and there won't
50 * be any more user ioctls coming our way
52 list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
53 msm_submitqueue_put(entry);
56 int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
57 u32 prio, u32 flags, u32 *id)
59 struct msm_drm_private *priv = drm->dev_private;
60 struct msm_gpu_submitqueue *queue;
65 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
70 kref_init(&queue->ref);
74 if (prio >= priv->gpu->nr_rings) {
82 write_lock(&ctx->queuelock);
84 queue->id = ctx->queueid++;
89 list_add_tail(&queue->node, &ctx->submitqueues);
91 write_unlock(&ctx->queuelock);
96 int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
98 struct msm_drm_private *priv = drm->dev_private;
105 * Select priority 2 as the "default priority" unless nr_rings is less
106 * than 2 and then pick the lowest pirority
108 default_prio = priv->gpu ?
109 clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;
111 INIT_LIST_HEAD(&ctx->submitqueues);
113 rwlock_init(&ctx->queuelock);
115 return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
118 static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
119 struct drm_msm_submitqueue_query *args)
121 size_t size = min_t(size_t, args->len, sizeof(queue->faults));
124 /* If a zero length was passed in, return the data size we expect */
126 args->len = sizeof(queue->faults);
130 /* Set the length to the actual size of the data */
133 ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);
135 return ret ? -EFAULT : 0;
138 int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
139 struct drm_msm_submitqueue_query *args)
141 struct msm_gpu_submitqueue *queue;
147 queue = msm_submitqueue_get(ctx, args->id);
151 if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
152 ret = msm_submitqueue_query_faults(queue, args);
154 msm_submitqueue_put(queue);
159 int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
161 struct msm_gpu_submitqueue *entry;
167 * id 0 is the "default" queue and can't be destroyed
173 write_lock(&ctx->queuelock);
175 list_for_each_entry(entry, &ctx->submitqueues, node) {
176 if (entry->id == id) {
177 list_del(&entry->node);
178 write_unlock(&ctx->queuelock);
180 msm_submitqueue_put(entry);
185 write_unlock(&ctx->queuelock);