drivers/gpu/drm/i915/i915_query.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>

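/*
 * Helper shared by the query functions below: a zero query_item->length is a
 * size probe and reports the required buffer size back to the caller, a
 * length shorter than what the query needs is rejected, and otherwise the
 * first query_sz bytes of the user buffer are pulled in for inspection.
 */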
static int copy_query_item(void *query_hdr, size_t query_sz,
                           u32 total_length,
                           struct drm_i915_query_item *query_item)
{
        if (query_item->length == 0)
                return total_length;

        if (query_item->length < total_length)
                return -EINVAL;

        if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
                           query_sz))
                return -EFAULT;

        return 0;
}

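/*
 * DRM_I915_QUERY_TOPOLOGY_INFO: the reply is a drm_i915_query_topology_info
 * header followed by three bitmask arrays laid out back to back - the slice
 * mask, then max_slices * ss_stride bytes of subslice masks, then
 * max_slices * max_subslices * eu_stride bytes of EU masks.  The offsets and
 * strides written into the header tell userspace where each array starts.
 */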
static int query_topology_info(struct drm_i915_private *dev_priv,
                               struct drm_i915_query_item *query_item)
{
        const struct sseu_dev_info *sseu = &dev_priv->gt.info.sseu;
        struct drm_i915_query_topology_info topo;
        u32 slice_length, subslice_length, eu_length, total_length;
        int ret;

        if (query_item->flags != 0)
                return -EINVAL;

        if (sseu->max_slices == 0)
                return -ENODEV;

        BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

        slice_length = sizeof(sseu->slice_mask);
        subslice_length = sseu->max_slices * sseu->ss_stride;
        eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
        total_length = sizeof(topo) + slice_length + subslice_length +
                       eu_length;

        ret = copy_query_item(&topo, sizeof(topo), total_length,
                              query_item);
        if (ret != 0)
                return ret;

        if (topo.flags != 0)
                return -EINVAL;

        memset(&topo, 0, sizeof(topo));
        topo.max_slices = sseu->max_slices;
        topo.max_subslices = sseu->max_subslices;
        topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

        topo.subslice_offset = slice_length;
        topo.subslice_stride = sseu->ss_stride;
        topo.eu_offset = slice_length + subslice_length;
        topo.eu_stride = sseu->eu_stride;

        if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
                         &topo, sizeof(topo)))
                return -EFAULT;

        if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
                         &sseu->slice_mask, slice_length))
                return -EFAULT;

        if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
                                         sizeof(topo) + slice_length),
                         sseu->subslice_mask, subslice_length))
                return -EFAULT;

        if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
                                         sizeof(topo) +
                                         slice_length + subslice_length),
                         sseu->eu_mask, eu_length))
                return -EFAULT;

        return total_length;
}

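/*
 * DRM_I915_QUERY_ENGINE_INFO: the reply is a drm_i915_query_engine_info
 * header followed by one drm_i915_engine_info per user-visible engine,
 * reporting its class, instance and capability flags.
 */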
static int
query_engine_info(struct drm_i915_private *i915,
                  struct drm_i915_query_item *query_item)
{
        struct drm_i915_query_engine_info __user *query_ptr =
                                u64_to_user_ptr(query_item->data_ptr);
        struct drm_i915_engine_info __user *info_ptr;
        struct drm_i915_query_engine_info query;
        struct drm_i915_engine_info info = { };
        unsigned int num_uabi_engines = 0;
        struct intel_engine_cs *engine;
        int len, ret;

        if (query_item->flags)
                return -EINVAL;

        for_each_uabi_engine(engine, i915)
                num_uabi_engines++;

        len = struct_size(query_ptr, engines, num_uabi_engines);

        ret = copy_query_item(&query, sizeof(query), len, query_item);
        if (ret != 0)
                return ret;

        if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
            query.rsvd[2])
                return -EINVAL;

        info_ptr = &query_ptr->engines[0];

        for_each_uabi_engine(engine, i915) {
                info.engine.engine_class = engine->uabi_class;
                info.engine.engine_instance = engine->uabi_instance;
                info.capabilities = engine->uabi_capabilities;

                if (copy_to_user(info_ptr, &info, sizeof(info)))
                        return -EFAULT;

                query.num_engines++;
                info_ptr++;
        }

        if (copy_to_user(query_ptr, &query, sizeof(query)))
                return -EFAULT;

        return len;
}

static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
                                                    u64 user_regs_ptr,
                                                    u32 kernel_n_regs)
{
        /*
         * We'll just put the number of registers, and won't copy the
         * register.
         */
        if (user_n_regs == 0)
                return 0;

        if (user_n_regs < kernel_n_regs)
                return -EINVAL;

        return 0;
}

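/*
 * Copy a register list out to userspace as (offset, value) u32 pairs, or,
 * when the caller passed a zero count, just report how many registers the
 * configuration holds so userspace can size its buffers.
 */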
static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
                                                u32 kernel_n_regs,
                                                u64 user_regs_ptr,
                                                u32 *user_n_regs)
{
        u32 __user *p = u64_to_user_ptr(user_regs_ptr);
        u32 r;

        if (*user_n_regs == 0) {
                *user_n_regs = kernel_n_regs;
                return 0;
        }

        *user_n_regs = kernel_n_regs;

        if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
                return -EFAULT;

        for (r = 0; r < kernel_n_regs; r++, p += 2) {
                unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
                                p, Efault);
                unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
        }
        user_write_access_end();
        return 0;
Efault:
        user_write_access_end();
        return -EFAULT;
}

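/*
 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID / _FOR_ID: look up an OA
 * configuration either by its UUID string or by its numeric id and copy its
 * boolean, flex and mux register lists into the user-supplied
 * drm_i915_perf_oa_config, following the size-probe convention above.
 */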
static int query_perf_config_data(struct drm_i915_private *i915,
                                  struct drm_i915_query_item *query_item,
                                  bool use_uuid)
{
        struct drm_i915_query_perf_config __user *user_query_config_ptr =
                u64_to_user_ptr(query_item->data_ptr);
        struct drm_i915_perf_oa_config __user *user_config_ptr =
                u64_to_user_ptr(query_item->data_ptr +
                                sizeof(struct drm_i915_query_perf_config));
        struct drm_i915_perf_oa_config user_config;
        struct i915_perf *perf = &i915->perf;
        struct i915_oa_config *oa_config;
        char uuid[UUID_STRING_LEN + 1];
        u64 config_id;
        u32 flags, total_size;
        int ret;

        if (!perf->i915)
                return -ENODEV;

        total_size =
                sizeof(struct drm_i915_query_perf_config) +
                sizeof(struct drm_i915_perf_oa_config);

        if (query_item->length == 0)
                return total_size;

        if (query_item->length < total_size) {
                DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
                          query_item->length, total_size);
                return -EINVAL;
        }

        if (get_user(flags, &user_query_config_ptr->flags))
                return -EFAULT;

        if (flags != 0)
                return -EINVAL;

        if (use_uuid) {
                struct i915_oa_config *tmp;
                int id;

                BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

                memset(&uuid, 0, sizeof(uuid));
                if (copy_from_user(uuid, user_query_config_ptr->uuid,
                                   sizeof(user_query_config_ptr->uuid)))
                        return -EFAULT;

                oa_config = NULL;
                rcu_read_lock();
                idr_for_each_entry(&perf->metrics_idr, tmp, id) {
                        if (!strcmp(tmp->uuid, uuid)) {
                                oa_config = i915_oa_config_get(tmp);
                                break;
                        }
                }
                rcu_read_unlock();
        } else {
                if (get_user(config_id, &user_query_config_ptr->config))
                        return -EFAULT;

                oa_config = i915_perf_get_oa_config(perf, config_id);
        }
        if (!oa_config)
                return -ENOENT;

        if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
                ret = -EFAULT;
                goto out;
        }

        ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
                                                       user_config.boolean_regs_ptr,
                                                       oa_config->b_counter_regs_len);
        if (ret)
                goto out;

        ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
                                                       user_config.flex_regs_ptr,
                                                       oa_config->flex_regs_len);
        if (ret)
                goto out;

        ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
                                                       user_config.mux_regs_ptr,
                                                       oa_config->mux_regs_len);
        if (ret)
                goto out;

        ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
                                                   oa_config->b_counter_regs_len,
                                                   user_config.boolean_regs_ptr,
                                                   &user_config.n_boolean_regs);
        if (ret)
                goto out;

        ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
                                                   oa_config->flex_regs_len,
                                                   user_config.flex_regs_ptr,
                                                   &user_config.n_flex_regs);
        if (ret)
                goto out;

        ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
                                                   oa_config->mux_regs_len,
                                                   user_config.mux_regs_ptr,
                                                   &user_config.n_mux_regs);
        if (ret)
                goto out;

        memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

        if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
                ret = -EFAULT;
                goto out;
        }

        ret = total_size;

out:
        i915_oa_config_put(oa_config);
        return ret;
}

static size_t sizeof_perf_config_list(size_t count)
{
        return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
        struct i915_oa_config *tmp;
        size_t i;
        int id;

        i = 1;
        rcu_read_lock();
        idr_for_each_entry(&perf->metrics_idr, tmp, id)
                i++;
        rcu_read_unlock();

        return sizeof_perf_config_list(i);
}

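/*
 * DRM_I915_QUERY_PERF_CONFIG_LIST: build the array of known OA config ids
 * (id 1 is reserved for the test config).  Since the IDR is only walked
 * under rcu_read_lock(), the number of entries can change under us, so the
 * allocation loop retries with a larger buffer until a full walk fits.
 */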
static int query_perf_config_list(struct drm_i915_private *i915,
                                  struct drm_i915_query_item *query_item)
{
        struct drm_i915_query_perf_config __user *user_query_config_ptr =
                u64_to_user_ptr(query_item->data_ptr);
        struct i915_perf *perf = &i915->perf;
        u64 *oa_config_ids = NULL;
        int alloc, n_configs;
        u32 flags;
        int ret;

        if (!perf->i915)
                return -ENODEV;

        if (query_item->length == 0)
                return sizeof_perf_metrics(perf);

        if (get_user(flags, &user_query_config_ptr->flags))
                return -EFAULT;

        if (flags != 0)
                return -EINVAL;

        n_configs = 1;
        do {
                struct i915_oa_config *tmp;
                u64 *ids;
                int id;

                ids = krealloc(oa_config_ids,
                               n_configs * sizeof(*oa_config_ids),
                               GFP_KERNEL);
                if (!ids)
                        return -ENOMEM;

                alloc = fetch_and_zero(&n_configs);

                ids[n_configs++] = 1ull; /* reserved for test_config */
                rcu_read_lock();
                idr_for_each_entry(&perf->metrics_idr, tmp, id) {
                        if (n_configs < alloc)
                                ids[n_configs] = id;
                        n_configs++;
                }
                rcu_read_unlock();

                oa_config_ids = ids;
        } while (n_configs > alloc);

        if (query_item->length < sizeof_perf_config_list(n_configs)) {
                DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
                          query_item->length,
                          sizeof_perf_config_list(n_configs));
                kfree(oa_config_ids);
                return -EINVAL;
        }

        if (put_user(n_configs, &user_query_config_ptr->config)) {
                kfree(oa_config_ids);
                return -EFAULT;
        }

        ret = copy_to_user(user_query_config_ptr + 1,
                           oa_config_ids,
                           n_configs * sizeof(*oa_config_ids));
        kfree(oa_config_ids);
        if (ret)
                return -EFAULT;

        return sizeof_perf_config_list(n_configs);
}

static int query_perf_config(struct drm_i915_private *i915,
                             struct drm_i915_query_item *query_item)
{
        switch (query_item->flags) {
        case DRM_I915_QUERY_PERF_CONFIG_LIST:
                return query_perf_config_list(i915, query_item);
        case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
                return query_perf_config_data(i915, query_item, true);
        case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
                return query_perf_config_data(i915, query_item, false);
        default:
                return -EINVAL;
        }
}

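/*
 * Query functions indexed by query_id - 1: DRM_I915_QUERY_TOPOLOGY_INFO,
 * DRM_I915_QUERY_ENGINE_INFO and DRM_I915_QUERY_PERF_CONFIG, in that order.
 */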
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
                                        struct drm_i915_query_item *query_item) = {
        query_topology_info,
        query_engine_info,
        query_perf_config,
};

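/*
 * Entry point for DRM_IOCTL_I915_QUERY.  Userspace passes an array of
 * drm_i915_query_item structs and each item is dispatched to the matching
 * function above; the item's length field is then updated with that
 * function's return value (required size on a probe, bytes written on
 * success, or a negative error code).  A rough userspace sketch of the
 * usual probe-then-fill pattern (illustrative only, error handling elided):
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);  // probe: item.length filled in
 *	item.data_ptr = (uintptr_t)malloc(item.length);
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);  // fill: data written to data_ptr
 */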
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_query *args = data;
        struct drm_i915_query_item __user *user_item_ptr =
                u64_to_user_ptr(args->items_ptr);
        u32 i;

        if (args->flags != 0)
                return -EINVAL;

        for (i = 0; i < args->num_items; i++, user_item_ptr++) {
                struct drm_i915_query_item item;
                unsigned long func_idx;
                int ret;

                if (copy_from_user(&item, user_item_ptr, sizeof(item)))
                        return -EFAULT;

                if (item.query_id == 0)
                        return -EINVAL;

                if (overflows_type(item.query_id - 1, unsigned long))
                        return -EINVAL;

                func_idx = item.query_id - 1;

                ret = -EINVAL;
                if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
                        func_idx = array_index_nospec(func_idx,
                                                      ARRAY_SIZE(i915_query_funcs));
                        ret = i915_query_funcs[func_idx](dev_priv, &item);
                }

                /* Only write the length back to userspace if they differ. */
                if (ret != item.length && put_user(ret, &user_item_ptr->length))
                        return -EFAULT;
        }

        return 0;
}