drm/amdkfd: Simplify the mmap offset related bit operations
[linux-2.6-microblaze.git] drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/device.h>
24 #include <linux/export.h>
25 #include <linux/err.h>
26 #include <linux/fs.h>
27 #include <linux/file.h>
28 #include <linux/sched.h>
29 #include <linux/slab.h>
30 #include <linux/uaccess.h>
31 #include <linux/compat.h>
32 #include <uapi/linux/kfd_ioctl.h>
33 #include <linux/time.h>
34 #include <linux/mm.h>
35 #include <linux/mman.h>
36 #include <linux/dma-buf.h>
37 #include <asm/processor.h>
38 #include "kfd_priv.h"
39 #include "kfd_device_queue_manager.h"
40 #include "kfd_dbgmgr.h"
41 #include "amdgpu_amdkfd.h"
42
43 static long kfd_ioctl(struct file *, unsigned int, unsigned long);
44 static int kfd_open(struct inode *, struct file *);
45 static int kfd_mmap(struct file *, struct vm_area_struct *);
46
47 static const char kfd_dev_name[] = "kfd";
48
49 static const struct file_operations kfd_fops = {
50         .owner = THIS_MODULE,
51         .unlocked_ioctl = kfd_ioctl,
52         .compat_ioctl = kfd_ioctl,
53         .open = kfd_open,
54         .mmap = kfd_mmap,
55 };
56
57 static int kfd_char_dev_major = -1;
58 static struct class *kfd_class;
59 struct device *kfd_device;
60
61 int kfd_chardev_init(void)
62 {
63         int err = 0;
64
65         kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
66         err = kfd_char_dev_major;
67         if (err < 0)
68                 goto err_register_chrdev;
69
70         kfd_class = class_create(THIS_MODULE, kfd_dev_name);
71         err = PTR_ERR(kfd_class);
72         if (IS_ERR(kfd_class))
73                 goto err_class_create;
74
75         kfd_device = device_create(kfd_class, NULL,
76                                         MKDEV(kfd_char_dev_major, 0),
77                                         NULL, kfd_dev_name);
78         err = PTR_ERR(kfd_device);
79         if (IS_ERR(kfd_device))
80                 goto err_device_create;
81
82         return 0;
83
84 err_device_create:
85         class_destroy(kfd_class);
86 err_class_create:
87         unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
88 err_register_chrdev:
89         return err;
90 }
91
92 void kfd_chardev_exit(void)
93 {
94         device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
95         class_destroy(kfd_class);
96         unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
97 }
98
99 struct device *kfd_chardev(void)
100 {
101         return kfd_device;
102 }
103
104
105 static int kfd_open(struct inode *inode, struct file *filep)
106 {
107         struct kfd_process *process;
108         bool is_32bit_user_mode;
109
110         if (iminor(inode) != 0)
111                 return -ENODEV;
112
113         is_32bit_user_mode = in_compat_syscall();
114
115         if (is_32bit_user_mode) {
116                 dev_warn(kfd_device,
117                         "Process %d (32-bit) failed to open /dev/kfd\n"
118                         "32-bit processes are not supported by amdkfd\n",
119                         current->pid);
120                 return -EPERM;
121         }
122
123         process = kfd_create_process(filep);
124         if (IS_ERR(process))
125                 return PTR_ERR(process);
126
127         if (kfd_is_locked())
128                 return -EAGAIN;
129
130         dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
131                 process->pasid, process->is_32bit_user_mode);
132
133         return 0;
134 }
135
136 static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
137                                         void *data)
138 {
139         struct kfd_ioctl_get_version_args *args = data;
140
141         args->major_version = KFD_IOCTL_MAJOR_VERSION;
142         args->minor_version = KFD_IOCTL_MINOR_VERSION;
143
144         return 0;
145 }
146
147 static int set_queue_properties_from_user(struct queue_properties *q_properties,
148                                 struct kfd_ioctl_create_queue_args *args)
149 {
150         if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
151                 pr_err("Queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
152                 return -EINVAL;
153         }
154
155         if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
156                 pr_err("Queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
157                 return -EINVAL;
158         }
159
160         if ((args->ring_base_address) &&
161                 (!access_ok((const void __user *) args->ring_base_address,
162                         sizeof(uint64_t)))) {
163                 pr_err("Can't access ring base address\n");
164                 return -EFAULT;
165         }
166
167         if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
168                 pr_err("Ring size must be a power of 2 or 0\n");
169                 return -EINVAL;
170         }
171
172         if (!access_ok((const void __user *) args->read_pointer_address,
173                         sizeof(uint32_t))) {
174                 pr_err("Can't access read pointer\n");
175                 return -EFAULT;
176         }
177
178         if (!access_ok((const void __user *) args->write_pointer_address,
179                         sizeof(uint32_t))) {
180                 pr_err("Can't access write pointer\n");
181                 return -EFAULT;
182         }
183
184         if (args->eop_buffer_address &&
185                 !access_ok((const void __user *) args->eop_buffer_address,
186                         sizeof(uint32_t))) {
187                 pr_debug("Can't access eop buffer");
188                 return -EFAULT;
189         }
190
191         if (args->ctx_save_restore_address &&
192                 !access_ok((const void __user *) args->ctx_save_restore_address,
193                         sizeof(uint32_t))) {
194                 pr_debug("Can't access ctx save restore buffer");
195                 return -EFAULT;
196         }
197
198         q_properties->is_interop = false;
199         q_properties->queue_percent = args->queue_percentage;
200         q_properties->priority = args->queue_priority;
201         q_properties->queue_address = args->ring_base_address;
202         q_properties->queue_size = args->ring_size;
203         q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
204         q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
205         q_properties->eop_ring_buffer_address = args->eop_buffer_address;
206         q_properties->eop_ring_buffer_size = args->eop_buffer_size;
207         q_properties->ctx_save_restore_area_address =
208                         args->ctx_save_restore_address;
209         q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
210         q_properties->ctl_stack_size = args->ctl_stack_size;
211         if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
212                 args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
213                 q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
214         else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
215                 q_properties->type = KFD_QUEUE_TYPE_SDMA;
216         else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
217                 q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
218         else
219                 return -ENOTSUPP;
220
221         if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
222                 q_properties->format = KFD_QUEUE_FORMAT_AQL;
223         else
224                 q_properties->format = KFD_QUEUE_FORMAT_PM4;
225
226         pr_debug("Queue Percentage: %d, %d\n",
227                         q_properties->queue_percent, args->queue_percentage);
228
229         pr_debug("Queue Priority: %d, %d\n",
230                         q_properties->priority, args->queue_priority);
231
232         pr_debug("Queue Address: 0x%llX, 0x%llX\n",
233                         q_properties->queue_address, args->ring_base_address);
234
235         pr_debug("Queue Size: 0x%llX, %u\n",
236                         q_properties->queue_size, args->ring_size);
237
238         pr_debug("Queue r/w Pointers: %px, %px\n",
239                         q_properties->read_ptr,
240                         q_properties->write_ptr);
241
242         pr_debug("Queue Format: %d\n", q_properties->format);
243
244         pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
245
246         pr_debug("Queue CTX save area: 0x%llX\n",
247                         q_properties->ctx_save_restore_area_address);
248
249         return 0;
250 }
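/*
 * Illustrative user mode sketch (not part of the driver): the checks above
 * mirror how a runtime library would fill struct kfd_ioctl_create_queue_args
 * before issuing the ioctl.  kfd_fd, gpu_id, ring_buf, ring_size, rptr and
 * wptr are hypothetical user mode values.
 *
 *	struct kfd_ioctl_create_queue_args args = {0};
 *
 *	args.gpu_id = gpu_id;
 *	args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
 *	args.queue_percentage = KFD_MAX_QUEUE_PERCENTAGE;
 *	args.queue_priority = KFD_MAX_QUEUE_PRIORITY / 2;
 *	args.ring_base_address = (uint64_t)ring_buf;	(power-of-2 size)
 *	args.ring_size = ring_size;
 *	args.read_pointer_address = (uint64_t)&rptr;
 *	args.write_pointer_address = (uint64_t)&wptr;
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args) == 0)
 *		use args.queue_id and args.doorbell_offset;
 */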
251
252 static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
253                                         void *data)
254 {
255         struct kfd_ioctl_create_queue_args *args = data;
256         struct kfd_dev *dev;
257         int err = 0;
258         unsigned int queue_id;
259         struct kfd_process_device *pdd;
260         struct queue_properties q_properties;
261
262         memset(&q_properties, 0, sizeof(struct queue_properties));
263
264         pr_debug("Creating queue ioctl\n");
265
266         err = set_queue_properties_from_user(&q_properties, args);
267         if (err)
268                 return err;
269
270         pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
271         dev = kfd_device_by_id(args->gpu_id);
272         if (!dev) {
273                 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
274                 return -EINVAL;
275         }
276
277         mutex_lock(&p->mutex);
278
279         pdd = kfd_bind_process_to_device(dev, p);
280         if (IS_ERR(pdd)) {
281                 err = -ESRCH;
282                 goto err_bind_process;
283         }
284
285         pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
286                         p->pasid,
287                         dev->id);
288
289         err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);
290         if (err != 0)
291                 goto err_create_queue;
292
293         args->queue_id = queue_id;
294
295
296         /* Return gpu_id as doorbell offset for mmap usage */
297         args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
298         args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
299         if (KFD_IS_SOC15(dev->device_info->asic_family))
300                 /* On SOC15 ASICs, doorbell allocation must be
301                  * per-device, and independent from the per-process
302                  * queue_id. Return the doorbell offset within the
303                  * doorbell aperture to user mode.
304                  */
305                 args->doorbell_offset |= q_properties.doorbell_off;
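        /*
         * Illustrative sketch (not part of the driver): user mode is
         * expected to pass the encoded offset straight back to mmap()
         * on /dev/kfd, where kfd_mmap() decodes the type and GPU ID
         * bits again.  doorbell_size and kfd_fd are hypothetical:
         *
         *	doorbells = mmap(NULL, doorbell_size,
         *			 PROT_READ | PROT_WRITE, MAP_SHARED,
         *			 kfd_fd, args->doorbell_offset);
         */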
306
307         mutex_unlock(&p->mutex);
308
309         pr_debug("Queue id %d was created successfully\n", args->queue_id);
310
311         pr_debug("Ring buffer address == 0x%016llX\n",
312                         args->ring_base_address);
313
314         pr_debug("Read ptr address    == 0x%016llX\n",
315                         args->read_pointer_address);
316
317         pr_debug("Write ptr address   == 0x%016llX\n",
318                         args->write_pointer_address);
319
320         return 0;
321
322 err_create_queue:
323 err_bind_process:
324         mutex_unlock(&p->mutex);
325         return err;
326 }
327
328 static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
329                                         void *data)
330 {
331         int retval;
332         struct kfd_ioctl_destroy_queue_args *args = data;
333
334         pr_debug("Destroying queue id %d for pasid 0x%x\n",
335                                 args->queue_id,
336                                 p->pasid);
337
338         mutex_lock(&p->mutex);
339
340         retval = pqm_destroy_queue(&p->pqm, args->queue_id);
341
342         mutex_unlock(&p->mutex);
343         return retval;
344 }
345
346 static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
347                                         void *data)
348 {
349         int retval;
350         struct kfd_ioctl_update_queue_args *args = data;
351         struct queue_properties properties;
352
353         if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
354                 pr_err("Queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
355                 return -EINVAL;
356         }
357
358         if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
359                 pr_err("Queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
360                 return -EINVAL;
361         }
362
363         if ((args->ring_base_address) &&
364                 (!access_ok((const void __user *) args->ring_base_address,
365                         sizeof(uint64_t)))) {
366                 pr_err("Can't access ring base address\n");
367                 return -EFAULT;
368         }
369
370         if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
371                 pr_err("Ring size must be a power of 2 or 0\n");
372                 return -EINVAL;
373         }
374
375         properties.queue_address = args->ring_base_address;
376         properties.queue_size = args->ring_size;
377         properties.queue_percent = args->queue_percentage;
378         properties.priority = args->queue_priority;
379
380         pr_debug("Updating queue id %d for pasid 0x%x\n",
381                         args->queue_id, p->pasid);
382
383         mutex_lock(&p->mutex);
384
385         retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
386
387         mutex_unlock(&p->mutex);
388
389         return retval;
390 }
391
392 static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
393                                         void *data)
394 {
395         int retval;
396         const int max_num_cus = 1024;
397         struct kfd_ioctl_set_cu_mask_args *args = data;
398         struct queue_properties properties;
399         uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
400         size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
401
402         if ((args->num_cu_mask % 32) != 0) {
403                 pr_debug("num_cu_mask 0x%x must be a multiple of 32",
404                                 args->num_cu_mask);
405                 return -EINVAL;
406         }
407
408         properties.cu_mask_count = args->num_cu_mask;
409         if (properties.cu_mask_count == 0) {
410                 pr_debug("CU mask cannot be 0");
411                 return -EINVAL;
412         }
413
414         /* To prevent an unreasonably large CU mask size, set an arbitrary
415          * limit of max_num_cus bits.  Any CU mask bits past max_num_cus are
416          * dropped and only the first max_num_cus bits are used.
417          */
418         if (properties.cu_mask_count > max_num_cus) {
419                 pr_debug("CU mask cannot be greater than 1024 bits");
420                 properties.cu_mask_count = max_num_cus;
421                 cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
422         }
423
424         properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
425         if (!properties.cu_mask)
426                 return -ENOMEM;
427
428         retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
429         if (retval) {
430                 pr_debug("Could not copy CU mask from userspace");
431                 kfree(properties.cu_mask);
432                 return -EFAULT;
433         }
434
435         mutex_lock(&p->mutex);
436
437         retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
438
439         mutex_unlock(&p->mutex);
440
441         if (retval)
442                 kfree(properties.cu_mask);
443
444         return retval;
445 }
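/*
 * Illustrative user mode sketch (not part of the driver), assuming
 * hypothetical kfd_fd and queue_id values: the mask is passed as an array of
 * 32-bit words, so num_cu_mask must be a non-zero multiple of 32.
 *
 *	uint32_t cu_mask[2] = { 0xffffffff, 0x0000ffff };	(48 CUs enabled)
 *	struct kfd_ioctl_set_cu_mask_args args = {
 *		.queue_id = queue_id,
 *		.num_cu_mask = 64,
 *		.cu_mask_ptr = (uint64_t)cu_mask,
 *	};
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
 */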
446
447 static int kfd_ioctl_get_queue_wave_state(struct file *filep,
448                                           struct kfd_process *p, void *data)
449 {
450         struct kfd_ioctl_get_queue_wave_state_args *args = data;
451         int r;
452
453         mutex_lock(&p->mutex);
454
455         r = pqm_get_wave_state(&p->pqm, args->queue_id,
456                                (void __user *)args->ctl_stack_address,
457                                &args->ctl_stack_used_size,
458                                &args->save_area_used_size);
459
460         mutex_unlock(&p->mutex);
461
462         return r;
463 }
464
465 static int kfd_ioctl_set_memory_policy(struct file *filep,
466                                         struct kfd_process *p, void *data)
467 {
468         struct kfd_ioctl_set_memory_policy_args *args = data;
469         struct kfd_dev *dev;
470         int err = 0;
471         struct kfd_process_device *pdd;
472         enum cache_policy default_policy, alternate_policy;
473
474         if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
475             && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
476                 return -EINVAL;
477         }
478
479         if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
480             && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
481                 return -EINVAL;
482         }
483
484         dev = kfd_device_by_id(args->gpu_id);
485         if (!dev)
486                 return -EINVAL;
487
488         mutex_lock(&p->mutex);
489
490         pdd = kfd_bind_process_to_device(dev, p);
491         if (IS_ERR(pdd)) {
492                 err = -ESRCH;
493                 goto out;
494         }
495
496         default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
497                          ? cache_policy_coherent : cache_policy_noncoherent;
498
499         alternate_policy =
500                 (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
501                    ? cache_policy_coherent : cache_policy_noncoherent;
502
503         if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
504                                 &pdd->qpd,
505                                 default_policy,
506                                 alternate_policy,
507                                 (void __user *)args->alternate_aperture_base,
508                                 args->alternate_aperture_size))
509                 err = -EINVAL;
510
511 out:
512         mutex_unlock(&p->mutex);
513
514         return err;
515 }
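/*
 * Illustrative user mode sketch (not part of the driver): both policies must
 * be one of the two KFD_IOC_CACHE_POLICY_* values checked above.  kfd_fd,
 * gpu_id, alt_base and alt_size are hypothetical.
 *
 *	struct kfd_ioctl_set_memory_policy_args args = {
 *		.gpu_id = gpu_id,
 *		.default_policy = KFD_IOC_CACHE_POLICY_COHERENT,
 *		.alternate_policy = KFD_IOC_CACHE_POLICY_NONCOHERENT,
 *		.alternate_aperture_base = alt_base,
 *		.alternate_aperture_size = alt_size,
 *	};
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_SET_MEMORY_POLICY, &args);
 */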
516
517 static int kfd_ioctl_set_trap_handler(struct file *filep,
518                                         struct kfd_process *p, void *data)
519 {
520         struct kfd_ioctl_set_trap_handler_args *args = data;
521         struct kfd_dev *dev;
522         int err = 0;
523         struct kfd_process_device *pdd;
524
525         dev = kfd_device_by_id(args->gpu_id);
526         if (!dev)
527                 return -EINVAL;
528
529         mutex_lock(&p->mutex);
530
531         pdd = kfd_bind_process_to_device(dev, p);
532         if (IS_ERR(pdd)) {
533                 err = -ESRCH;
534                 goto out;
535         }
536
537         if (dev->dqm->ops.set_trap_handler(dev->dqm,
538                                         &pdd->qpd,
539                                         args->tba_addr,
540                                         args->tma_addr))
541                 err = -EINVAL;
542
543 out:
544         mutex_unlock(&p->mutex);
545
546         return err;
547 }
548
549 static int kfd_ioctl_dbg_register(struct file *filep,
550                                 struct kfd_process *p, void *data)
551 {
552         struct kfd_ioctl_dbg_register_args *args = data;
553         struct kfd_dev *dev;
554         struct kfd_dbgmgr *dbgmgr_ptr;
555         struct kfd_process_device *pdd;
556         bool create_ok;
557         long status = 0;
558
559         dev = kfd_device_by_id(args->gpu_id);
560         if (!dev)
561                 return -EINVAL;
562
563         if (dev->device_info->asic_family == CHIP_CARRIZO) {
564                 pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
565                 return -EINVAL;
566         }
567
568         mutex_lock(&p->mutex);
569         mutex_lock(kfd_get_dbgmgr_mutex());
570
571         /*
572          * make sure that we have a pdd, in case this is the first queue
573          * created for this process
574          */
575         pdd = kfd_bind_process_to_device(dev, p);
576         if (IS_ERR(pdd)) {
577                 status = PTR_ERR(pdd);
578                 goto out;
579         }
580
581         if (!dev->dbgmgr) {
582                 /* In case of a legal call, we have no dbgmgr yet */
583                 create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
584                 if (create_ok) {
585                         status = kfd_dbgmgr_register(dbgmgr_ptr, p);
586                         if (status != 0)
587                                 kfd_dbgmgr_destroy(dbgmgr_ptr);
588                         else
589                                 dev->dbgmgr = dbgmgr_ptr;
590                 }
591         } else {
592                 pr_debug("debugger already registered\n");
593                 status = -EINVAL;
594         }
595
596 out:
597         mutex_unlock(kfd_get_dbgmgr_mutex());
598         mutex_unlock(&p->mutex);
599
600         return status;
601 }
602
603 static int kfd_ioctl_dbg_unregister(struct file *filep,
604                                 struct kfd_process *p, void *data)
605 {
606         struct kfd_ioctl_dbg_unregister_args *args = data;
607         struct kfd_dev *dev;
608         long status;
609
610         dev = kfd_device_by_id(args->gpu_id);
611         if (!dev || !dev->dbgmgr)
612                 return -EINVAL;
613
614         if (dev->device_info->asic_family == CHIP_CARRIZO) {
615                 pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
616                 return -EINVAL;
617         }
618
619         mutex_lock(kfd_get_dbgmgr_mutex());
620
621         status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
622         if (!status) {
623                 kfd_dbgmgr_destroy(dev->dbgmgr);
624                 dev->dbgmgr = NULL;
625         }
626
627         mutex_unlock(kfd_get_dbgmgr_mutex());
628
629         return status;
630 }
631
632 /*
633  * Parse and generate a variable size data structure for address watch.
634  * The total buffer size and the number of watch points are limited in
635  * order to prevent kernel abuse. (This has no bearing on the much smaller
636  * HW limitation, which is enforced by the dbgdev module.)
637  * Please also note that the watch addresses themselves are not "copied
638  * from user", since they are set into the HW with user mode values.
639  *
640  */
641 static int kfd_ioctl_dbg_address_watch(struct file *filep,
642                                         struct kfd_process *p, void *data)
643 {
644         struct kfd_ioctl_dbg_address_watch_args *args = data;
645         struct kfd_dev *dev;
646         struct dbg_address_watch_info aw_info;
647         unsigned char *args_buff;
648         long status;
649         void __user *cmd_from_user;
650         uint64_t watch_mask_value = 0;
651         unsigned int args_idx = 0;
652
653         memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));
654
655         dev = kfd_device_by_id(args->gpu_id);
656         if (!dev)
657                 return -EINVAL;
658
659         if (dev->device_info->asic_family == CHIP_CARRIZO) {
660                 pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
661                 return -EINVAL;
662         }
663
664         cmd_from_user = (void __user *) args->content_ptr;
665
666         /* Validate arguments */
667
668         if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) ||
669                 (args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) ||
670                 (cmd_from_user == NULL))
671                 return -EINVAL;
672
673         /* this is the actual buffer to work with */
674         args_buff = memdup_user(cmd_from_user,
675                                 args->buf_size_in_bytes - sizeof(*args));
676         if (IS_ERR(args_buff))
677                 return PTR_ERR(args_buff);
678
679         aw_info.process = p;
680
681         aw_info.num_watch_points = *((uint32_t *)(&args_buff[args_idx]));
682         args_idx += sizeof(aw_info.num_watch_points);
683
684         aw_info.watch_mode = (enum HSA_DBG_WATCH_MODE *) &args_buff[args_idx];
685         args_idx += sizeof(enum HSA_DBG_WATCH_MODE) * aw_info.num_watch_points;
686
687         /*
688          * set watch address base pointer to point on the array base
689          * within args_buff
690          */
691         aw_info.watch_address = (uint64_t *) &args_buff[args_idx];
692
693         /* skip over the addresses buffer */
694         args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;
695
696         if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
697                 status = -EINVAL;
698                 goto out;
699         }
700
701         watch_mask_value = (uint64_t) args_buff[args_idx];
702
703         if (watch_mask_value > 0) {
704                 /*
705                  * There is an array of masks.
706                  * set watch mask base pointer to point on the array base
707                  * within args_buff
708                  */
709                 aw_info.watch_mask = (uint64_t *) &args_buff[args_idx];
710
711                 /* skip over the masks buffer */
712                 args_idx += sizeof(aw_info.watch_mask) *
713                                 aw_info.num_watch_points;
714         } else {
715                 /* just the NULL mask, set to NULL and skip over it */
716                 aw_info.watch_mask = NULL;
717                 args_idx += sizeof(aw_info.watch_mask);
718         }
719
720         if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
721                 status = -EINVAL;
722                 goto out;
723         }
724
725         /* Currently HSA Event is not supported for DBG */
726         aw_info.watch_event = NULL;
727
728         mutex_lock(kfd_get_dbgmgr_mutex());
729
730         status = kfd_dbgmgr_address_watch(dev->dbgmgr, &aw_info);
731
732         mutex_unlock(kfd_get_dbgmgr_mutex());
733
734 out:
735         kfree(args_buff);
736
737         return status;
738 }
739
740 /* Parse and generate fixed size data structure for wave control */
741 static int kfd_ioctl_dbg_wave_control(struct file *filep,
742                                         struct kfd_process *p, void *data)
743 {
744         struct kfd_ioctl_dbg_wave_control_args *args = data;
745         struct kfd_dev *dev;
746         struct dbg_wave_control_info wac_info;
747         unsigned char *args_buff;
748         uint32_t computed_buff_size;
749         long status;
750         void __user *cmd_from_user;
751         unsigned int args_idx = 0;
752
753         memset((void *) &wac_info, 0, sizeof(struct dbg_wave_control_info));
754
755         /* we use compact form, independent of the packing attribute value */
756         computed_buff_size = sizeof(*args) +
757                                 sizeof(wac_info.mode) +
758                                 sizeof(wac_info.operand) +
759                                 sizeof(wac_info.dbgWave_msg.DbgWaveMsg) +
760                                 sizeof(wac_info.dbgWave_msg.MemoryVA) +
761                                 sizeof(wac_info.trapId);
762
763         dev = kfd_device_by_id(args->gpu_id);
764         if (!dev)
765                 return -EINVAL;
766
767         if (dev->device_info->asic_family == CHIP_CARRIZO) {
768                 pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
769                 return -EINVAL;
770         }
771
772         /* input size must match the computed "compact" size */
773         if (args->buf_size_in_bytes != computed_buff_size) {
774                 pr_debug("size mismatch, actual : computed %u : %u\n",
775                                 args->buf_size_in_bytes, computed_buff_size);
776                 return -EINVAL;
777         }
778
779         cmd_from_user = (void __user *) args->content_ptr;
780
781         if (cmd_from_user == NULL)
782                 return -EINVAL;
783
784         /* copy the entire buffer from user */
785
786         args_buff = memdup_user(cmd_from_user,
787                                 args->buf_size_in_bytes - sizeof(*args));
788         if (IS_ERR(args_buff))
789                 return PTR_ERR(args_buff);
790
791         /* move ptr to the start of the "payload" area */
792         wac_info.process = p;
793
794         wac_info.operand = *((enum HSA_DBG_WAVEOP *)(&args_buff[args_idx]));
795         args_idx += sizeof(wac_info.operand);
796
797         wac_info.mode = *((enum HSA_DBG_WAVEMODE *)(&args_buff[args_idx]));
798         args_idx += sizeof(wac_info.mode);
799
800         wac_info.trapId = *((uint32_t *)(&args_buff[args_idx]));
801         args_idx += sizeof(wac_info.trapId);
802
803         wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value =
804                                         *((uint32_t *)(&args_buff[args_idx]));
805         wac_info.dbgWave_msg.MemoryVA = NULL;
806
807         mutex_lock(kfd_get_dbgmgr_mutex());
808
809         pr_debug("Calling dbg manager process %p, operand %u, mode %u, trapId %u, message %u\n",
810                         wac_info.process, wac_info.operand,
811                         wac_info.mode, wac_info.trapId,
812                         wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);
813
814         status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info);
815
816         pr_debug("Returned status of dbg manager is %ld\n", status);
817
818         mutex_unlock(kfd_get_dbgmgr_mutex());
819
820         kfree(args_buff);
821
822         return status;
823 }
824
825 static int kfd_ioctl_get_clock_counters(struct file *filep,
826                                 struct kfd_process *p, void *data)
827 {
828         struct kfd_ioctl_get_clock_counters_args *args = data;
829         struct kfd_dev *dev;
830
831         dev = kfd_device_by_id(args->gpu_id);
832         if (dev)
833                 /* Reading GPU clock counter from KGD */
834                 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd);
835         else
836                 /* Node without GPU resource */
837                 args->gpu_clock_counter = 0;
838
839         /* No access to rdtsc. Using raw monotonic time */
840         args->cpu_clock_counter = ktime_get_raw_ns();
841         args->system_clock_counter = ktime_get_boottime_ns();
842
843         /* Since the counter is in nanoseconds, we use a 1GHz frequency */
844         args->system_clock_freq = 1000000000;
845
846         return 0;
847 }
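/*
 * Illustrative note (not part of the driver): because system_clock_freq is
 * reported as 1GHz, user mode can treat the returned counters directly as
 * nanosecond timestamps, e.g.
 *
 *	elapsed_ns = args.system_clock_counter - prev_system_clock_counter;
 */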
848
849
850 static int kfd_ioctl_get_process_apertures(struct file *filp,
851                                 struct kfd_process *p, void *data)
852 {
853         struct kfd_ioctl_get_process_apertures_args *args = data;
854         struct kfd_process_device_apertures *pAperture;
855         struct kfd_process_device *pdd;
856
857         dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
858
859         args->num_of_nodes = 0;
860
861         mutex_lock(&p->mutex);
862
863         /* if the process-device list isn't empty */
864         if (kfd_has_process_device_data(p)) {
865                 /* Run over all pdd of the process */
866                 pdd = kfd_get_first_process_device_data(p);
867                 do {
868                         pAperture =
869                                 &args->process_apertures[args->num_of_nodes];
870                         pAperture->gpu_id = pdd->dev->id;
871                         pAperture->lds_base = pdd->lds_base;
872                         pAperture->lds_limit = pdd->lds_limit;
873                         pAperture->gpuvm_base = pdd->gpuvm_base;
874                         pAperture->gpuvm_limit = pdd->gpuvm_limit;
875                         pAperture->scratch_base = pdd->scratch_base;
876                         pAperture->scratch_limit = pdd->scratch_limit;
877
878                         dev_dbg(kfd_device,
879                                 "node id %u\n", args->num_of_nodes);
880                         dev_dbg(kfd_device,
881                                 "gpu id %u\n", pdd->dev->id);
882                         dev_dbg(kfd_device,
883                                 "lds_base %llX\n", pdd->lds_base);
884                         dev_dbg(kfd_device,
885                                 "lds_limit %llX\n", pdd->lds_limit);
886                         dev_dbg(kfd_device,
887                                 "gpuvm_base %llX\n", pdd->gpuvm_base);
888                         dev_dbg(kfd_device,
889                                 "gpuvm_limit %llX\n", pdd->gpuvm_limit);
890                         dev_dbg(kfd_device,
891                                 "scratch_base %llX\n", pdd->scratch_base);
892                         dev_dbg(kfd_device,
893                                 "scratch_limit %llX\n", pdd->scratch_limit);
894
895                         args->num_of_nodes++;
896
897                         pdd = kfd_get_next_process_device_data(p, pdd);
898                 } while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
899         }
900
901         mutex_unlock(&p->mutex);
902
903         return 0;
904 }
905
906 static int kfd_ioctl_get_process_apertures_new(struct file *filp,
907                                 struct kfd_process *p, void *data)
908 {
909         struct kfd_ioctl_get_process_apertures_new_args *args = data;
910         struct kfd_process_device_apertures *pa;
911         struct kfd_process_device *pdd;
912         uint32_t nodes = 0;
913         int ret;
914
915         dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
916
917         if (args->num_of_nodes == 0) {
918                 /* Return number of nodes, so that user space can allocate
919                  * sufficient memory
920                  */
921                 mutex_lock(&p->mutex);
922
923                 if (!kfd_has_process_device_data(p))
924                         goto out_unlock;
925
926                 /* Run over all pdd of the process */
927                 pdd = kfd_get_first_process_device_data(p);
928                 do {
929                         args->num_of_nodes++;
930                         pdd = kfd_get_next_process_device_data(p, pdd);
931                 } while (pdd);
932
933                 goto out_unlock;
934         }
935
936         /* Fill in process-aperture information for all available
937          * nodes, but not more than args->num_of_nodes as that is
938          * the amount of memory allocated by user
939          */
940         pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
941                                 args->num_of_nodes), GFP_KERNEL);
942         if (!pa)
943                 return -ENOMEM;
944
945         mutex_lock(&p->mutex);
946
947         if (!kfd_has_process_device_data(p)) {
948                 args->num_of_nodes = 0;
949                 kfree(pa);
950                 goto out_unlock;
951         }
952
953         /* Run over all pdd of the process */
954         pdd = kfd_get_first_process_device_data(p);
955         do {
956                 pa[nodes].gpu_id = pdd->dev->id;
957                 pa[nodes].lds_base = pdd->lds_base;
958                 pa[nodes].lds_limit = pdd->lds_limit;
959                 pa[nodes].gpuvm_base = pdd->gpuvm_base;
960                 pa[nodes].gpuvm_limit = pdd->gpuvm_limit;
961                 pa[nodes].scratch_base = pdd->scratch_base;
962                 pa[nodes].scratch_limit = pdd->scratch_limit;
963
964                 dev_dbg(kfd_device,
965                         "gpu id %u\n", pdd->dev->id);
966                 dev_dbg(kfd_device,
967                         "lds_base %llX\n", pdd->lds_base);
968                 dev_dbg(kfd_device,
969                         "lds_limit %llX\n", pdd->lds_limit);
970                 dev_dbg(kfd_device,
971                         "gpuvm_base %llX\n", pdd->gpuvm_base);
972                 dev_dbg(kfd_device,
973                         "gpuvm_limit %llX\n", pdd->gpuvm_limit);
974                 dev_dbg(kfd_device,
975                         "scratch_base %llX\n", pdd->scratch_base);
976                 dev_dbg(kfd_device,
977                         "scratch_limit %llX\n", pdd->scratch_limit);
978                 nodes++;
979
980                 pdd = kfd_get_next_process_device_data(p, pdd);
981         } while (pdd && (nodes < args->num_of_nodes));
982         mutex_unlock(&p->mutex);
983
984         args->num_of_nodes = nodes;
985         ret = copy_to_user(
986                         (void __user *)args->kfd_process_device_apertures_ptr,
987                         pa,
988                         (nodes * sizeof(struct kfd_process_device_apertures)));
989         kfree(pa);
990         return ret ? -EFAULT : 0;
991
992 out_unlock:
993         mutex_unlock(&p->mutex);
994         return 0;
995 }
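/*
 * Illustrative user mode sketch (not part of the driver) of the two-call
 * protocol implemented above: first query the node count with
 * num_of_nodes == 0, then allocate the array and fetch the apertures.
 * kfd_fd is a hypothetical file descriptor for /dev/kfd.
 *
 *	struct kfd_ioctl_get_process_apertures_new_args args = {0};
 *	struct kfd_process_device_apertures *pa;
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 *	pa = calloc(args.num_of_nodes, sizeof(*pa));
 *	args.kfd_process_device_apertures_ptr = (uint64_t)pa;
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 */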
996
997 static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
998                                         void *data)
999 {
1000         struct kfd_ioctl_create_event_args *args = data;
1001         int err;
1002
1003         /* For dGPUs the event page is allocated in user mode. The
1004          * handle is passed to KFD with the first call to this IOCTL
1005          * through the event_page_offset field.
1006          */
1007         if (args->event_page_offset) {
1008                 struct kfd_dev *kfd;
1009                 struct kfd_process_device *pdd;
1010                 void *mem, *kern_addr;
1011                 uint64_t size;
1012
1013                 if (p->signal_page) {
1014                         pr_err("Event page is already set\n");
1015                         return -EINVAL;
1016                 }
1017
1018                 kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
1019                 if (!kfd) {
1020                         pr_err("Getting device by id failed in %s\n", __func__);
1021                         return -EINVAL;
1022                 }
1023
1024                 mutex_lock(&p->mutex);
1025                 pdd = kfd_bind_process_to_device(kfd, p);
1026                 if (IS_ERR(pdd)) {
1027                         err = PTR_ERR(pdd);
1028                         goto out_unlock;
1029                 }
1030
1031                 mem = kfd_process_device_translate_handle(pdd,
1032                                 GET_IDR_HANDLE(args->event_page_offset));
1033                 if (!mem) {
1034                         pr_err("Can't find BO, offset is 0x%llx\n",
1035                                args->event_page_offset);
1036                         err = -EINVAL;
1037                         goto out_unlock;
1038                 }
1039                 mutex_unlock(&p->mutex);
1040
1041                 err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
1042                                                 mem, &kern_addr, &size);
1043                 if (err) {
1044                         pr_err("Failed to map event page to kernel\n");
1045                         return err;
1046                 }
1047
1048                 err = kfd_event_page_set(p, kern_addr, size);
1049                 if (err) {
1050                         pr_err("Failed to set event page\n");
1051                         return err;
1052                 }
1053         }
1054
1055         err = kfd_event_create(filp, p, args->event_type,
1056                                 args->auto_reset != 0, args->node_id,
1057                                 &args->event_id, &args->event_trigger_data,
1058                                 &args->event_page_offset,
1059                                 &args->event_slot_index);
1060
1061         return err;
1062
1063 out_unlock:
1064         mutex_unlock(&p->mutex);
1065         return err;
1066 }
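/*
 * Illustrative user mode sketch (not part of the driver): creating a signal
 * event and waiting on it.  kfd_fd is hypothetical; on dGPUs the first call
 * would also pass the event page handle in event_page_offset as described
 * above.
 *
 *	struct kfd_ioctl_create_event_args ev = {
 *		.event_type = KFD_IOC_EVENT_SIGNAL,
 *		.auto_reset = 1,
 *	};
 *	ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &ev);
 *
 *	struct kfd_event_data ed = { .event_id = ev.event_id };
 *	struct kfd_ioctl_wait_events_args wait = {
 *		.events_ptr = (uint64_t)&ed,
 *		.num_events = 1,
 *		.wait_for_all = 1,
 *		.timeout = 1000,	(milliseconds)
 *	};
 *	ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait);
 */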
1067
1068 static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
1069                                         void *data)
1070 {
1071         struct kfd_ioctl_destroy_event_args *args = data;
1072
1073         return kfd_event_destroy(p, args->event_id);
1074 }
1075
1076 static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
1077                                 void *data)
1078 {
1079         struct kfd_ioctl_set_event_args *args = data;
1080
1081         return kfd_set_event(p, args->event_id);
1082 }
1083
1084 static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
1085                                 void *data)
1086 {
1087         struct kfd_ioctl_reset_event_args *args = data;
1088
1089         return kfd_reset_event(p, args->event_id);
1090 }
1091
1092 static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
1093                                 void *data)
1094 {
1095         struct kfd_ioctl_wait_events_args *args = data;
1096         int err;
1097
1098         err = kfd_wait_on_events(p, args->num_events,
1099                         (void __user *)args->events_ptr,
1100                         (args->wait_for_all != 0),
1101                         args->timeout, &args->wait_result);
1102
1103         return err;
1104 }
1105 static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
1106                                         struct kfd_process *p, void *data)
1107 {
1108         struct kfd_ioctl_set_scratch_backing_va_args *args = data;
1109         struct kfd_process_device *pdd;
1110         struct kfd_dev *dev;
1111         long err;
1112
1113         dev = kfd_device_by_id(args->gpu_id);
1114         if (!dev)
1115                 return -EINVAL;
1116
1117         mutex_lock(&p->mutex);
1118
1119         pdd = kfd_bind_process_to_device(dev, p);
1120         if (IS_ERR(pdd)) {
1121                 err = PTR_ERR(pdd);
1122                 goto bind_process_to_device_fail;
1123         }
1124
1125         pdd->qpd.sh_hidden_private_base = args->va_addr;
1126
1127         mutex_unlock(&p->mutex);
1128
1129         if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
1130             pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
1131                 dev->kfd2kgd->set_scratch_backing_va(
1132                         dev->kgd, args->va_addr, pdd->qpd.vmid);
1133
1134         return 0;
1135
1136 bind_process_to_device_fail:
1137         mutex_unlock(&p->mutex);
1138         return err;
1139 }
1140
1141 static int kfd_ioctl_get_tile_config(struct file *filep,
1142                 struct kfd_process *p, void *data)
1143 {
1144         struct kfd_ioctl_get_tile_config_args *args = data;
1145         struct kfd_dev *dev;
1146         struct tile_config config;
1147         int err = 0;
1148
1149         dev = kfd_device_by_id(args->gpu_id);
1150         if (!dev)
1151                 return -EINVAL;
1152
1153         dev->kfd2kgd->get_tile_config(dev->kgd, &config);
1154
1155         args->gb_addr_config = config.gb_addr_config;
1156         args->num_banks = config.num_banks;
1157         args->num_ranks = config.num_ranks;
1158
1159         if (args->num_tile_configs > config.num_tile_configs)
1160                 args->num_tile_configs = config.num_tile_configs;
1161         err = copy_to_user((void __user *)args->tile_config_ptr,
1162                         config.tile_config_ptr,
1163                         args->num_tile_configs * sizeof(uint32_t));
1164         if (err) {
1165                 args->num_tile_configs = 0;
1166                 return -EFAULT;
1167         }
1168
1169         if (args->num_macro_tile_configs > config.num_macro_tile_configs)
1170                 args->num_macro_tile_configs =
1171                                 config.num_macro_tile_configs;
1172         err = copy_to_user((void __user *)args->macro_tile_config_ptr,
1173                         config.macro_tile_config_ptr,
1174                         args->num_macro_tile_configs * sizeof(uint32_t));
1175         if (err) {
1176                 args->num_macro_tile_configs = 0;
1177                 return -EFAULT;
1178         }
1179
1180         return 0;
1181 }
1182
1183 static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
1184                                 void *data)
1185 {
1186         struct kfd_ioctl_acquire_vm_args *args = data;
1187         struct kfd_process_device *pdd;
1188         struct kfd_dev *dev;
1189         struct file *drm_file;
1190         int ret;
1191
1192         dev = kfd_device_by_id(args->gpu_id);
1193         if (!dev)
1194                 return -EINVAL;
1195
1196         drm_file = fget(args->drm_fd);
1197         if (!drm_file)
1198                 return -EINVAL;
1199
1200         mutex_lock(&p->mutex);
1201
1202         pdd = kfd_get_process_device_data(dev, p);
1203         if (!pdd) {
1204                 ret = -EINVAL;
1205                 goto err_unlock;
1206         }
1207
1208         if (pdd->drm_file) {
1209                 ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
1210                 goto err_unlock;
1211         }
1212
1213         ret = kfd_process_device_init_vm(pdd, drm_file);
1214         if (ret)
1215                 goto err_unlock;
1216         /* On success, the PDD keeps the drm_file reference */
1217         mutex_unlock(&p->mutex);
1218
1219         return 0;
1220
1221 err_unlock:
1222         mutex_unlock(&p->mutex);
1223         fput(drm_file);
1224         return ret;
1225 }
1226
1227 bool kfd_dev_is_large_bar(struct kfd_dev *dev)
1228 {
1229         struct kfd_local_mem_info mem_info;
1230
1231         if (debug_largebar) {
1232                 pr_debug("Simulate large-bar allocation on non large-bar machine\n");
1233                 return true;
1234         }
1235
1236         if (dev->device_info->needs_iommu_device)
1237                 return false;
1238
1239         amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
1240         if (mem_info.local_mem_size_private == 0 &&
1241                         mem_info.local_mem_size_public > 0)
1242                 return true;
1243         return false;
1244 }
1245
1246 static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
1247                                         struct kfd_process *p, void *data)
1248 {
1249         struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
1250         struct kfd_process_device *pdd;
1251         void *mem;
1252         struct kfd_dev *dev;
1253         int idr_handle;
1254         long err;
1255         uint64_t offset = args->mmap_offset;
1256         uint32_t flags = args->flags;
1257
1258         if (args->size == 0)
1259                 return -EINVAL;
1260
1261         dev = kfd_device_by_id(args->gpu_id);
1262         if (!dev)
1263                 return -EINVAL;
1264
1265         if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
1266                 (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
1267                 !kfd_dev_is_large_bar(dev)) {
1268                 pr_err("Allocation of host-visible VRAM on a small-BAR system is not allowed\n");
1269                 return -EINVAL;
1270         }
1271
1272         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
1273                 if (args->size != kfd_doorbell_process_slice(dev))
1274                         return -EINVAL;
1275                 offset = kfd_get_process_doorbells(dev, p);
1276         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
1277                 if (args->size != PAGE_SIZE)
1278                         return -EINVAL;
1279                 offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
1280                 if (!offset)
1281                         return -ENOMEM;
1282         }
1283
1284         mutex_lock(&p->mutex);
1285
1286         pdd = kfd_bind_process_to_device(dev, p);
1287         if (IS_ERR(pdd)) {
1288                 err = PTR_ERR(pdd);
1289                 goto err_unlock;
1290         }
1291
1292         err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1293                 dev->kgd, args->va_addr, args->size,
1294                 pdd->vm, (struct kgd_mem **) &mem, &offset,
1295                 flags);
1296
1297         if (err)
1298                 goto err_unlock;
1299
1300         idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1301         if (idr_handle < 0) {
1302                 err = -EFAULT;
1303                 goto err_free;
1304         }
1305
1306         mutex_unlock(&p->mutex);
1307
1308         args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1309         args->mmap_offset = offset;
1310
1311         /* MMIO is mapped through the kfd device.
1312          * Generate a kfd mmap offset for it.
1313          */
1314         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1315                 args->mmap_offset = KFD_MMAP_TYPE_MMIO
1316                                         | KFD_MMAP_GPU_ID(args->gpu_id);
1317
1318         return 0;
1319
1320 err_free:
1321         amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
1322 err_unlock:
1323         mutex_unlock(&p->mutex);
1324         return err;
1325 }
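/*
 * Illustrative user mode sketch (not part of the driver): allocating
 * host-accessible GTT memory and CPU-mapping it with the returned
 * mmap_offset through the DRM render node that was handed to
 * AMDKFD_IOC_ACQUIRE_VM.  kfd_fd, drm_fd, gpu_id, va and size are
 * hypothetical.
 *
 *	struct kfd_ioctl_alloc_memory_of_gpu_args args = {
 *		.va_addr = va,
 *		.size = size,
 *		.gpu_id = gpu_id,
 *		.flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
 *			 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE,
 *	};
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &args);
 *	cpu_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       drm_fd, args.mmap_offset);
 */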
1326
1327 static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
1328                                         struct kfd_process *p, void *data)
1329 {
1330         struct kfd_ioctl_free_memory_of_gpu_args *args = data;
1331         struct kfd_process_device *pdd;
1332         void *mem;
1333         struct kfd_dev *dev;
1334         int ret;
1335
1336         dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1337         if (!dev)
1338                 return -EINVAL;
1339
1340         mutex_lock(&p->mutex);
1341
1342         pdd = kfd_get_process_device_data(dev, p);
1343         if (!pdd) {
1344                 pr_err("Process device data doesn't exist\n");
1345                 ret = -EINVAL;
1346                 goto err_unlock;
1347         }
1348
1349         mem = kfd_process_device_translate_handle(
1350                 pdd, GET_IDR_HANDLE(args->handle));
1351         if (!mem) {
1352                 ret = -EINVAL;
1353                 goto err_unlock;
1354         }
1355
1356         ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
1357                                                 (struct kgd_mem *)mem);
1358
1359         /* If freeing the buffer failed, leave the handle in place for
1360          * clean-up during process tear-down.
1361          */
1362         if (!ret)
1363                 kfd_process_device_remove_obj_handle(
1364                         pdd, GET_IDR_HANDLE(args->handle));
1365
1366 err_unlock:
1367         mutex_unlock(&p->mutex);
1368         return ret;
1369 }
1370
1371 static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
1372                                         struct kfd_process *p, void *data)
1373 {
1374         struct kfd_ioctl_map_memory_to_gpu_args *args = data;
1375         struct kfd_process_device *pdd, *peer_pdd;
1376         void *mem;
1377         struct kfd_dev *dev, *peer;
1378         long err = 0;
1379         int i;
1380         uint32_t *devices_arr = NULL;
1381
1382         dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1383         if (!dev)
1384                 return -EINVAL;
1385
1386         if (!args->n_devices) {
1387                 pr_debug("Device IDs array empty\n");
1388                 return -EINVAL;
1389         }
1390         if (args->n_success > args->n_devices) {
1391                 pr_debug("n_success exceeds n_devices\n");
1392                 return -EINVAL;
1393         }
1394
1395         devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1396                                     GFP_KERNEL);
1397         if (!devices_arr)
1398                 return -ENOMEM;
1399
1400         err = copy_from_user(devices_arr,
1401                              (void __user *)args->device_ids_array_ptr,
1402                              args->n_devices * sizeof(*devices_arr));
1403         if (err != 0) {
1404                 err = -EFAULT;
1405                 goto copy_from_user_failed;
1406         }
1407
1408         mutex_lock(&p->mutex);
1409
1410         pdd = kfd_bind_process_to_device(dev, p);
1411         if (IS_ERR(pdd)) {
1412                 err = PTR_ERR(pdd);
1413                 goto bind_process_to_device_failed;
1414         }
1415
1416         mem = kfd_process_device_translate_handle(pdd,
1417                                                 GET_IDR_HANDLE(args->handle));
1418         if (!mem) {
1419                 err = -ENOMEM;
1420                 goto get_mem_obj_from_handle_failed;
1421         }
1422
1423         for (i = args->n_success; i < args->n_devices; i++) {
1424                 peer = kfd_device_by_id(devices_arr[i]);
1425                 if (!peer) {
1426                         pr_debug("Getting device by id failed for 0x%x\n",
1427                                  devices_arr[i]);
1428                         err = -EINVAL;
1429                         goto get_mem_obj_from_handle_failed;
1430                 }
1431
1432                 peer_pdd = kfd_bind_process_to_device(peer, p);
1433                 if (IS_ERR(peer_pdd)) {
1434                         err = PTR_ERR(peer_pdd);
1435                         goto get_mem_obj_from_handle_failed;
1436                 }
1437                 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1438                         peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
1439                 if (err) {
1440                         pr_err("Failed to map to gpu %d/%d\n",
1441                                i, args->n_devices);
1442                         goto map_memory_to_gpu_failed;
1443                 }
1444                 args->n_success = i+1;
1445         }
1446
1447         mutex_unlock(&p->mutex);
1448
1449         err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
1450         if (err) {
1451                 pr_debug("Sync memory failed, wait interrupted by user signal\n");
1452                 goto sync_memory_failed;
1453         }
1454
1455         /* Flush TLBs after waiting for the page table updates to complete */
1456         for (i = 0; i < args->n_devices; i++) {
1457                 peer = kfd_device_by_id(devices_arr[i]);
1458                 if (WARN_ON_ONCE(!peer))
1459                         continue;
1460                 peer_pdd = kfd_get_process_device_data(peer, p);
1461                 if (WARN_ON_ONCE(!peer_pdd))
1462                         continue;
1463                 kfd_flush_tlb(peer_pdd);
1464         }
1465
1466         kfree(devices_arr);
1467
1468         return err;
1469
1470 bind_process_to_device_failed:
1471 get_mem_obj_from_handle_failed:
1472 map_memory_to_gpu_failed:
1473         mutex_unlock(&p->mutex);
1474 copy_from_user_failed:
1475 sync_memory_failed:
1476         kfree(devices_arr);
1477
1478         return err;
1479 }
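/*
 * Illustrative user mode sketch (not part of the driver): mapping one
 * allocation to several GPUs.  n_success is updated as devices are mapped,
 * so a caller could resume from the first unmapped device after a failure.
 * kfd_fd, handle, gpu_ids and n are hypothetical.
 *
 *	struct kfd_ioctl_map_memory_to_gpu_args args = {
 *		.handle = handle,
 *		.device_ids_array_ptr = (uint64_t)gpu_ids,
 *		.n_devices = n,
 *		.n_success = 0,
 *	};
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args))
 *		retry later, starting from args.n_success;
 */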
1480
1481 static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1482                                         struct kfd_process *p, void *data)
1483 {
1484         struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
1485         struct kfd_process_device *pdd, *peer_pdd;
1486         void *mem;
1487         struct kfd_dev *dev, *peer;
1488         long err = 0;
1489         uint32_t *devices_arr = NULL, i;
1490
1491         dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1492         if (!dev)
1493                 return -EINVAL;
1494
1495         if (!args->n_devices) {
1496                 pr_debug("Device IDs array empty\n");
1497                 return -EINVAL;
1498         }
1499         if (args->n_success > args->n_devices) {
1500                 pr_debug("n_success exceeds n_devices\n");
1501                 return -EINVAL;
1502         }
1503
1504         devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1505                                     GFP_KERNEL);
1506         if (!devices_arr)
1507                 return -ENOMEM;
1508
1509         err = copy_from_user(devices_arr,
1510                              (void __user *)args->device_ids_array_ptr,
1511                              args->n_devices * sizeof(*devices_arr));
1512         if (err != 0) {
1513                 err = -EFAULT;
1514                 goto copy_from_user_failed;
1515         }
1516
1517         mutex_lock(&p->mutex);
1518
1519         pdd = kfd_get_process_device_data(dev, p);
1520         if (!pdd) {
1521                 err = -EINVAL;
1522                 goto bind_process_to_device_failed;
1523         }
1524
1525         mem = kfd_process_device_translate_handle(pdd,
1526                                                 GET_IDR_HANDLE(args->handle));
1527         if (!mem) {
1528                 err = -ENOMEM;
1529                 goto get_mem_obj_from_handle_failed;
1530         }
1531
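        /*
         * Unmap from each device. Unlike the map path, the devices must
         * already have process device data; no new binding is created here.
         */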
1532         for (i = args->n_success; i < args->n_devices; i++) {
1533                 peer = kfd_device_by_id(devices_arr[i]);
1534                 if (!peer) {
1535                         err = -EINVAL;
1536                         goto get_mem_obj_from_handle_failed;
1537                 }
1538
1539                 peer_pdd = kfd_get_process_device_data(peer, p);
1540                 if (!peer_pdd) {
1541                         err = -ENODEV;
1542                         goto get_mem_obj_from_handle_failed;
1543                 }
1544                 err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1545                         peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
1546                 if (err) {
1547                         pr_err("Failed to unmap from gpu %d/%d\n",
1548                                i, args->n_devices);
1549                         goto unmap_memory_from_gpu_failed;
1550                 }
1551                 args->n_success = i + 1;
1552         }
1553         kfree(devices_arr);
1554
1555         mutex_unlock(&p->mutex);
1556
1557         return 0;
1558
1559 bind_process_to_device_failed:
1560 get_mem_obj_from_handle_failed:
1561 unmap_memory_from_gpu_failed:
1562         mutex_unlock(&p->mutex);
1563 copy_from_user_failed:
1564         kfree(devices_arr);
1565         return err;
1566 }
1567
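/*
 * Query the size, owning GPU and optional metadata of a DMA-buf so it can
 * later be imported with AMDKFD_IOC_IMPORT_DMABUF. Any KFD device can
 * service the KGD query; the gpu_id is recovered afterwards by reverse
 * lookup on the kgd pointer the query returns.
 */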
1568 static int kfd_ioctl_get_dmabuf_info(struct file *filep,
1569                 struct kfd_process *p, void *data)
1570 {
1571         struct kfd_ioctl_get_dmabuf_info_args *args = data;
1572         struct kfd_dev *dev = NULL;
1573         struct kgd_dev *dma_buf_kgd;
1574         void *metadata_buffer = NULL;
1575         uint32_t flags;
1576         unsigned int i;
1577         int r;
1578
1579         /* Find a KFD GPU device that supports the get_dmabuf_info query */
1580         for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
1581                 if (dev)
1582                         break;
1583         if (!dev)
1584                 return -EINVAL;
1585
1586         if (args->metadata_ptr) {
1587                 metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
1588                 if (!metadata_buffer)
1589                         return -ENOMEM;
1590         }
1591
1592         /* Get dmabuf info from KGD */
1593         r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
1594                                           &dma_buf_kgd, &args->size,
1595                                           metadata_buffer, args->metadata_size,
1596                                           &args->metadata_size, &flags);
1597         if (r)
1598                 goto exit;
1599
1600         /* Reverse-lookup gpu_id from kgd pointer */
1601         dev = kfd_device_by_kgd(dma_buf_kgd);
1602         if (!dev) {
1603                 r = -EINVAL;
1604                 goto exit;
1605         }
1606         args->gpu_id = dev->id;
1607         args->flags = flags;
1608
1609         /* Copy metadata buffer to user mode */
1610         if (metadata_buffer) {
1611                 r = copy_to_user((void __user *)args->metadata_ptr,
1612                                  metadata_buffer, args->metadata_size);
1613                 if (r != 0)
1614                         r = -EFAULT;
1615         }
1616
1617 exit:
1618         kfree(metadata_buffer);
1619
1620         return r;
1621 }
1622
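/*
 * Import a DMA-buf into the process's GPUVM at va_addr and return a buffer
 * handle usable with the map/unmap ioctls above.
 *
 * Illustrative userspace call (a sketch only, assuming the args layout in
 * uapi/linux/kfd_ioctl.h with the field names used below):
 *
 *	struct kfd_ioctl_import_dmabuf_args args = {
 *		.va_addr   = va,
 *		.gpu_id    = gpu_id,
 *		.dmabuf_fd = fd,
 *	};
 *	if (ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &args) == 0)
 *		handle = args.handle;
 */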
1623 static int kfd_ioctl_import_dmabuf(struct file *filep,
1624                                    struct kfd_process *p, void *data)
1625 {
1626         struct kfd_ioctl_import_dmabuf_args *args = data;
1627         struct kfd_process_device *pdd;
1628         struct dma_buf *dmabuf;
1629         struct kfd_dev *dev;
1630         int idr_handle;
1631         uint64_t size;
1632         void *mem;
1633         int r;
1634
1635         dev = kfd_device_by_id(args->gpu_id);
1636         if (!dev)
1637                 return -EINVAL;
1638
1639         dmabuf = dma_buf_get(args->dmabuf_fd);
1640         if (IS_ERR(dmabuf))
1641                 return PTR_ERR(dmabuf);
1642
1643         mutex_lock(&p->mutex);
1644
1645         pdd = kfd_bind_process_to_device(dev, p);
1646         if (IS_ERR(pdd)) {
1647                 r = PTR_ERR(pdd);
1648                 goto err_unlock;
1649         }
1650
1651         r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
1652                                               args->va_addr, pdd->vm,
1653                                               (struct kgd_mem **)&mem, &size,
1654                                               NULL);
1655         if (r)
1656                 goto err_unlock;
1657
1658         idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1659         if (idr_handle < 0) {
1660                 r = -EFAULT;
1661                 goto err_free;
1662         }
1663
1664         mutex_unlock(&p->mutex);
1665
1666         args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1667
1668         return 0;
1669
1670 err_free:
1671         amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
1672 err_unlock:
1673         mutex_unlock(&p->mutex);
1674         return r;
1675 }
1676
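/*
 * The ioctl table is indexed by _IOC_NR(cmd), so every entry must occupy
 * the slot matching its command number; kfd_ioctl() relies on this when it
 * looks up the handler.
 */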
1677 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
1678         [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
1679                             .cmd_drv = 0, .name = #ioctl}
1680
1681 /** Ioctl table */
1682 static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
1683         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
1684                         kfd_ioctl_get_version, 0),
1685
1686         AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
1687                         kfd_ioctl_create_queue, 0),
1688
1689         AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
1690                         kfd_ioctl_destroy_queue, 0),
1691
1692         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
1693                         kfd_ioctl_set_memory_policy, 0),
1694
1695         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
1696                         kfd_ioctl_get_clock_counters, 0),
1697
1698         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
1699                         kfd_ioctl_get_process_apertures, 0),
1700
1701         AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
1702                         kfd_ioctl_update_queue, 0),
1703
1704         AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
1705                         kfd_ioctl_create_event, 0),
1706
1707         AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
1708                         kfd_ioctl_destroy_event, 0),
1709
1710         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
1711                         kfd_ioctl_set_event, 0),
1712
1713         AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
1714                         kfd_ioctl_reset_event, 0),
1715
1716         AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
1717                         kfd_ioctl_wait_events, 0),
1718
1719         AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER,
1720                         kfd_ioctl_dbg_register, 0),
1721
1722         AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER,
1723                         kfd_ioctl_dbg_unregister, 0),
1724
1725         AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH,
1726                         kfd_ioctl_dbg_address_watch, 0),
1727
1728         AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
1729                         kfd_ioctl_dbg_wave_control, 0),
1730
1731         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
1732                         kfd_ioctl_set_scratch_backing_va, 0),
1733
1734         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
1735                         kfd_ioctl_get_tile_config, 0),
1736
1737         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
1738                         kfd_ioctl_set_trap_handler, 0),
1739
1740         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
1741                         kfd_ioctl_get_process_apertures_new, 0),
1742
1743         AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
1744                         kfd_ioctl_acquire_vm, 0),
1745
1746         AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
1747                         kfd_ioctl_alloc_memory_of_gpu, 0),
1748
1749         AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
1750                         kfd_ioctl_free_memory_of_gpu, 0),
1751
1752         AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
1753                         kfd_ioctl_map_memory_to_gpu, 0),
1754
1755         AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
1756                         kfd_ioctl_unmap_memory_from_gpu, 0),
1757
1758         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
1759                         kfd_ioctl_set_cu_mask, 0),
1760
1761         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
1762                         kfd_ioctl_get_queue_wave_state, 0),
1763
1764         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
1765                                 kfd_ioctl_get_dmabuf_info, 0),
1766
1767         AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
1768                                 kfd_ioctl_import_dmabuf, 0),
1769
1770 };
1771
1772 #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
1773
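/*
 * Common ioctl dispatcher: validate the command number, reconcile the
 * argument size userspace passed (usize) with the kernel's definition
 * (asize), copy the argument struct in, call the handler from the table
 * above, and copy the result back out for IOC_OUT commands.
 */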
1774 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
1775 {
1776         struct kfd_process *process;
1777         amdkfd_ioctl_t *func;
1778         const struct amdkfd_ioctl_desc *ioctl = NULL;
1779         unsigned int nr = _IOC_NR(cmd);
1780         char stack_kdata[128];
1781         char *kdata = NULL;
1782         unsigned int usize, asize;
1783         int retcode = -EINVAL;
1784
1785         if (nr >= AMDKFD_CORE_IOCTL_COUNT)
1786                 goto err_i1;
1787
1788         if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
1789                 u32 amdkfd_size;
1790
1791                 ioctl = &amdkfd_ioctls[nr];
1792
1793                 amdkfd_size = _IOC_SIZE(ioctl->cmd);
1794                 usize = asize = _IOC_SIZE(cmd);
1795                 if (amdkfd_size > asize)
1796                         asize = amdkfd_size;
1797
1798                 cmd = ioctl->cmd;
1799         } else
1800                 goto err_i1;
1801
1802         dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
1803
1804         process = kfd_get_process(current);
1805         if (IS_ERR(process)) {
1806                 dev_dbg(kfd_device, "no process\n");
1807                 goto err_i1;
1808         }
1809
1810         /* Do not trust userspace, use our own definition */
1811         func = ioctl->func;
1812
1813         if (unlikely(!func)) {
1814                 dev_dbg(kfd_device, "no function\n");
1815                 retcode = -EINVAL;
1816                 goto err_i1;
1817         }
1818
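        /*
         * Small argument structs are kept in stack_kdata; larger ones are
         * allocated from the heap. If the kernel's struct is bigger than
         * what userspace provided, the extra tail is zeroed.
         */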
1819         if (cmd & (IOC_IN | IOC_OUT)) {
1820                 if (asize <= sizeof(stack_kdata)) {
1821                         kdata = stack_kdata;
1822                 } else {
1823                         kdata = kmalloc(asize, GFP_KERNEL);
1824                         if (!kdata) {
1825                                 retcode = -ENOMEM;
1826                                 goto err_i1;
1827                         }
1828                 }
1829                 if (asize > usize)
1830                         memset(kdata + usize, 0, asize - usize);
1831         }
1832
1833         if (cmd & IOC_IN) {
1834                 if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
1835                         retcode = -EFAULT;
1836                         goto err_i1;
1837                 }
1838         } else if (cmd & IOC_OUT) {
1839                 memset(kdata, 0, usize);
1840         }
1841
1842         retcode = func(filep, process, kdata);
1843
1844         if (cmd & IOC_OUT)
1845                 if (copy_to_user((void __user *)arg, kdata, usize) != 0)
1846                         retcode = -EFAULT;
1847
1848 err_i1:
1849         if (!ioctl)
1850                 dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
1851                           task_pid_nr(current), cmd, nr);
1852
1853         if (kdata != stack_kdata)
1854                 kfree(kdata);
1855
1856         if (retcode)
1857                 dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
1858                                 nr, arg, retcode);
1859
1860         return retcode;
1861 }
1862
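/*
 * Map the device's single MMIO remap page into the process as uncached IO
 * memory; exactly one page may be requested.
 */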
1863 static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
1864                       struct vm_area_struct *vma)
1865 {
1866         phys_addr_t address;
1867         int ret;
1868
1869         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1870                 return -EINVAL;
1871
1872         address = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
1873
1874         vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
1875                                 VM_DONTDUMP | VM_PFNMAP;
1876
1877         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1878
1879         pr_debug("pasid 0x%x mapping mmio page\n"
1880                  "     target user address == 0x%08llX\n"
1881                  "     physical address    == 0x%08llX\n"
1882                  "     vm_flags            == 0x%04lX\n"
1883                  "     size                == 0x%04lX\n",
1884                  process->pasid, (unsigned long long) vma->vm_start,
1885                  address, vma->vm_flags, PAGE_SIZE);
1886
1887         ret = io_remap_pfn_range(vma,
1888                                 vma->vm_start,
1889                                 address >> PAGE_SHIFT,
1890                                 PAGE_SIZE,
1891                                 vma->vm_page_prot);
1892         return ret;
1893 }
1894
1895
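/*
 * Dispatch mmap requests by decoding the mmap offset (vm_pgoff): one bit
 * field selects the mapping type (doorbell, events, reserved memory or
 * MMIO) and, for per-device mappings, another field carries the gpu_id.
 */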
1896 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
1897 {
1898         struct kfd_process *process;
1899         struct kfd_dev *dev = NULL;
1900         unsigned long mmap_offset;
1901         unsigned int gpu_id;
1902
1903         process = kfd_get_process(current);
1904         if (IS_ERR(process))
1905                 return PTR_ERR(process);
1906
1907         mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
1908         gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
1909         if (gpu_id)
1910                 dev = kfd_device_by_id(gpu_id);
1911
1912         switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
1913         case KFD_MMAP_TYPE_DOORBELL:
1914                 if (!dev)
1915                         return -ENODEV;
1916                 return kfd_doorbell_mmap(dev, process, vma);
1917
1918         case KFD_MMAP_TYPE_EVENTS:
1919                 return kfd_event_mmap(process, vma);
1920
1921         case KFD_MMAP_TYPE_RESERVED_MEM:
1922                 if (!dev)
1923                         return -ENODEV;
1924                 return kfd_reserved_mem_mmap(dev, process, vma);
1925         case KFD_MMAP_TYPE_MMIO:
1926                 if (!dev)
1927                         return -ENODEV;
1928                 return kfd_mmio_mmap(dev, process, vma);
1929         }
1930
1931         return -EFAULT;
1932 }