1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <linux/device.h>
25 #include <linux/export.h>
26 #include <linux/err.h>
27 #include <linux/fs.h>
28 #include <linux/file.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <linux/compat.h>
33 #include <uapi/linux/kfd_ioctl.h>
34 #include <linux/time.h>
35 #include <linux/mm.h>
36 #include <linux/mman.h>
37 #include <linux/ptrace.h>
38 #include <linux/dma-buf.h>
39 #include <linux/fdtable.h>
40 #include <linux/processor.h>
41 #include "kfd_priv.h"
42 #include "kfd_device_queue_manager.h"
43 #include "kfd_svm.h"
44 #include "amdgpu_amdkfd.h"
45 #include "kfd_smi_events.h"
46 #include "amdgpu_dma_buf.h"
47
48 static long kfd_ioctl(struct file *, unsigned int, unsigned long);
49 static int kfd_open(struct inode *, struct file *);
50 static int kfd_release(struct inode *, struct file *);
51 static int kfd_mmap(struct file *, struct vm_area_struct *);
52
53 static const char kfd_dev_name[] = "kfd";
54
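/* File operations for /dev/kfd. compat_ptr_ioctl is the generic helper that
 * forwards ioctls from 32-bit user space to kfd_ioctl() after converting the
 * pointer argument, so no separate compat handler is needed.
 */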
55 static const struct file_operations kfd_fops = {
56         .owner = THIS_MODULE,
57         .unlocked_ioctl = kfd_ioctl,
58         .compat_ioctl = compat_ptr_ioctl,
59         .open = kfd_open,
60         .release = kfd_release,
61         .mmap = kfd_mmap,
62 };
63
64 static int kfd_char_dev_major = -1;
65 static struct class *kfd_class;
66 struct device *kfd_device;
67
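/* Look up the process device data for @gpu_id with p->mutex held. On
 * success the mutex stays held and must be released by the caller via
 * kfd_unlock_pdd(); on failure the mutex is dropped and NULL is returned.
 */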
68 static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p, __u32 gpu_id)
69 {
70         struct kfd_process_device *pdd;
71
72         mutex_lock(&p->mutex);
73         pdd = kfd_process_device_data_by_id(p, gpu_id);
74
75         if (pdd)
76                 return pdd;
77
78         mutex_unlock(&p->mutex);
79         return NULL;
80 }
81
82 static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
83 {
84         mutex_unlock(&pdd->process->mutex);
85 }
86
87 int kfd_chardev_init(void)
88 {
89         int err = 0;
90
91         kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
92         err = kfd_char_dev_major;
93         if (err < 0)
94                 goto err_register_chrdev;
95
96         kfd_class = class_create(THIS_MODULE, kfd_dev_name);
97         err = PTR_ERR(kfd_class);
98         if (IS_ERR(kfd_class))
99                 goto err_class_create;
100
101         kfd_device = device_create(kfd_class, NULL,
102                                         MKDEV(kfd_char_dev_major, 0),
103                                         NULL, kfd_dev_name);
104         err = PTR_ERR(kfd_device);
105         if (IS_ERR(kfd_device))
106                 goto err_device_create;
107
108         return 0;
109
110 err_device_create:
111         class_destroy(kfd_class);
112 err_class_create:
113         unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
114 err_register_chrdev:
115         return err;
116 }
117
118 void kfd_chardev_exit(void)
119 {
120         device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
121         class_destroy(kfd_class);
122         unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
123         kfd_device = NULL;
124 }
125
126
127 static int kfd_open(struct inode *inode, struct file *filep)
128 {
129         struct kfd_process *process;
130         bool is_32bit_user_mode;
131
132         if (iminor(inode) != 0)
133                 return -ENODEV;
134
135         is_32bit_user_mode = in_compat_syscall();
136
137         if (is_32bit_user_mode) {
138                 dev_warn(kfd_device,
139                         "Process %d (32-bit) failed to open /dev/kfd\n"
140                         "32-bit processes are not supported by amdkfd\n",
141                         current->pid);
142                 return -EPERM;
143         }
144
145         process = kfd_create_process(filep);
146         if (IS_ERR(process))
147                 return PTR_ERR(process);
148
149         if (kfd_is_locked()) {
150                 dev_dbg(kfd_device, "kfd is locked!\n"
151                                 "process %d unreferenced", process->pasid);
152                 kfd_unref_process(process);
153                 return -EAGAIN;
154         }
155
156         /* filep now owns the reference returned by kfd_create_process */
157         filep->private_data = process;
158
159         dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
160                 process->pasid, process->is_32bit_user_mode);
161
162         return 0;
163 }
164
165 static int kfd_release(struct inode *inode, struct file *filep)
166 {
167         struct kfd_process *process = filep->private_data;
168
169         if (process)
170                 kfd_unref_process(process);
171
172         return 0;
173 }
174
175 static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
176                                         void *data)
177 {
178         struct kfd_ioctl_get_version_args *args = data;
179
180         args->major_version = KFD_IOCTL_MAJOR_VERSION;
181         args->minor_version = KFD_IOCTL_MINOR_VERSION;
182
183         return 0;
184 }
185
186 static int set_queue_properties_from_user(struct queue_properties *q_properties,
187                                 struct kfd_ioctl_create_queue_args *args)
188 {
189         if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
190                 pr_err("Queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
191                 return -EINVAL;
192         }
193
194         if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
195                 pr_err("Queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
196                 return -EINVAL;
197         }
198
199         if ((args->ring_base_address) &&
200                 (!access_ok((const void __user *) args->ring_base_address,
201                         sizeof(uint64_t)))) {
202                 pr_err("Can't access ring base address\n");
203                 return -EFAULT;
204         }
205
206         if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
207                 pr_err("Ring size must be a power of 2 or 0\n");
208                 return -EINVAL;
209         }
210
211         if (!access_ok((const void __user *) args->read_pointer_address,
212                         sizeof(uint32_t))) {
213                 pr_err("Can't access read pointer\n");
214                 return -EFAULT;
215         }
216
217         if (!access_ok((const void __user *) args->write_pointer_address,
218                         sizeof(uint32_t))) {
219                 pr_err("Can't access write pointer\n");
220                 return -EFAULT;
221         }
222
223         if (args->eop_buffer_address &&
224                 !access_ok((const void __user *) args->eop_buffer_address,
225                         sizeof(uint32_t))) {
226                 pr_debug("Can't access eop buffer");
227                 return -EFAULT;
228         }
229
230         if (args->ctx_save_restore_address &&
231                 !access_ok((const void __user *) args->ctx_save_restore_address,
232                         sizeof(uint32_t))) {
233                 pr_debug("Can't access ctx save restore buffer");
234                 return -EFAULT;
235         }
236
237         q_properties->is_interop = false;
238         q_properties->is_gws = false;
239         q_properties->queue_percent = args->queue_percentage;
240         q_properties->priority = args->queue_priority;
241         q_properties->queue_address = args->ring_base_address;
242         q_properties->queue_size = args->ring_size;
243         q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
244         q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
245         q_properties->eop_ring_buffer_address = args->eop_buffer_address;
246         q_properties->eop_ring_buffer_size = args->eop_buffer_size;
247         q_properties->ctx_save_restore_area_address =
248                         args->ctx_save_restore_address;
249         q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
250         q_properties->ctl_stack_size = args->ctl_stack_size;
251         if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
252                 args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
253                 q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
254         else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
255                 q_properties->type = KFD_QUEUE_TYPE_SDMA;
256         else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
257                 q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
258         else
259                 return -ENOTSUPP;
260
261         if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
262                 q_properties->format = KFD_QUEUE_FORMAT_AQL;
263         else
264                 q_properties->format = KFD_QUEUE_FORMAT_PM4;
265
266         pr_debug("Queue Percentage: %d, %d\n",
267                         q_properties->queue_percent, args->queue_percentage);
268
269         pr_debug("Queue Priority: %d, %d\n",
270                         q_properties->priority, args->queue_priority);
271
272         pr_debug("Queue Address: 0x%llX, 0x%llX\n",
273                         q_properties->queue_address, args->ring_base_address);
274
275         pr_debug("Queue Size: 0x%llX, %u\n",
276                         q_properties->queue_size, args->ring_size);
277
278         pr_debug("Queue r/w Pointers: %px, %px\n",
279                         q_properties->read_ptr,
280                         q_properties->write_ptr);
281
282         pr_debug("Queue Format: %d\n", q_properties->format);
283
284         pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
285
286         pr_debug("Queue CTX save area: 0x%llX\n",
287                         q_properties->ctx_save_restore_area_address);
288
289         return 0;
290 }
291
292 static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
293                                         void *data)
294 {
295         struct kfd_ioctl_create_queue_args *args = data;
296         struct kfd_dev *dev;
297         int err = 0;
298         unsigned int queue_id;
299         struct kfd_process_device *pdd;
300         struct queue_properties q_properties;
301         uint32_t doorbell_offset_in_process = 0;
302         struct amdgpu_bo *wptr_bo = NULL;
303
304         memset(&q_properties, 0, sizeof(struct queue_properties));
305
306         pr_debug("Creating queue ioctl\n");
307
308         err = set_queue_properties_from_user(&q_properties, args);
309         if (err)
310                 return err;
311
312         pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
313
314         mutex_lock(&p->mutex);
315
316         pdd = kfd_process_device_data_by_id(p, args->gpu_id);
317         if (!pdd) {
318                 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
319                 err = -EINVAL;
320                 goto err_pdd;
321         }
322         dev = pdd->dev;
323
324         pdd = kfd_bind_process_to_device(dev, p);
325         if (IS_ERR(pdd)) {
326                 err = -ESRCH;
327                 goto err_bind_process;
328         }
329
330         if (!pdd->doorbell_index &&
331             kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
332                 err = -ENOMEM;
333                 goto err_alloc_doorbells;
334         }
335
336         /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
337          * on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
338          */
339         if (dev->shared_resources.enable_mes &&
340                         ((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
341                         >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
342                 struct amdgpu_bo_va_mapping *wptr_mapping;
343                 struct amdgpu_vm *wptr_vm;
344
345                 wptr_vm = drm_priv_to_vm(pdd->drm_priv);
346                 err = amdgpu_bo_reserve(wptr_vm->root.bo, false);
347                 if (err)
348                         goto err_wptr_map_gart;
349
350                 wptr_mapping = amdgpu_vm_bo_lookup_mapping(
351                                 wptr_vm, args->write_pointer_address >> PAGE_SHIFT);
352                 amdgpu_bo_unreserve(wptr_vm->root.bo);
353                 if (!wptr_mapping) {
354                         pr_err("Failed to lookup wptr bo\n");
355                         err = -EINVAL;
356                         goto err_wptr_map_gart;
357                 }
358
359                 wptr_bo = wptr_mapping->bo_va->base.bo;
360                 if (wptr_bo->tbo.base.size > PAGE_SIZE) {
361                         pr_err("Requested GART mapping for wptr bo larger than one page\n");
362                         err = -EINVAL;
363                         goto err_wptr_map_gart;
364                 }
365
366                 err = amdgpu_amdkfd_map_gtt_bo_to_gart(dev->adev, wptr_bo);
367                 if (err) {
368                         pr_err("Failed to map wptr bo to GART\n");
369                         goto err_wptr_map_gart;
370                 }
371         }
372
373         pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
374                         p->pasid,
375                         dev->id);
376
377         err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, wptr_bo,
378                         NULL, NULL, NULL, &doorbell_offset_in_process);
379         if (err != 0)
380                 goto err_create_queue;
381
382         args->queue_id = queue_id;
383
384
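        /* The doorbell "offset" handed back to user space is an encoded
         * cookie rather than a plain byte offset: it combines
         * KFD_MMAP_TYPE_DOORBELL, the GPU ID via KFD_MMAP_GPU_ID() and, on
         * SOC15 ASICs, the offset within the process doorbell frame.
         * kfd_mmap() is expected to decode this cookie when the doorbell
         * page is mapped.
         */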
385         /* Return gpu_id as doorbell offset for mmap usage */
386         args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
387         args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
388         if (KFD_IS_SOC15(dev))
389                 /* On SOC15 ASICs, include the doorbell offset within the
390                  * process doorbell frame, which is 2 pages.
391                  */
392                 args->doorbell_offset |= doorbell_offset_in_process;
393
394         mutex_unlock(&p->mutex);
395
396         pr_debug("Queue id %d was created successfully\n", args->queue_id);
397
398         pr_debug("Ring buffer address == 0x%016llX\n",
399                         args->ring_base_address);
400
401         pr_debug("Read ptr address    == 0x%016llX\n",
402                         args->read_pointer_address);
403
404         pr_debug("Write ptr address   == 0x%016llX\n",
405                         args->write_pointer_address);
406
407         return 0;
408
409 err_create_queue:
410         if (wptr_bo)
411                 amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
412 err_wptr_map_gart:
413 err_alloc_doorbells:
414 err_bind_process:
415 err_pdd:
416         mutex_unlock(&p->mutex);
417         return err;
418 }
419
420 static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
421                                         void *data)
422 {
423         int retval;
424         struct kfd_ioctl_destroy_queue_args *args = data;
425
426         pr_debug("Destroying queue id %d for pasid 0x%x\n",
427                                 args->queue_id,
428                                 p->pasid);
429
430         mutex_lock(&p->mutex);
431
432         retval = pqm_destroy_queue(&p->pqm, args->queue_id);
433
434         mutex_unlock(&p->mutex);
435         return retval;
436 }
437
438 static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
439                                         void *data)
440 {
441         int retval;
442         struct kfd_ioctl_update_queue_args *args = data;
443         struct queue_properties properties;
444
445         if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
446                 pr_err("Queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
447                 return -EINVAL;
448         }
449
450         if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
451                 pr_err("Queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
452                 return -EINVAL;
453         }
454
455         if ((args->ring_base_address) &&
456                 (!access_ok((const void __user *) args->ring_base_address,
457                         sizeof(uint64_t)))) {
458                 pr_err("Can't access ring base address\n");
459                 return -EFAULT;
460         }
461
462         if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
463                 pr_err("Ring size must be a power of 2 or 0\n");
464                 return -EINVAL;
465         }
466
467         properties.queue_address = args->ring_base_address;
468         properties.queue_size = args->ring_size;
469         properties.queue_percent = args->queue_percentage;
470         properties.priority = args->queue_priority;
471
472         pr_debug("Updating queue id %d for pasid 0x%x\n",
473                         args->queue_id, p->pasid);
474
475         mutex_lock(&p->mutex);
476
477         retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);
478
479         mutex_unlock(&p->mutex);
480
481         return retval;
482 }
483
484 static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
485                                         void *data)
486 {
487         int retval;
488         const int max_num_cus = 1024;
489         struct kfd_ioctl_set_cu_mask_args *args = data;
490         struct mqd_update_info minfo = {0};
491         uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
492         size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
493
494         if ((args->num_cu_mask % 32) != 0) {
495                 pr_debug("num_cu_mask 0x%x must be a multiple of 32",
496                                 args->num_cu_mask);
497                 return -EINVAL;
498         }
499
500         minfo.cu_mask.count = args->num_cu_mask;
501         if (minfo.cu_mask.count == 0) {
502                 pr_debug("CU mask cannot be 0");
503                 return -EINVAL;
504         }
505
506         /* To prevent an unreasonably large CU mask size, set an arbitrary
507          * limit of max_num_cus bits. Any CU mask bits beyond max_num_cus are
508          * dropped and only the first max_num_cus bits are used.
509          */
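        /* Example: a request with num_cu_mask == 2048 is clamped to 1024 bits,
         * so cu_mask_size becomes sizeof(uint32_t) * (1024 / 32) = 128 bytes
         * and only the first 32 mask words are copied from user space.
         */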
510         if (minfo.cu_mask.count > max_num_cus) {
511                 pr_debug("CU mask cannot be greater than 1024 bits");
512                 minfo.cu_mask.count = max_num_cus;
513                 cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
514         }
515
516         minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
517         if (!minfo.cu_mask.ptr)
518                 return -ENOMEM;
519
520         retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
521         if (retval) {
522                 pr_debug("Could not copy CU mask from userspace");
523                 retval = -EFAULT;
524                 goto out;
525         }
526
527         minfo.update_flag = UPDATE_FLAG_CU_MASK;
528
529         mutex_lock(&p->mutex);
530
531         retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
532
533         mutex_unlock(&p->mutex);
534
535 out:
536         kfree(minfo.cu_mask.ptr);
537         return retval;
538 }
539
540 static int kfd_ioctl_get_queue_wave_state(struct file *filep,
541                                           struct kfd_process *p, void *data)
542 {
543         struct kfd_ioctl_get_queue_wave_state_args *args = data;
544         int r;
545
546         mutex_lock(&p->mutex);
547
548         r = pqm_get_wave_state(&p->pqm, args->queue_id,
549                                (void __user *)args->ctl_stack_address,
550                                &args->ctl_stack_used_size,
551                                &args->save_area_used_size);
552
553         mutex_unlock(&p->mutex);
554
555         return r;
556 }
557
558 static int kfd_ioctl_set_memory_policy(struct file *filep,
559                                         struct kfd_process *p, void *data)
560 {
561         struct kfd_ioctl_set_memory_policy_args *args = data;
562         int err = 0;
563         struct kfd_process_device *pdd;
564         enum cache_policy default_policy, alternate_policy;
565
566         if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
567             && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
568                 return -EINVAL;
569         }
570
571         if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
572             && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
573                 return -EINVAL;
574         }
575
576         mutex_lock(&p->mutex);
577         pdd = kfd_process_device_data_by_id(p, args->gpu_id);
578         if (!pdd) {
579                 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
580                 err = -EINVAL;
581                 goto err_pdd;
582         }
583
584         pdd = kfd_bind_process_to_device(pdd->dev, p);
585         if (IS_ERR(pdd)) {
586                 err = -ESRCH;
587                 goto out;
588         }
589
590         default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
591                          ? cache_policy_coherent : cache_policy_noncoherent;
592
593         alternate_policy =
594                 (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
595                    ? cache_policy_coherent : cache_policy_noncoherent;
596
597         if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
598                                 &pdd->qpd,
599                                 default_policy,
600                                 alternate_policy,
601                                 (void __user *)args->alternate_aperture_base,
602                                 args->alternate_aperture_size))
603                 err = -EINVAL;
604
605 out:
606 err_pdd:
607         mutex_unlock(&p->mutex);
608
609         return err;
610 }
611
612 static int kfd_ioctl_set_trap_handler(struct file *filep,
613                                         struct kfd_process *p, void *data)
614 {
615         struct kfd_ioctl_set_trap_handler_args *args = data;
616         int err = 0;
617         struct kfd_process_device *pdd;
618
619         mutex_lock(&p->mutex);
620
621         pdd = kfd_process_device_data_by_id(p, args->gpu_id);
622         if (!pdd) {
623                 err = -EINVAL;
624                 goto err_pdd;
625         }
626
627         pdd = kfd_bind_process_to_device(pdd->dev, p);
628         if (IS_ERR(pdd)) {
629                 err = -ESRCH;
630                 goto out;
631         }
632
633         kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
634
635 out:
636 err_pdd:
637         mutex_unlock(&p->mutex);
638
639         return err;
640 }
641
642 static int kfd_ioctl_dbg_register(struct file *filep,
643                                 struct kfd_process *p, void *data)
644 {
645         return -EPERM;
646 }
647
648 static int kfd_ioctl_dbg_unregister(struct file *filep,
649                                 struct kfd_process *p, void *data)
650 {
651         return -EPERM;
652 }
653
654 static int kfd_ioctl_dbg_address_watch(struct file *filep,
655                                         struct kfd_process *p, void *data)
656 {
657         return -EPERM;
658 }
659
660 /* Parse and generate fixed size data structure for wave control */
661 static int kfd_ioctl_dbg_wave_control(struct file *filep,
662                                         struct kfd_process *p, void *data)
663 {
664         return -EPERM;
665 }
666
667 static int kfd_ioctl_get_clock_counters(struct file *filep,
668                                 struct kfd_process *p, void *data)
669 {
670         struct kfd_ioctl_get_clock_counters_args *args = data;
671         struct kfd_process_device *pdd;
672
673         mutex_lock(&p->mutex);
674         pdd = kfd_process_device_data_by_id(p, args->gpu_id);
675         mutex_unlock(&p->mutex);
676         if (pdd)
677                 /* Reading GPU clock counter from KGD */
678                 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
679         else
680                 /* Node without GPU resource */
681                 args->gpu_clock_counter = 0;
682
683         /* No access to rdtsc. Using raw monotonic time */
684         args->cpu_clock_counter = ktime_get_raw_ns();
685         args->system_clock_counter = ktime_get_boottime_ns();
686
687         /* Since the counter is in nano-seconds we use 1GHz frequency */
688         args->system_clock_freq = 1000000000;
689
690         return 0;
691 }
692
693
694 static int kfd_ioctl_get_process_apertures(struct file *filp,
695                                 struct kfd_process *p, void *data)
696 {
697         struct kfd_ioctl_get_process_apertures_args *args = data;
698         struct kfd_process_device_apertures *pAperture;
699         int i;
700
701         dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
702
703         args->num_of_nodes = 0;
704
705         mutex_lock(&p->mutex);
706         /* Run over all pdd of the process */
707         for (i = 0; i < p->n_pdds; i++) {
708                 struct kfd_process_device *pdd = p->pdds[i];
709
710                 pAperture =
711                         &args->process_apertures[args->num_of_nodes];
712                 pAperture->gpu_id = pdd->dev->id;
713                 pAperture->lds_base = pdd->lds_base;
714                 pAperture->lds_limit = pdd->lds_limit;
715                 pAperture->gpuvm_base = pdd->gpuvm_base;
716                 pAperture->gpuvm_limit = pdd->gpuvm_limit;
717                 pAperture->scratch_base = pdd->scratch_base;
718                 pAperture->scratch_limit = pdd->scratch_limit;
719
720                 dev_dbg(kfd_device,
721                         "node id %u\n", args->num_of_nodes);
722                 dev_dbg(kfd_device,
723                         "gpu id %u\n", pdd->dev->id);
724                 dev_dbg(kfd_device,
725                         "lds_base %llX\n", pdd->lds_base);
726                 dev_dbg(kfd_device,
727                         "lds_limit %llX\n", pdd->lds_limit);
728                 dev_dbg(kfd_device,
729                         "gpuvm_base %llX\n", pdd->gpuvm_base);
730                 dev_dbg(kfd_device,
731                         "gpuvm_limit %llX\n", pdd->gpuvm_limit);
732                 dev_dbg(kfd_device,
733                         "scratch_base %llX\n", pdd->scratch_base);
734                 dev_dbg(kfd_device,
735                         "scratch_limit %llX\n", pdd->scratch_limit);
736
737                 if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
738                         break;
739         }
740         mutex_unlock(&p->mutex);
741
742         return 0;
743 }
744
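/* Two-call protocol: a first call with num_of_nodes == 0 only returns the
 * number of nodes so that user space can size its buffer; a second call
 * then fills in at most num_of_nodes entries and sets num_of_nodes to the
 * number actually written.
 */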
745 static int kfd_ioctl_get_process_apertures_new(struct file *filp,
746                                 struct kfd_process *p, void *data)
747 {
748         struct kfd_ioctl_get_process_apertures_new_args *args = data;
749         struct kfd_process_device_apertures *pa;
750         int ret;
751         int i;
752
753         dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
754
755         if (args->num_of_nodes == 0) {
756                 /* Return number of nodes, so that user space can allocate
757                  * sufficient memory
758                  */
759                 mutex_lock(&p->mutex);
760                 args->num_of_nodes = p->n_pdds;
761                 goto out_unlock;
762         }
763
764         /* Fill in process-aperture information for all available
765          * nodes, but not more than args->num_of_nodes as that is
766          * the amount of memory allocated by user
767          */
768         pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
769                                 args->num_of_nodes), GFP_KERNEL);
770         if (!pa)
771                 return -ENOMEM;
772
773         mutex_lock(&p->mutex);
774
775         if (!p->n_pdds) {
776                 args->num_of_nodes = 0;
777                 kfree(pa);
778                 goto out_unlock;
779         }
780
781         /* Run over all pdd of the process */
782         for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
783                 struct kfd_process_device *pdd = p->pdds[i];
784
785                 pa[i].gpu_id = pdd->dev->id;
786                 pa[i].lds_base = pdd->lds_base;
787                 pa[i].lds_limit = pdd->lds_limit;
788                 pa[i].gpuvm_base = pdd->gpuvm_base;
789                 pa[i].gpuvm_limit = pdd->gpuvm_limit;
790                 pa[i].scratch_base = pdd->scratch_base;
791                 pa[i].scratch_limit = pdd->scratch_limit;
792
793                 dev_dbg(kfd_device,
794                         "gpu id %u\n", pdd->dev->id);
795                 dev_dbg(kfd_device,
796                         "lds_base %llX\n", pdd->lds_base);
797                 dev_dbg(kfd_device,
798                         "lds_limit %llX\n", pdd->lds_limit);
799                 dev_dbg(kfd_device,
800                         "gpuvm_base %llX\n", pdd->gpuvm_base);
801                 dev_dbg(kfd_device,
802                         "gpuvm_limit %llX\n", pdd->gpuvm_limit);
803                 dev_dbg(kfd_device,
804                         "scratch_base %llX\n", pdd->scratch_base);
805                 dev_dbg(kfd_device,
806                         "scratch_limit %llX\n", pdd->scratch_limit);
807         }
808         mutex_unlock(&p->mutex);
809
810         args->num_of_nodes = i;
811         ret = copy_to_user(
812                         (void __user *)args->kfd_process_device_apertures_ptr,
813                         pa,
814                         (i * sizeof(struct kfd_process_device_apertures)));
815         kfree(pa);
816         return ret ? -EFAULT : 0;
817
818 out_unlock:
819         mutex_unlock(&p->mutex);
820         return 0;
821 }
822
823 static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
824                                         void *data)
825 {
826         struct kfd_ioctl_create_event_args *args = data;
827         int err;
828
829         /* For dGPUs the event page is allocated in user mode. The
830          * handle is passed to KFD with the first call to this IOCTL
831          * through the event_page_offset field.
832          */
833         if (args->event_page_offset) {
834                 mutex_lock(&p->mutex);
835                 err = kfd_kmap_event_page(p, args->event_page_offset);
836                 mutex_unlock(&p->mutex);
837                 if (err)
838                         return err;
839         }
840
841         err = kfd_event_create(filp, p, args->event_type,
842                                 args->auto_reset != 0, args->node_id,
843                                 &args->event_id, &args->event_trigger_data,
844                                 &args->event_page_offset,
845                                 &args->event_slot_index);
846
847         pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__);
848         return err;
849 }
850
851 static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
852                                         void *data)
853 {
854         struct kfd_ioctl_destroy_event_args *args = data;
855
856         return kfd_event_destroy(p, args->event_id);
857 }
858
859 static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
860                                 void *data)
861 {
862         struct kfd_ioctl_set_event_args *args = data;
863
864         return kfd_set_event(p, args->event_id);
865 }
866
867 static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
868                                 void *data)
869 {
870         struct kfd_ioctl_reset_event_args *args = data;
871
872         return kfd_reset_event(p, args->event_id);
873 }
874
875 static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
876                                 void *data)
877 {
878         struct kfd_ioctl_wait_events_args *args = data;
879
880         return kfd_wait_on_events(p, args->num_events,
881                         (void __user *)args->events_ptr,
882                         (args->wait_for_all != 0),
883                         &args->timeout, &args->wait_result);
884 }
885 static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
886                                         struct kfd_process *p, void *data)
887 {
888         struct kfd_ioctl_set_scratch_backing_va_args *args = data;
889         struct kfd_process_device *pdd;
890         struct kfd_dev *dev;
891         long err;
892
893         mutex_lock(&p->mutex);
894         pdd = kfd_process_device_data_by_id(p, args->gpu_id);
895         if (!pdd) {
896                 err = -EINVAL;
897                 goto err_pdd;
898         }
899         dev = pdd->dev;
900
901         pdd = kfd_bind_process_to_device(dev, p);
902         if (IS_ERR(pdd)) {
903                 err = PTR_ERR(pdd);
904                 goto bind_process_to_device_fail;
905         }
906
907         pdd->qpd.sh_hidden_private_base = args->va_addr;
908
909         mutex_unlock(&p->mutex);
910
911         if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
912             pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
913                 dev->kfd2kgd->set_scratch_backing_va(
914                         dev->adev, args->va_addr, pdd->qpd.vmid);
915
916         return 0;
917
918 bind_process_to_device_fail:
919 err_pdd:
920         mutex_unlock(&p->mutex);
921         return err;
922 }
923
924 static int kfd_ioctl_get_tile_config(struct file *filep,
925                 struct kfd_process *p, void *data)
926 {
927         struct kfd_ioctl_get_tile_config_args *args = data;
928         struct kfd_process_device *pdd;
929         struct tile_config config;
930         int err = 0;
931
932         mutex_lock(&p->mutex);
933         pdd = kfd_process_device_data_by_id(p, args->gpu_id);
934         mutex_unlock(&p->mutex);
935         if (!pdd)
936                 return -EINVAL;
937
938         amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);
939
940         args->gb_addr_config = config.gb_addr_config;
941         args->num_banks = config.num_banks;
942         args->num_ranks = config.num_ranks;
943
944         if (args->num_tile_configs > config.num_tile_configs)
945                 args->num_tile_configs = config.num_tile_configs;
946         err = copy_to_user((void __user *)args->tile_config_ptr,
947                         config.tile_config_ptr,
948                         args->num_tile_configs * sizeof(uint32_t));
949         if (err) {
950                 args->num_tile_configs = 0;
951                 return -EFAULT;
952         }
953
954         if (args->num_macro_tile_configs > config.num_macro_tile_configs)
955                 args->num_macro_tile_configs =
956                                 config.num_macro_tile_configs;
957         err = copy_to_user((void __user *)args->macro_tile_config_ptr,
958                         config.macro_tile_config_ptr,
959                         args->num_macro_tile_configs * sizeof(uint32_t));
960         if (err) {
961                 args->num_macro_tile_configs = 0;
962                 return -EFAULT;
963         }
964
965         return 0;
966 }
967
968 static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
969                                 void *data)
970 {
971         struct kfd_ioctl_acquire_vm_args *args = data;
972         struct kfd_process_device *pdd;
973         struct file *drm_file;
974         int ret;
975
976         drm_file = fget(args->drm_fd);
977         if (!drm_file)
978                 return -EINVAL;
979
980         mutex_lock(&p->mutex);
981         pdd = kfd_process_device_data_by_id(p, args->gpu_id);
982         if (!pdd) {
983                 ret = -EINVAL;
984                 goto err_pdd;
985         }
986
987         if (pdd->drm_file) {
988                 ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
989                 goto err_drm_file;
990         }
991
992         ret = kfd_process_device_init_vm(pdd, drm_file);
993         if (ret)
994                 goto err_unlock;
995
996         /* On success, the PDD keeps the drm_file reference */
997         mutex_unlock(&p->mutex);
998
999         return 0;
1000
1001 err_unlock:
1002 err_pdd:
1003 err_drm_file:
1004         mutex_unlock(&p->mutex);
1005         fput(drm_file);
1006         return ret;
1007 }
1008
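/* A device counts as large-BAR when all of its local memory is host
 * visible: no private VRAM and a non-zero public aperture. debug_largebar
 * simulates large-BAR behaviour on machines that are not actually
 * large-BAR, and devices using IOMMU v2 are never reported as large-BAR
 * here.
 */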
1009 bool kfd_dev_is_large_bar(struct kfd_dev *dev)
1010 {
1011         if (debug_largebar) {
1012                 pr_debug("Simulate large-bar allocation on non large-bar machine\n");
1013                 return true;
1014         }
1015
1016         if (dev->use_iommu_v2)
1017                 return false;
1018
1019         if (dev->local_mem_info.local_mem_size_private == 0 &&
1020                         dev->local_mem_info.local_mem_size_public > 0)
1021                 return true;
1022         return false;
1023 }
1024
1025 static int kfd_ioctl_get_available_memory(struct file *filep,
1026                                           struct kfd_process *p, void *data)
1027 {
1028         struct kfd_ioctl_get_available_memory_args *args = data;
1029         struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
1030
1031         if (!pdd)
1032                 return -EINVAL;
1033         args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev);
1034         kfd_unlock_pdd(pdd);
1035         return 0;
1036 }
1037
1038 static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
1039                                         struct kfd_process *p, void *data)
1040 {
1041         struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
1042         struct kfd_process_device *pdd;
1043         void *mem;
1044         struct kfd_dev *dev;
1045         int idr_handle;
1046         long err;
1047         uint64_t offset = args->mmap_offset;
1048         uint32_t flags = args->flags;
1049
1050         if (args->size == 0)
1051                 return -EINVAL;
1052
1053 #if IS_ENABLED(CONFIG_HSA_AMD_SVM)
1054         /* Flush pending deferred work to avoid racing with deferred actions
1055          * from previous memory map changes (e.g. munmap).
1056          */
1057         svm_range_list_lock_and_flush_work(&p->svms, current->mm);
1058         mutex_lock(&p->svms.lock);
1059         mmap_write_unlock(current->mm);
1060         if (interval_tree_iter_first(&p->svms.objects,
1061                                      args->va_addr >> PAGE_SHIFT,
1062                                      (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
1063                 pr_err("Address: 0x%llx already allocated by SVM\n",
1064                         args->va_addr);
1065                 mutex_unlock(&p->svms.lock);
1066                 return -EADDRINUSE;
1067         }
1068
1069         /* When registering a user buffer, check whether it has already been
1070          * registered by SVM, using the buffer's CPU virtual address.
1071          */
1072         if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
1073             interval_tree_iter_first(&p->svms.objects,
1074                                      args->mmap_offset >> PAGE_SHIFT,
1075                                      (args->mmap_offset  + args->size - 1) >> PAGE_SHIFT)) {
1076                 pr_err("User Buffer Address: 0x%llx already allocated by SVM\n",
1077                         args->mmap_offset);
1078                 mutex_unlock(&p->svms.lock);
1079                 return -EADDRINUSE;
1080         }
1081
1082         mutex_unlock(&p->svms.lock);
1083 #endif
1084         mutex_lock(&p->mutex);
1085         pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1086         if (!pdd) {
1087                 err = -EINVAL;
1088                 goto err_pdd;
1089         }
1090
1091         dev = pdd->dev;
1092
1093         if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
1094                 (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
1095                 !kfd_dev_is_large_bar(dev)) {
1096                 pr_err("Allocating host-visible VRAM on a small-BAR system is not allowed\n");
1097                 err = -EINVAL;
1098                 goto err_large_bar;
1099         }
1100
1101         pdd = kfd_bind_process_to_device(dev, p);
1102         if (IS_ERR(pdd)) {
1103                 err = PTR_ERR(pdd);
1104                 goto err_unlock;
1105         }
1106
1107         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
1108                 if (args->size != kfd_doorbell_process_slice(dev)) {
1109                         err = -EINVAL;
1110                         goto err_unlock;
1111                 }
1112                 offset = kfd_get_process_doorbells(pdd);
1113                 if (!offset) {
1114                         err = -ENOMEM;
1115                         goto err_unlock;
1116                 }
1117         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
1118                 if (args->size != PAGE_SIZE) {
1119                         err = -EINVAL;
1120                         goto err_unlock;
1121                 }
1122                 offset = dev->adev->rmmio_remap.bus_addr;
1123                 if (!offset) {
1124                         err = -ENOMEM;
1125                         goto err_unlock;
1126                 }
1127         }
1128
1129         err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1130                 dev->adev, args->va_addr, args->size,
1131                 pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
1132                 flags, false);
1133
1134         if (err)
1135                 goto err_unlock;
1136
1137         idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1138         if (idr_handle < 0) {
1139                 err = -EFAULT;
1140                 goto err_free;
1141         }
1142
1143         /* Update the VRAM usage count */
1144         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1145                 uint64_t size = args->size;
1146
1147                 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
1148                         size >>= 1;
1149                 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
1150         }
1151
1152         mutex_unlock(&p->mutex);
1153
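        /* The handle returned to user space packs the GPU ID together with
         * the per-device IDR handle; GET_GPU_ID() and GET_IDR_HANDLE()
         * recover the two parts in the free/map/unmap ioctls.
         */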
1154         args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1155         args->mmap_offset = offset;
1156
1157         /* MMIO is mapped through the kfd device.
1158          * Generate a kfd mmap offset for it.
1159          */
1160         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1161                 args->mmap_offset = KFD_MMAP_TYPE_MMIO
1162                                         | KFD_MMAP_GPU_ID(args->gpu_id);
1163
1164         return 0;
1165
1166 err_free:
1167         amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
1168                                                pdd->drm_priv, NULL);
1169 err_unlock:
1170 err_pdd:
1171 err_large_bar:
1172         mutex_unlock(&p->mutex);
1173         return err;
1174 }
1175
1176 static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
1177                                         struct kfd_process *p, void *data)
1178 {
1179         struct kfd_ioctl_free_memory_of_gpu_args *args = data;
1180         struct kfd_process_device *pdd;
1181         void *mem;
1182         int ret;
1183         uint64_t size = 0;
1184
1185         mutex_lock(&p->mutex);
1186         /*
1187          * Safeguard to prevent user space from freeing signal BO.
1188          * It will be freed at process termination.
1189          */
1190         if (p->signal_handle && (p->signal_handle == args->handle)) {
1191                 pr_err("Freeing the signal BO is not allowed\n");
1192                 ret = -EPERM;
1193                 goto err_unlock;
1194         }
1195
1196         pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1197         if (!pdd) {
1198                 pr_err("Process device data doesn't exist\n");
1199                 ret = -EINVAL;
1200                 goto err_pdd;
1201         }
1202
1203         mem = kfd_process_device_translate_handle(
1204                 pdd, GET_IDR_HANDLE(args->handle));
1205         if (!mem) {
1206                 ret = -EINVAL;
1207                 goto err_unlock;
1208         }
1209
1210         ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
1211                                 (struct kgd_mem *)mem, pdd->drm_priv, &size);
1212
1213         /* If freeing the buffer failed, leave the handle in place for
1214          * clean-up during process tear-down.
1215          */
1216         if (!ret)
1217                 kfd_process_device_remove_obj_handle(
1218                         pdd, GET_IDR_HANDLE(args->handle));
1219
1220         WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
1221
1222 err_unlock:
1223 err_pdd:
1224         mutex_unlock(&p->mutex);
1225         return ret;
1226 }
1227
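/* Map one allocation to several GPUs in a single call. args->n_success is
 * both an input and an output: the mapping loop starts at n_success and
 * advances it after each device that succeeds, so after a partial failure
 * user space can tell how far the request got before the error.
 */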
1228 static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
1229                                         struct kfd_process *p, void *data)
1230 {
1231         struct kfd_ioctl_map_memory_to_gpu_args *args = data;
1232         struct kfd_process_device *pdd, *peer_pdd;
1233         void *mem;
1234         struct kfd_dev *dev;
1235         long err = 0;
1236         int i;
1237         uint32_t *devices_arr = NULL;
1238
1239         if (!args->n_devices) {
1240                 pr_debug("Device IDs array empty\n");
1241                 return -EINVAL;
1242         }
1243         if (args->n_success > args->n_devices) {
1244                 pr_debug("n_success exceeds n_devices\n");
1245                 return -EINVAL;
1246         }
1247
1248         devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1249                                     GFP_KERNEL);
1250         if (!devices_arr)
1251                 return -ENOMEM;
1252
1253         err = copy_from_user(devices_arr,
1254                              (void __user *)args->device_ids_array_ptr,
1255                              args->n_devices * sizeof(*devices_arr));
1256         if (err != 0) {
1257                 err = -EFAULT;
1258                 goto copy_from_user_failed;
1259         }
1260
1261         mutex_lock(&p->mutex);
1262         pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1263         if (!pdd) {
1264                 err = -EINVAL;
1265                 goto get_process_device_data_failed;
1266         }
1267         dev = pdd->dev;
1268
1269         pdd = kfd_bind_process_to_device(dev, p);
1270         if (IS_ERR(pdd)) {
1271                 err = PTR_ERR(pdd);
1272                 goto bind_process_to_device_failed;
1273         }
1274
1275         mem = kfd_process_device_translate_handle(pdd,
1276                                                 GET_IDR_HANDLE(args->handle));
1277         if (!mem) {
1278                 err = -ENOMEM;
1279                 goto get_mem_obj_from_handle_failed;
1280         }
1281
1282         for (i = args->n_success; i < args->n_devices; i++) {
1283                 peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1284                 if (!peer_pdd) {
1285                         pr_debug("Getting device by id failed for 0x%x\n",
1286                                  devices_arr[i]);
1287                         err = -EINVAL;
1288                         goto get_mem_obj_from_handle_failed;
1289                 }
1290
1291                 peer_pdd = kfd_bind_process_to_device(peer_pdd->dev, p);
1292                 if (IS_ERR(peer_pdd)) {
1293                         err = PTR_ERR(peer_pdd);
1294                         goto get_mem_obj_from_handle_failed;
1295                 }
1296
1297                 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1298                         peer_pdd->dev->adev, (struct kgd_mem *)mem,
1299                         peer_pdd->drm_priv);
1300                 if (err) {
1301                         struct pci_dev *pdev = peer_pdd->dev->adev->pdev;
1302
1303                         dev_err(dev->adev->dev,
1304                                "Failed to map peer:%04x:%02x:%02x.%d mem_domain:%d\n",
1305                                pci_domain_nr(pdev->bus),
1306                                pdev->bus->number,
1307                                PCI_SLOT(pdev->devfn),
1308                                PCI_FUNC(pdev->devfn),
1309                                ((struct kgd_mem *)mem)->domain);
1310                         goto map_memory_to_gpu_failed;
1311                 }
1312                 args->n_success = i+1;
1313         }
1314
1315         err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
1316         if (err) {
1317                 pr_debug("Sync memory failed, wait interrupted by user signal\n");
1318                 goto sync_memory_failed;
1319         }
1320
1321         mutex_unlock(&p->mutex);
1322
1323         /* Flush TLBs after waiting for the page table updates to complete */
1324         for (i = 0; i < args->n_devices; i++) {
1325                 peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1326                 if (WARN_ON_ONCE(!peer_pdd))
1327                         continue;
1328                 kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
1329         }
1330         kfree(devices_arr);
1331
1332         return err;
1333
1334 get_process_device_data_failed:
1335 bind_process_to_device_failed:
1336 get_mem_obj_from_handle_failed:
1337 map_memory_to_gpu_failed:
1338 sync_memory_failed:
1339         mutex_unlock(&p->mutex);
1340 copy_from_user_failed:
1341         kfree(devices_arr);
1342
1343         return err;
1344 }
1345
1346 static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1347                                         struct kfd_process *p, void *data)
1348 {
1349         struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
1350         struct kfd_process_device *pdd, *peer_pdd;
1351         void *mem;
1352         long err = 0;
1353         uint32_t *devices_arr = NULL, i;
1354         bool flush_tlb;
1355
1356         if (!args->n_devices) {
1357                 pr_debug("Device IDs array empty\n");
1358                 return -EINVAL;
1359         }
1360         if (args->n_success > args->n_devices) {
1361                 pr_debug("n_success exceeds n_devices\n");
1362                 return -EINVAL;
1363         }
1364
1365         devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1366                                     GFP_KERNEL);
1367         if (!devices_arr)
1368                 return -ENOMEM;
1369
1370         err = copy_from_user(devices_arr,
1371                              (void __user *)args->device_ids_array_ptr,
1372                              args->n_devices * sizeof(*devices_arr));
1373         if (err != 0) {
1374                 err = -EFAULT;
1375                 goto copy_from_user_failed;
1376         }
1377
1378         mutex_lock(&p->mutex);
1379         pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1380         if (!pdd) {
1381                 err = -EINVAL;
1382                 goto bind_process_to_device_failed;
1383         }
1384
1385         mem = kfd_process_device_translate_handle(pdd,
1386                                                 GET_IDR_HANDLE(args->handle));
1387         if (!mem) {
1388                 err = -ENOMEM;
1389                 goto get_mem_obj_from_handle_failed;
1390         }
1391
1392         for (i = args->n_success; i < args->n_devices; i++) {
1393                 peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1394                 if (!peer_pdd) {
1395                         err = -EINVAL;
1396                         goto get_mem_obj_from_handle_failed;
1397                 }
1398                 err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1399                         peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
1400                 if (err) {
1401                         pr_err("Failed to unmap from gpu %d/%d\n",
1402                                i, args->n_devices);
1403                         goto unmap_memory_from_gpu_failed;
1404                 }
1405                 args->n_success = i+1;
1406         }
1407
1408         flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
1409         if (flush_tlb) {
1410                 err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
1411                                 (struct kgd_mem *) mem, true);
1412                 if (err) {
1413                         pr_debug("Sync memory failed, wait interrupted by user signal\n");
1414                         goto sync_memory_failed;
1415                 }
1416         }
1417         mutex_unlock(&p->mutex);
1418
1419         if (flush_tlb) {
1420                 /* Flush TLBs after waiting for the page table updates to complete */
1421                 for (i = 0; i < args->n_devices; i++) {
1422                         peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1423                         if (WARN_ON_ONCE(!peer_pdd))
1424                                 continue;
1425                         kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
1426                 }
1427         }
1428         kfree(devices_arr);
1429
1430         return 0;
1431
1432 bind_process_to_device_failed:
1433 get_mem_obj_from_handle_failed:
1434 unmap_memory_from_gpu_failed:
1435 sync_memory_failed:
1436         mutex_unlock(&p->mutex);
1437 copy_from_user_failed:
1438         kfree(devices_arr);
1439         return err;
1440 }
1441
1442 static int kfd_ioctl_alloc_queue_gws(struct file *filep,
1443                 struct kfd_process *p, void *data)
1444 {
1445         int retval;
1446         struct kfd_ioctl_alloc_queue_gws_args *args = data;
1447         struct queue *q;
1448         struct kfd_dev *dev;
1449
1450         mutex_lock(&p->mutex);
1451         q = pqm_get_user_queue(&p->pqm, args->queue_id);
1452
1453         if (q) {
1454                 dev = q->device;
1455         } else {
1456                 retval = -EINVAL;
1457                 goto out_unlock;
1458         }
1459
1460         if (!dev->gws) {
1461                 retval = -ENODEV;
1462                 goto out_unlock;
1463         }
1464
1465         if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
1466                 retval = -ENODEV;
1467                 goto out_unlock;
1468         }
1469
1470         retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
1471         mutex_unlock(&p->mutex);
1472
1473         args->first_gws = 0;
1474         return retval;
1475
1476 out_unlock:
1477         mutex_unlock(&p->mutex);
1478         return retval;
1479 }
1480
1481 static int kfd_ioctl_get_dmabuf_info(struct file *filep,
1482                 struct kfd_process *p, void *data)
1483 {
1484         struct kfd_ioctl_get_dmabuf_info_args *args = data;
1485         struct kfd_dev *dev = NULL;
1486         struct amdgpu_device *dmabuf_adev;
1487         void *metadata_buffer = NULL;
1488         uint32_t flags;
1489         unsigned int i;
1490         int r;
1491
1492         /* Find a KFD GPU device that supports the get_dmabuf_info query */
1493         for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
1494                 if (dev)
1495                         break;
1496         if (!dev)
1497                 return -EINVAL;
1498
1499         if (args->metadata_ptr) {
1500                 metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
1501                 if (!metadata_buffer)
1502                         return -ENOMEM;
1503         }
1504
1505         /* Get dmabuf info from KGD */
1506         r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
1507                                           &dmabuf_adev, &args->size,
1508                                           metadata_buffer, args->metadata_size,
1509                                           &args->metadata_size, &flags);
1510         if (r)
1511                 goto exit;
1512
1513         /* Reverse-lookup gpu_id from kgd pointer */
1514         dev = kfd_device_by_adev(dmabuf_adev);
1515         if (!dev) {
1516                 r = -EINVAL;
1517                 goto exit;
1518         }
1519         args->gpu_id = dev->id;
1520         args->flags = flags;
1521
1522         /* Copy metadata buffer to user mode */
1523         if (metadata_buffer) {
1524                 r = copy_to_user((void __user *)args->metadata_ptr,
1525                                  metadata_buffer, args->metadata_size);
1526                 if (r != 0)
1527                         r = -EFAULT;
1528         }
1529
1530 exit:
1531         kfree(metadata_buffer);
1532
1533         return r;
1534 }
1535
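/*
 * Import a DMA-buf into the process' GPU VM on the given GPU and return a
 * buffer handle for it. The imported memory is freed again if no handle can
 * be created.
 */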
1536 static int kfd_ioctl_import_dmabuf(struct file *filep,
1537                                    struct kfd_process *p, void *data)
1538 {
1539         struct kfd_ioctl_import_dmabuf_args *args = data;
1540         struct kfd_process_device *pdd;
1541         struct dma_buf *dmabuf;
1542         int idr_handle;
1543         uint64_t size;
1544         void *mem;
1545         int r;
1546
1547         dmabuf = dma_buf_get(args->dmabuf_fd);
1548         if (IS_ERR(dmabuf))
1549                 return PTR_ERR(dmabuf);
1550
1551         mutex_lock(&p->mutex);
1552         pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1553         if (!pdd) {
1554                 r = -EINVAL;
1555                 goto err_unlock;
1556         }
1557
1558         pdd = kfd_bind_process_to_device(pdd->dev, p);
1559         if (IS_ERR(pdd)) {
1560                 r = PTR_ERR(pdd);
1561                 goto err_unlock;
1562         }
1563
1564         r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf,
1565                                               args->va_addr, pdd->drm_priv,
1566                                               (struct kgd_mem **)&mem, &size,
1567                                               NULL);
1568         if (r)
1569                 goto err_unlock;
1570
1571         idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1572         if (idr_handle < 0) {
1573                 r = -EFAULT;
1574                 goto err_free;
1575         }
1576
1577         mutex_unlock(&p->mutex);
1578         dma_buf_put(dmabuf);
1579
1580         args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1581
1582         return 0;
1583
1584 err_free:
1585         amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
1586                                                pdd->drm_priv, NULL);
1587 err_unlock:
1588         mutex_unlock(&p->mutex);
1589         dma_buf_put(dmabuf);
1590         return r;
1591 }
1592
1593 /* Handle requests for watching SMI events */
1594 static int kfd_ioctl_smi_events(struct file *filep,
1595                                 struct kfd_process *p, void *data)
1596 {
1597         struct kfd_ioctl_smi_events_args *args = data;
1598         struct kfd_process_device *pdd;
1599
1600         mutex_lock(&p->mutex);
1601
1602         pdd = kfd_process_device_data_by_id(p, args->gpuid);
1603         mutex_unlock(&p->mutex);
1604         if (!pdd)
1605                 return -EINVAL;
1606
1607         return kfd_smi_event_open(pdd->dev, &args->anon_fd);
1608 }
1609
1610 #if IS_ENABLED(CONFIG_HSA_AMD_SVM)
1611
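/*
 * Set or query the XNACK mode of the process. A negative xnack_enabled only
 * queries the current mode; changing the mode is refused while the process
 * has user queues.
 */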
1612 static int kfd_ioctl_set_xnack_mode(struct file *filep,
1613                                     struct kfd_process *p, void *data)
1614 {
1615         struct kfd_ioctl_set_xnack_mode_args *args = data;
1616         int r = 0;
1617
1618         mutex_lock(&p->mutex);
1619         if (args->xnack_enabled >= 0) {
1620                 if (!list_empty(&p->pqm.queues)) {
1621                         pr_debug("Process has user queues running\n");
1622                         r = -EBUSY;
1623                         goto out_unlock;
1624                 }
1625
1626                 if (p->xnack_enabled == args->xnack_enabled)
1627                         goto out_unlock;
1628
1629                 if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
1630                         r = -EPERM;
1631                         goto out_unlock;
1632                 }
1633
1634                 r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
1635         } else {
1636                 args->xnack_enabled = p->xnack_enabled;
1637         }
1638
1639 out_unlock:
1640         mutex_unlock(&p->mutex);
1641
1642         return r;
1643 }
1644
1645 static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
1646 {
1647         struct kfd_ioctl_svm_args *args = data;
1648         int r = 0;
1649
1650         pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n",
1651                  args->start_addr, args->size, args->op, args->nattr);
1652
1653         if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK))
1654                 return -EINVAL;
1655         if (!args->start_addr || !args->size)
1656                 return -EINVAL;
1657
1658         r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
1659                       args->attrs);
1660
1661         return r;
1662 }
1663 #else
1664 static int kfd_ioctl_set_xnack_mode(struct file *filep,
1665                                     struct kfd_process *p, void *data)
1666 {
1667         return -EPERM;
1668 }
1669 static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
1670 {
1671         return -EPERM;
1672 }
1673 #endif
1674
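/*
 * Checkpoint per-process private data (CRIU private data version and XNACK
 * mode) into the user-supplied private data blob and advance *priv_offset.
 */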
1675 static int criu_checkpoint_process(struct kfd_process *p,
1676                              uint8_t __user *user_priv_data,
1677                              uint64_t *priv_offset)
1678 {
1679         struct kfd_criu_process_priv_data process_priv;
1680         int ret;
1681
1682         memset(&process_priv, 0, sizeof(process_priv));
1683
1684         process_priv.version = KFD_CRIU_PRIV_VERSION;
1685         /* For checkpoint/restore we don't consider the negative xnack mode,
1686          * which is only used for querying without changing the mode. Here 0
1687          * means disabled and 1 means enabled (retry to find a valid PTE).
1688          */
1689         process_priv.xnack_mode = p->xnack_enabled ? 1 : 0;
1690
1691         ret = copy_to_user(user_priv_data + *priv_offset,
1692                                 &process_priv, sizeof(process_priv));
1693
1694         if (ret) {
1695                 pr_err("Failed to copy process information to user\n");
1696                 ret = -EFAULT;
1697         }
1698
1699         *priv_offset += sizeof(process_priv);
1700         return ret;
1701 }
1702
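/*
 * Checkpoint per-device information for all PDDs of the process: the
 * user/actual GPU id pairs go into the device bucket array, while the
 * (currently unused) per-device private data is copied to the private data
 * blob and *priv_offset is advanced past it.
 */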
1703 static int criu_checkpoint_devices(struct kfd_process *p,
1704                              uint32_t num_devices,
1705                              uint8_t __user *user_addr,
1706                              uint8_t __user *user_priv_data,
1707                              uint64_t *priv_offset)
1708 {
1709         struct kfd_criu_device_priv_data *device_priv = NULL;
1710         struct kfd_criu_device_bucket *device_buckets = NULL;
1711         int ret = 0, i;
1712
1713         device_buckets = kvzalloc(num_devices * sizeof(*device_buckets), GFP_KERNEL);
1714         if (!device_buckets) {
1715                 ret = -ENOMEM;
1716                 goto exit;
1717         }
1718
1719         device_priv = kvzalloc(num_devices * sizeof(*device_priv), GFP_KERNEL);
1720         if (!device_priv) {
1721                 ret = -ENOMEM;
1722                 goto exit;
1723         }
1724
1725         for (i = 0; i < num_devices; i++) {
1726                 struct kfd_process_device *pdd = p->pdds[i];
1727
1728                 device_buckets[i].user_gpu_id = pdd->user_gpu_id;
1729                 device_buckets[i].actual_gpu_id = pdd->dev->id;
1730
1731                 /*
1732                  * priv_data does not contain useful information for now and is reserved for
1733                  * future use, so we do not set its contents.
1734                  */
1735         }
1736
1737         ret = copy_to_user(user_addr, device_buckets, num_devices * sizeof(*device_buckets));
1738         if (ret) {
1739                 pr_err("Failed to copy device information to user\n");
1740                 ret = -EFAULT;
1741                 goto exit;
1742         }
1743
1744         ret = copy_to_user(user_priv_data + *priv_offset,
1745                            device_priv,
1746                            num_devices * sizeof(*device_priv));
1747         if (ret) {
1748                 pr_err("Failed to copy device information to user\n");
1749                 ret = -EFAULT;
1750         }
1751         *priv_offset += num_devices * sizeof(*device_priv);
1752
1753 exit:
1754         kvfree(device_buckets);
1755         kvfree(device_priv);
1756         return ret;
1757 }
1758
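/*
 * Count the BOs of all PDDs whose VA lies above the GPUVM base; these are
 * the buffers that get checkpointed by criu_checkpoint_bos().
 */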
1759 static uint32_t get_process_num_bos(struct kfd_process *p)
1760 {
1761         uint32_t num_of_bos = 0;
1762         int i;
1763
1764         /* Run over all PDDs of the process */
1765         for (i = 0; i < p->n_pdds; i++) {
1766                 struct kfd_process_device *pdd = p->pdds[i];
1767                 void *mem;
1768                 int id;
1769
1770                 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1771                         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
1772
1773                         if ((uint64_t)kgd_mem->va > pdd->gpuvm_base)
1774                                 num_of_bos++;
1775                 }
1776         }
1777         return num_of_bos;
1778 }
1779
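/*
 * Export a GEM object as a DMA-buf and install a file descriptor for it in
 * *shared_fd. The dmabuf reference is dropped if no fd can be installed.
 */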
1780 static int criu_get_prime_handle(struct drm_gem_object *gobj, int flags,
1781                                       u32 *shared_fd)
1782 {
1783         struct dma_buf *dmabuf;
1784         int ret;
1785
1786         dmabuf = amdgpu_gem_prime_export(gobj, flags);
1787         if (IS_ERR(dmabuf)) {
1788                 ret = PTR_ERR(dmabuf);
1789                 pr_err("dmabuf export failed for the BO\n");
1790                 return ret;
1791         }
1792
1793         ret = dma_buf_fd(dmabuf, flags);
1794         if (ret < 0) {
1795                 pr_err("dmabuf create fd failed, ret:%d\n", ret);
1796                 goto out_free_dmabuf;
1797         }
1798
1799         *shared_fd = ret;
1800         return 0;
1801
1802 out_free_dmabuf:
1803         dma_buf_put(dmabuf);
1804         return ret;
1805 }
1806
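/*
 * Checkpoint all user BOs of the process: fill one bucket (and one private
 * data entry) per BO, export VRAM/GTT BOs as DMA-buf fds, and copy both
 * arrays to user space. On failure, any dmabuf fds created so far are closed.
 */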
1807 static int criu_checkpoint_bos(struct kfd_process *p,
1808                                uint32_t num_bos,
1809                                uint8_t __user *user_bos,
1810                                uint8_t __user *user_priv_data,
1811                                uint64_t *priv_offset)
1812 {
1813         struct kfd_criu_bo_bucket *bo_buckets;
1814         struct kfd_criu_bo_priv_data *bo_privs;
1815         int ret = 0, pdd_index, bo_index = 0, id;
1816         void *mem;
1817
1818         bo_buckets = kvzalloc(num_bos * sizeof(*bo_buckets), GFP_KERNEL);
1819         if (!bo_buckets)
1820                 return -ENOMEM;
1821
1822         bo_privs = kvzalloc(num_bos * sizeof(*bo_privs), GFP_KERNEL);
1823         if (!bo_privs) {
1824                 ret = -ENOMEM;
1825                 goto exit;
1826         }
1827
1828         for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
1829                 struct kfd_process_device *pdd = p->pdds[pdd_index];
1830                 struct amdgpu_bo *dumper_bo;
1831                 struct kgd_mem *kgd_mem;
1832
1833                 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1834                         struct kfd_criu_bo_bucket *bo_bucket;
1835                         struct kfd_criu_bo_priv_data *bo_priv;
1836                         int i, dev_idx = 0;
1837
1838                         if (!mem) {
1839                                 ret = -ENOMEM;
1840                                 goto exit;
1841                         }
1842
1843                         kgd_mem = (struct kgd_mem *)mem;
1844                         dumper_bo = kgd_mem->bo;
1845
1846                         if ((uint64_t)kgd_mem->va <= pdd->gpuvm_base)
1847                                 continue;
1848
1849                         bo_bucket = &bo_buckets[bo_index];
1850                         bo_priv = &bo_privs[bo_index];
1851
1852                         bo_bucket->gpu_id = pdd->user_gpu_id;
1853                         bo_bucket->addr = (uint64_t)kgd_mem->va;
1854                         bo_bucket->size = amdgpu_bo_size(dumper_bo);
1855                         bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags;
1856                         bo_priv->idr_handle = id;
1857
1858                         if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1859                                 ret = amdgpu_ttm_tt_get_userptr(&dumper_bo->tbo,
1860                                                                 &bo_priv->user_addr);
1861                                 if (ret) {
1862                                         pr_err("Failed to obtain user address for user-pointer bo\n");
1863                                         goto exit;
1864                                 }
1865                         }
1866                         if (bo_bucket->alloc_flags
1867                             & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
1868                                 ret = criu_get_prime_handle(&dumper_bo->tbo.base,
1869                                                 bo_bucket->alloc_flags &
1870                                                 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
1871                                                 &bo_bucket->dmabuf_fd);
1872                                 if (ret)
1873                                         goto exit;
1874                         } else {
1875                                 bo_bucket->dmabuf_fd = KFD_INVALID_FD;
1876                         }
1877
1878                         if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
1879                                 bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL |
1880                                         KFD_MMAP_GPU_ID(pdd->dev->id);
1881                         else if (bo_bucket->alloc_flags &
1882                                 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1883                                 bo_bucket->offset = KFD_MMAP_TYPE_MMIO |
1884                                         KFD_MMAP_GPU_ID(pdd->dev->id);
1885                         else
1886                                 bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo);
1887
1888                         for (i = 0; i < p->n_pdds; i++) {
1889                                 if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->dev->adev, kgd_mem))
1890                                         bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id;
1891                         }
1892
1893                         pr_debug("bo_size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n"
1894                                         "gpu_id = 0x%x alloc_flags = 0x%x idr_handle = 0x%x",
1895                                         bo_bucket->size,
1896                                         bo_bucket->addr,
1897                                         bo_bucket->offset,
1898                                         bo_bucket->gpu_id,
1899                                         bo_bucket->alloc_flags,
1900                                         bo_priv->idr_handle);
1901                         bo_index++;
1902                 }
1903         }
1904
1905         ret = copy_to_user(user_bos, bo_buckets, num_bos * sizeof(*bo_buckets));
1906         if (ret) {
1907                 pr_err("Failed to copy BO information to user\n");
1908                 ret = -EFAULT;
1909                 goto exit;
1910         }
1911
1912         ret = copy_to_user(user_priv_data + *priv_offset, bo_privs, num_bos * sizeof(*bo_privs));
1913         if (ret) {
1914                 pr_err("Failed to copy BO priv information to user\n");
1915                 ret = -EFAULT;
1916                 goto exit;
1917         }
1918
1919         *priv_offset += num_bos * sizeof(*bo_privs);
1920
1921 exit:
1922         while (ret && bo_index--) {
1923                 if (bo_buckets[bo_index].alloc_flags
1924                     & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
1925                         close_fd(bo_buckets[bo_index].dmabuf_fd);
1926         }
1927
1928         kvfree(bo_buckets);
1929         kvfree(bo_privs);
1930         return ret;
1931 }
1932
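/*
 * Gather the object counts (devices, BOs, queues, events, SVM ranges) for a
 * checkpoint and, if requested, the total size of the private data blob the
 * checkpoint will produce.
 */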
1933 static int criu_get_process_object_info(struct kfd_process *p,
1934                                         uint32_t *num_devices,
1935                                         uint32_t *num_bos,
1936                                         uint32_t *num_objects,
1937                                         uint64_t *objs_priv_size)
1938 {
1939         uint64_t queues_priv_data_size, svm_priv_data_size, priv_size;
1940         uint32_t num_queues, num_events, num_svm_ranges;
1941         int ret;
1942
1943         *num_devices = p->n_pdds;
1944         *num_bos = get_process_num_bos(p);
1945
1946         ret = kfd_process_get_queue_info(p, &num_queues, &queues_priv_data_size);
1947         if (ret)
1948                 return ret;
1949
1950         num_events = kfd_get_num_events(p);
1951
1952         ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
1953         if (ret)
1954                 return ret;
1955
1956         *num_objects = num_queues + num_events + num_svm_ranges;
1957
1958         if (objs_priv_size) {
1959                 priv_size = sizeof(struct kfd_criu_process_priv_data);
1960                 priv_size += *num_devices * sizeof(struct kfd_criu_device_priv_data);
1961                 priv_size += *num_bos * sizeof(struct kfd_criu_bo_priv_data);
1962                 priv_size += queues_priv_data_size;
1963                 priv_size += num_events * sizeof(struct kfd_criu_event_priv_data);
1964                 priv_size += svm_priv_data_size;
1965                 *objs_priv_size = priv_size;
1966         }
1967         return 0;
1968 }
1969
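/*
 * CRIU checkpoint (dump) entry point. Verifies that the queues were evicted
 * by a prior PROCESS_INFO op and that the caller-provided counts and private
 * data size match, then checkpoints process, device, queue, event, SVM and
 * BO data into the user-supplied buffers.
 */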
1970 static int criu_checkpoint(struct file *filep,
1971                            struct kfd_process *p,
1972                            struct kfd_ioctl_criu_args *args)
1973 {
1974         int ret;
1975         uint32_t num_devices, num_bos, num_objects;
1976         uint64_t priv_size, priv_offset = 0, bo_priv_offset;
1977
1978         if (!args->devices || !args->bos || !args->priv_data)
1979                 return -EINVAL;
1980
1981         mutex_lock(&p->mutex);
1982
1983         if (!p->n_pdds) {
1984                 pr_err("No pdd for given process\n");
1985                 ret = -ENODEV;
1986                 goto exit_unlock;
1987         }
1988
1989         /* Confirm all process queues are evicted */
1990         if (!p->queues_paused) {
1991                 pr_err("Cannot dump process when queues are not in evicted state\n");
1992                 /* CRIU plugin did not call op PROCESS_INFO before checkpointing */
1993                 ret = -EINVAL;
1994                 goto exit_unlock;
1995         }
1996
1997         ret = criu_get_process_object_info(p, &num_devices, &num_bos, &num_objects, &priv_size);
1998         if (ret)
1999                 goto exit_unlock;
2000
2001         if (num_devices != args->num_devices ||
2002             num_bos != args->num_bos ||
2003             num_objects != args->num_objects ||
2004             priv_size != args->priv_data_size) {
2005
2006                 ret = -EINVAL;
2007                 goto exit_unlock;
2008         }
2009
2010         /* each function will store private data inside priv_data and adjust priv_offset */
2011         ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset);
2012         if (ret)
2013                 goto exit_unlock;
2014
2015         ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices,
2016                                 (uint8_t __user *)args->priv_data, &priv_offset);
2017         if (ret)
2018                 goto exit_unlock;
2019
2020         /* Leave room for BOs in the private data. They need to be restored
2021          * before events, but we checkpoint them last to simplify the error
2022          * handling.
2023          */
2024         bo_priv_offset = priv_offset;
2025         priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data);
2026
2027         if (num_objects) {
2028                 ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
2029                                                  &priv_offset);
2030                 if (ret)
2031                         goto exit_unlock;
2032
2033                 ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
2034                                                  &priv_offset);
2035                 if (ret)
2036                         goto exit_unlock;
2037
2038                 ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
2039                 if (ret)
2040                         goto exit_unlock;
2041         }
2042
2043         /* This must be the last thing in this function that can fail.
2044          * Otherwise we leak dmabuf file descriptors.
2045          */
2046         ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
2047                            (uint8_t __user *)args->priv_data, &bo_priv_offset);
2048
2049 exit_unlock:
2050         mutex_unlock(&p->mutex);
2051         if (ret)
2052                 pr_err("Failed to dump CRIU ret:%d\n", ret);
2053         else
2054                 pr_debug("CRIU dump ret:%d\n", ret);
2055
2056         return ret;
2057 }
2058
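/*
 * Restore per-process state from the private data blob: validate the CRIU
 * private data version and restore the XNACK mode.
 */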
2059 static int criu_restore_process(struct kfd_process *p,
2060                                 struct kfd_ioctl_criu_args *args,
2061                                 uint64_t *priv_offset,
2062                                 uint64_t max_priv_data_size)
2063 {
2064         int ret = 0;
2065         struct kfd_criu_process_priv_data process_priv;
2066
2067         if (*priv_offset + sizeof(process_priv) > max_priv_data_size)
2068                 return -EINVAL;
2069
2070         ret = copy_from_user(&process_priv,
2071                                 (void __user *)(args->priv_data + *priv_offset),
2072                                 sizeof(process_priv));
2073         if (ret) {
2074                 pr_err("Failed to copy process private information from user\n");
2075                 ret = -EFAULT;
2076                 goto exit;
2077         }
2078         *priv_offset += sizeof(process_priv);
2079
2080         if (process_priv.version != KFD_CRIU_PRIV_VERSION) {
2081                 pr_err("Invalid CRIU API version (checkpointed:%d current:%d)\n",
2082                         process_priv.version, KFD_CRIU_PRIV_VERSION);
2083                 return -EINVAL;
2084         }
2085
2086         pr_debug("Setting XNACK mode\n");
2087         if (process_priv.xnack_mode && !kfd_process_xnack_mode(p, true)) {
2088                 pr_err("xnack mode cannot be set\n");
2089                 ret = -EPERM;
2090                 goto exit;
2091         } else {
2092                 pr_debug("set xnack mode: %d\n", process_priv.xnack_mode);
2093                 p->xnack_enabled = process_priv.xnack_mode;
2094         }
2095
2096 exit:
2097         return ret;
2098 }
2099
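/*
 * Restore per-device state: match each checkpointed device bucket to a local
 * GPU, restore the user GPU id, take over the render node fd passed in by
 * the plugin to initialize the VM, and bind the process to the device.
 */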
2100 static int criu_restore_devices(struct kfd_process *p,
2101                                 struct kfd_ioctl_criu_args *args,
2102                                 uint64_t *priv_offset,
2103                                 uint64_t max_priv_data_size)
2104 {
2105         struct kfd_criu_device_bucket *device_buckets;
2106         struct kfd_criu_device_priv_data *device_privs;
2107         int ret = 0;
2108         uint32_t i;
2109
2110         if (args->num_devices != p->n_pdds)
2111                 return -EINVAL;
2112
2113         if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size)
2114                 return -EINVAL;
2115
2116         device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL);
2117         if (!device_buckets)
2118                 return -ENOMEM;
2119
2120         ret = copy_from_user(device_buckets, (void __user *)args->devices,
2121                                 args->num_devices * sizeof(*device_buckets));
2122         if (ret) {
2123                 pr_err("Failed to copy devices buckets from user\n");
2124                 ret = -EFAULT;
2125                 goto exit;
2126         }
2127
2128         for (i = 0; i < args->num_devices; i++) {
2129                 struct kfd_dev *dev;
2130                 struct kfd_process_device *pdd;
2131                 struct file *drm_file;
2132
2133                 /* device private data is not currently used */
2134
2135                 if (!device_buckets[i].user_gpu_id) {
2136                         pr_err("Invalid user gpu_id\n");
2137                         ret = -EINVAL;
2138                         goto exit;
2139                 }
2140
2141                 dev = kfd_device_by_id(device_buckets[i].actual_gpu_id);
2142                 if (!dev) {
2143                         pr_err("Failed to find device with gpu_id = %x\n",
2144                                 device_buckets[i].actual_gpu_id);
2145                         ret = -EINVAL;
2146                         goto exit;
2147                 }
2148
2149                 pdd = kfd_get_process_device_data(dev, p);
2150                 if (!pdd) {
2151                         pr_err("Failed to get pdd for gpu_id = %x\n",
2152                                         device_buckets[i].actual_gpu_id);
2153                         ret = -EINVAL;
2154                         goto exit;
2155                 }
2156                 pdd->user_gpu_id = device_buckets[i].user_gpu_id;
2157
2158                 drm_file = fget(device_buckets[i].drm_fd);
2159                 if (!drm_file) {
2160                         pr_err("Invalid render node file descriptor sent from plugin (%d)\n",
2161                                 device_buckets[i].drm_fd);
2162                         ret = -EINVAL;
2163                         goto exit;
2164                 }
2165
2166                 if (pdd->drm_file) {
2167                         ret = -EINVAL;
2168                         goto exit;
2169                 }
2170
2171                 /* create the vm using render nodes for kfd pdd */
2172                 if (kfd_process_device_init_vm(pdd, drm_file)) {
2173                         pr_err("could not init vm for given pdd\n");
2174                         /* On success the PDD would keep the drm_file reference, so drop it only on failure */
2175                         fput(drm_file);
2176                         ret = -EINVAL;
2177                         goto exit;
2178                 }
2179                 /*
2180                  * pdd already has the VM bound to the render node, so the call below won't
2181                  * create a new exclusive KFD mapping but reuses the existing renderDXXX one.
2182                  * It is still needed for IOMMUv2 binding and runtime PM.
2183                  */
2184                 pdd = kfd_bind_process_to_device(dev, p);
2185                 if (IS_ERR(pdd)) {
2186                         ret = PTR_ERR(pdd);
2187                         goto exit;
2188                 }
2189
2190                 if (!pdd->doorbell_index &&
2191                     kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
2192                         ret = -ENOMEM;
2193                         goto exit;
2194                 }
2195         }
2196
2197         /*
2198          * We are not copying device private data from user as the data is not used
2199          * for now, but we still advance priv_offset past it.
2200          */
2201         *priv_offset += args->num_devices * sizeof(*device_privs);
2202
2203 exit:
2204         kfree(device_buckets);
2205         return ret;
2206 }
2207
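/*
 * Re-create a single checkpointed BO on the GPU it belonged to: compute the
 * allocation offset (doorbell, MMIO remap or user pointer), allocate the
 * memory, restore its original IDR handle and report the restored mmap
 * offset.
 */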
2208 static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
2209                                       struct kfd_criu_bo_bucket *bo_bucket,
2210                                       struct kfd_criu_bo_priv_data *bo_priv,
2211                                       struct kgd_mem **kgd_mem)
2212 {
2213         int idr_handle;
2214         int ret;
2215         const bool criu_resume = true;
2216         u64 offset;
2217
2218         if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
2219                 if (bo_bucket->size != kfd_doorbell_process_slice(pdd->dev))
2220                         return -EINVAL;
2221
2222                 offset = kfd_get_process_doorbells(pdd);
2223                 if (!offset)
2224                         return -ENOMEM;
2225         } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
2226                 /* MMIO BOs need remapped bus address */
2227                 if (bo_bucket->size != PAGE_SIZE) {
2228                         pr_err("Invalid page size\n");
2229                         return -EINVAL;
2230                 }
2231                 offset = pdd->dev->adev->rmmio_remap.bus_addr;
2232                 if (!offset) {
2233                         pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
2234                         return -ENOMEM;
2235                 }
2236         } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
2237                 offset = bo_priv->user_addr;
2238         }
2239         /* Create the BO */
2240         ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr,
2241                                                       bo_bucket->size, pdd->drm_priv, kgd_mem,
2242                                                       &offset, bo_bucket->alloc_flags, criu_resume);
2243         if (ret) {
2244                 pr_err("Could not create the BO\n");
2245                 return ret;
2246         }
2247         pr_debug("New BO created: size:0x%llx addr:0x%llx offset:0x%llx\n",
2248                  bo_bucket->size, bo_bucket->addr, offset);
2249
2250         /* Restore previous IDR handle */
2251         pr_debug("Restoring old IDR handle for the BO\n");
2252         idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle,
2253                                bo_priv->idr_handle + 1, GFP_KERNEL);
2254
2255         if (idr_handle < 0) {
2256                 pr_err("Could not allocate idr\n");
2257                 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv,
2258                                                        NULL);
2259                 return -ENOMEM;
2260         }
2261
2262         if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
2263                 bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id);
2264         } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
2265                 bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id);
2266         } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
2267                 bo_bucket->restored_offset = offset;
2268         } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
2269                 bo_bucket->restored_offset = offset;
2270                 /* Update the VRAM usage count */
2271                 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
2272         }
2273         return 0;
2274 }
2275
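/*
 * Restore one BO bucket: re-create the memory on its owning GPU, map it on
 * every GPU it was mapped on at checkpoint time, and re-export VRAM/GTT BOs
 * as DMA-buf fds returned in the bucket.
 */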
2276 static int criu_restore_bo(struct kfd_process *p,
2277                            struct kfd_criu_bo_bucket *bo_bucket,
2278                            struct kfd_criu_bo_priv_data *bo_priv)
2279 {
2280         struct kfd_process_device *pdd;
2281         struct kgd_mem *kgd_mem;
2282         int ret;
2283         int j;
2284
2285         pr_debug("Restoring BO size:0x%llx addr:0x%llx gpu_id:0x%x flags:0x%x idr_handle:0x%x\n",
2286                  bo_bucket->size, bo_bucket->addr, bo_bucket->gpu_id, bo_bucket->alloc_flags,
2287                  bo_priv->idr_handle);
2288
2289         pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
2290         if (!pdd) {
2291                 pr_err("Failed to get pdd\n");
2292                 return -ENODEV;
2293         }
2294
2295         ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem);
2296         if (ret)
2297                 return ret;
2298
2299         /* now map these BOs to GPU/s */
2300         for (j = 0; j < p->n_pdds; j++) {
2301                 struct kfd_dev *peer;
2302                 struct kfd_process_device *peer_pdd;
2303
2304                 if (!bo_priv->mapped_gpuids[j])
2305                         break;
2306
2307                 peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]);
2308                 if (!peer_pdd)
2309                         return -EINVAL;
2310
2311                 peer = peer_pdd->dev;
2312
2313                 peer_pdd = kfd_bind_process_to_device(peer, p);
2314                 if (IS_ERR(peer_pdd))
2315                         return PTR_ERR(peer_pdd);
2316
2317                 ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem,
2318                                                             peer_pdd->drm_priv);
2319                 if (ret) {
2320                         pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
2321                         return ret;
2322                 }
2323         }
2324
2325         pr_debug("map memory was successful for the BO\n");
2326         /* create the dmabuf object and export the bo */
2327         if (bo_bucket->alloc_flags
2328             & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
2329                 ret = criu_get_prime_handle(&kgd_mem->bo->tbo.base, DRM_RDWR,
2330                                             &bo_bucket->dmabuf_fd);
2331                 if (ret)
2332                         return ret;
2333         } else {
2334                 bo_bucket->dmabuf_fd = KFD_INVALID_FD;
2335         }
2336
2337         return 0;
2338 }
2339
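/*
 * Restore all checkpointed BOs: block MMU notifications until CRIU_RESUME,
 * read the bucket and private data arrays from user space, restore each BO,
 * then copy the buckets back so user space can read the restored offsets and
 * dmabuf fds. On failure, fds created so far are closed.
 */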
2340 static int criu_restore_bos(struct kfd_process *p,
2341                             struct kfd_ioctl_criu_args *args,
2342                             uint64_t *priv_offset,
2343                             uint64_t max_priv_data_size)
2344 {
2345         struct kfd_criu_bo_bucket *bo_buckets = NULL;
2346         struct kfd_criu_bo_priv_data *bo_privs = NULL;
2347         int ret = 0;
2348         uint32_t i = 0;
2349
2350         if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size)
2351                 return -EINVAL;
2352
2353         /* Prevent MMU notifications until stage-4 IOCTL (CRIU_RESUME) is received */
2354         amdgpu_amdkfd_block_mmu_notifications(p->kgd_process_info);
2355
2356         bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL);
2357         if (!bo_buckets)
2358                 return -ENOMEM;
2359
2360         ret = copy_from_user(bo_buckets, (void __user *)args->bos,
2361                              args->num_bos * sizeof(*bo_buckets));
2362         if (ret) {
2363                 pr_err("Failed to copy BOs information from user\n");
2364                 ret = -EFAULT;
2365                 goto exit;
2366         }
2367
2368         bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL);
2369         if (!bo_privs) {
2370                 ret = -ENOMEM;
2371                 goto exit;
2372         }
2373
2374         ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset,
2375                              args->num_bos * sizeof(*bo_privs));
2376         if (ret) {
2377                 pr_err("Failed to copy BOs information from user\n");
2378                 ret = -EFAULT;
2379                 goto exit;
2380         }
2381         *priv_offset += args->num_bos * sizeof(*bo_privs);
2382
2383         /* Create and map new BOs */
2384         for (; i < args->num_bos; i++) {
2385                 ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i]);
2386                 if (ret) {
2387                         pr_debug("Failed to restore BO[%d] ret:%d\n", i, ret);
2388                         goto exit;
2389                 }
2390         } /* done */
2391
2392         /* Copy only the buckets back so user can read bo_buckets[N].restored_offset */
2393         ret = copy_to_user((void __user *)args->bos,
2394                                 bo_buckets,
2395                                 (args->num_bos * sizeof(*bo_buckets)));
2396         if (ret)
2397                 ret = -EFAULT;
2398
2399 exit:
2400         while (ret && i--) {
2401                 if (bo_buckets[i].alloc_flags
2402                    & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
2403                         close_fd(bo_buckets[i].dmabuf_fd);
2404         }
2405         kvfree(bo_buckets);
2406         kvfree(bo_privs);
2407         return ret;
2408 }
2409
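/*
 * Restore the remaining checkpointed objects (queues, events and SVM ranges)
 * by dispatching on the object_type field that starts every object's private
 * data entry.
 */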
2410 static int criu_restore_objects(struct file *filep,
2411                                 struct kfd_process *p,
2412                                 struct kfd_ioctl_criu_args *args,
2413                                 uint64_t *priv_offset,
2414                                 uint64_t max_priv_data_size)
2415 {
2416         int ret = 0;
2417         uint32_t i;
2418
2419         BUILD_BUG_ON(offsetof(struct kfd_criu_queue_priv_data, object_type));
2420         BUILD_BUG_ON(offsetof(struct kfd_criu_event_priv_data, object_type));
2421         BUILD_BUG_ON(offsetof(struct kfd_criu_svm_range_priv_data, object_type));
2422
2423         for (i = 0; i < args->num_objects; i++) {
2424                 uint32_t object_type;
2425
2426                 if (*priv_offset + sizeof(object_type) > max_priv_data_size) {
2427                         pr_err("Invalid private data size\n");
2428                         return -EINVAL;
2429                 }
2430
2431                 ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset));
2432                 if (ret) {
2433                         pr_err("Failed to copy private information from user\n");
2434                         goto exit;
2435                 }
2436
2437                 switch (object_type) {
2438                 case KFD_CRIU_OBJECT_TYPE_QUEUE:
2439                         ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data,
2440                                                      priv_offset, max_priv_data_size);
2441                         if (ret)
2442                                 goto exit;
2443                         break;
2444                 case KFD_CRIU_OBJECT_TYPE_EVENT:
2445                         ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data,
2446                                                      priv_offset, max_priv_data_size);
2447                         if (ret)
2448                                 goto exit;
2449                         break;
2450                 case KFD_CRIU_OBJECT_TYPE_SVM_RANGE:
2451                         ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data,
2452                                                      priv_offset, max_priv_data_size);
2453                         if (ret)
2454                                 goto exit;
2455                         break;
2456                 default:
2457                         pr_err("Invalid object type:%u at index:%d\n", object_type, i);
2458                         ret = -EINVAL;
2459                         goto exit;
2460                 }
2461         }
2462 exit:
2463         return ret;
2464 }
2465
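/*
 * CRIU restore entry point. Evicts the process' queues, then restores
 * process state, devices, BOs and the remaining objects from the private
 * data blob, verifying that the whole blob is consumed.
 */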
2466 static int criu_restore(struct file *filep,
2467                         struct kfd_process *p,
2468                         struct kfd_ioctl_criu_args *args)
2469 {
2470         uint64_t priv_offset = 0;
2471         int ret = 0;
2472
2473         pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
2474                  args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
2475
2476         if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
2477             !args->num_devices || !args->num_bos)
2478                 return -EINVAL;
2479
2480         mutex_lock(&p->mutex);
2481
2482         /*
2483          * Set the process to evicted state to avoid running any new queues before all the memory
2484          * mappings are ready.
2485          */
2486         ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_RESTORE);
2487         if (ret)
2488                 goto exit_unlock;
2489
2490         /* Each function will adjust priv_offset based on how many bytes they consumed */
2491         ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size);
2492         if (ret)
2493                 goto exit_unlock;
2494
2495         ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size);
2496         if (ret)
2497                 goto exit_unlock;
2498
2499         ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size);
2500         if (ret)
2501                 goto exit_unlock;
2502
2503         ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size);
2504         if (ret)
2505                 goto exit_unlock;
2506
2507         if (priv_offset != args->priv_data_size) {
2508                 pr_err("Invalid private data size\n");
2509                 ret = -EINVAL;
2510         }
2511
2512 exit_unlock:
2513         mutex_unlock(&p->mutex);
2514         if (ret)
2515                 pr_err("Failed to restore CRIU ret:%d\n", ret);
2516         else
2517                 pr_debug("CRIU restore successful\n");
2518
2519         return ret;
2520 }
2521
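/*
 * CRIU unpause op: restore the process queues that were evicted by a prior
 * PROCESS_INFO op and clear the queues_paused flag.
 */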
2522 static int criu_unpause(struct file *filep,
2523                         struct kfd_process *p,
2524                         struct kfd_ioctl_criu_args *args)
2525 {
2526         int ret;
2527
2528         mutex_lock(&p->mutex);
2529
2530         if (!p->queues_paused) {
2531                 mutex_unlock(&p->mutex);
2532                 return -EINVAL;
2533         }
2534
2535         ret = kfd_process_restore_queues(p);
2536         if (ret)
2537                 pr_err("Failed to unpause queues ret:%d\n", ret);
2538         else
2539                 p->queues_paused = false;
2540
2541         mutex_unlock(&p->mutex);
2542
2543         return ret;
2544 }
2545
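/*
 * CRIU resume op: look up the restored target process by pid and resume SVM
 * and the KGD process info for it (the counterpart to the MMU-notification
 * block set up during restore).
 */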
2546 static int criu_resume(struct file *filep,
2547                         struct kfd_process *p,
2548                         struct kfd_ioctl_criu_args *args)
2549 {
2550         struct kfd_process *target = NULL;
2551         struct pid *pid = NULL;
2552         int ret = 0;
2553
2554         pr_debug("Inside %s, target pid for criu restore: %d\n", __func__,
2555                  args->pid);
2556
2557         pid = find_get_pid(args->pid);
2558         if (!pid) {
2559                 pr_err("Cannot find pid info for %i\n", args->pid);
2560                 return -ESRCH;
2561         }
2562
2563         pr_debug("calling kfd_lookup_process_by_pid\n");
2564         target = kfd_lookup_process_by_pid(pid);
2565
2566         put_pid(pid);
2567
2568         if (!target) {
2569                 pr_debug("Cannot find process info for %i\n", args->pid);
2570                 return -ESRCH;
2571         }
2572
2573         mutex_lock(&target->mutex);
2574         ret = kfd_criu_resume_svm(target);
2575         if (ret) {
2576                 pr_err("kfd_criu_resume_svm failed for %i\n", args->pid);
2577                 goto exit;
2578         }
2579
2580         ret = amdgpu_amdkfd_criu_resume(target->kgd_process_info);
2581         if (ret)
2582                 pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid);
2583
2584 exit:
2585         mutex_unlock(&target->mutex);
2586
2587         kfd_unref_process(target);
2588         return ret;
2589 }
2590
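/*
 * CRIU process-info op: evict all queues of the process, report its pid and
 * the object counts and private data size needed for the subsequent
 * checkpoint. Queues are restored again if this op fails.
 */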
2591 static int criu_process_info(struct file *filep,
2592                                 struct kfd_process *p,
2593                                 struct kfd_ioctl_criu_args *args)
2594 {
2595         int ret = 0;
2596
2597         mutex_lock(&p->mutex);
2598
2599         if (!p->n_pdds) {
2600                 pr_err("No pdd for given process\n");
2601                 ret = -ENODEV;
2602                 goto err_unlock;
2603         }
2604
2605         ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_CHECKPOINT);
2606         if (ret)
2607                 goto err_unlock;
2608
2609         p->queues_paused = true;
2610
2611         args->pid = task_pid_nr_ns(p->lead_thread,
2612                                         task_active_pid_ns(p->lead_thread));
2613
2614         ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos,
2615                                            &args->num_objects, &args->priv_data_size);
2616         if (ret)
2617                 goto err_unlock;
2618
2619         dev_dbg(kfd_device, "Num of devices:%u bos:%u objects:%u priv_data_size:%lld\n",
2620                                 args->num_devices, args->num_bos, args->num_objects,
2621                                 args->priv_data_size);
2622
2623 err_unlock:
2624         if (ret) {
2625                 kfd_process_restore_queues(p);
2626                 p->queues_paused = false;
2627         }
2628         mutex_unlock(&p->mutex);
2629         return ret;
2630 }
2631
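/* Dispatch an AMDKFD_IOC_CRIU_OP ioctl to the requested CRIU operation. */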
2632 static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data)
2633 {
2634         struct kfd_ioctl_criu_args *args = data;
2635         int ret;
2636
2637         dev_dbg(kfd_device, "CRIU operation: %d\n", args->op);
2638         switch (args->op) {
2639         case KFD_CRIU_OP_PROCESS_INFO:
2640                 ret = criu_process_info(filep, p, args);
2641                 break;
2642         case KFD_CRIU_OP_CHECKPOINT:
2643                 ret = criu_checkpoint(filep, p, args);
2644                 break;
2645         case KFD_CRIU_OP_UNPAUSE:
2646                 ret = criu_unpause(filep, p, args);
2647                 break;
2648         case KFD_CRIU_OP_RESTORE:
2649                 ret = criu_restore(filep, p, args);
2650                 break;
2651         case KFD_CRIU_OP_RESUME:
2652                 ret = criu_resume(filep, p, args);
2653                 break;
2654         default:
2655                 dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op);
2656                 ret = -EINVAL;
2657                 break;
2658         }
2659
2660         if (ret)
2661                 dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret);
2662
2663         return ret;
2664 }
2665
2666 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
2667         [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
2668                             .cmd_drv = 0, .name = #ioctl}
2669
2670 /** Ioctl table */
2671 static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
2672         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
2673                         kfd_ioctl_get_version, 0),
2674
2675         AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
2676                         kfd_ioctl_create_queue, 0),
2677
2678         AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
2679                         kfd_ioctl_destroy_queue, 0),
2680
2681         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
2682                         kfd_ioctl_set_memory_policy, 0),
2683
2684         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
2685                         kfd_ioctl_get_clock_counters, 0),
2686
2687         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
2688                         kfd_ioctl_get_process_apertures, 0),
2689
2690         AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
2691                         kfd_ioctl_update_queue, 0),
2692
2693         AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
2694                         kfd_ioctl_create_event, 0),
2695
2696         AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
2697                         kfd_ioctl_destroy_event, 0),
2698
2699         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
2700                         kfd_ioctl_set_event, 0),
2701
2702         AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
2703                         kfd_ioctl_reset_event, 0),
2704
2705         AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
2706                         kfd_ioctl_wait_events, 0),
2707
2708         AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER_DEPRECATED,
2709                         kfd_ioctl_dbg_register, 0),
2710
2711         AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED,
2712                         kfd_ioctl_dbg_unregister, 0),
2713
2714         AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED,
2715                         kfd_ioctl_dbg_address_watch, 0),
2716
2717         AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED,
2718                         kfd_ioctl_dbg_wave_control, 0),
2719
2720         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
2721                         kfd_ioctl_set_scratch_backing_va, 0),
2722
2723         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
2724                         kfd_ioctl_get_tile_config, 0),
2725
2726         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
2727                         kfd_ioctl_set_trap_handler, 0),
2728
2729         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
2730                         kfd_ioctl_get_process_apertures_new, 0),
2731
2732         AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
2733                         kfd_ioctl_acquire_vm, 0),
2734
2735         AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
2736                         kfd_ioctl_alloc_memory_of_gpu, 0),
2737
2738         AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
2739                         kfd_ioctl_free_memory_of_gpu, 0),
2740
2741         AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
2742                         kfd_ioctl_map_memory_to_gpu, 0),
2743
2744         AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
2745                         kfd_ioctl_unmap_memory_from_gpu, 0),
2746
2747         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
2748                         kfd_ioctl_set_cu_mask, 0),
2749
2750         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
2751                         kfd_ioctl_get_queue_wave_state, 0),
2752
2753         AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
2754                                 kfd_ioctl_get_dmabuf_info, 0),
2755
2756         AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
2757                                 kfd_ioctl_import_dmabuf, 0),
2758
2759         AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
2760                         kfd_ioctl_alloc_queue_gws, 0),
2761
2762         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS,
2763                         kfd_ioctl_smi_events, 0),
2764
2765         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SVM, kfd_ioctl_svm, 0),
2766
2767         AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE,
2768                         kfd_ioctl_set_xnack_mode, 0),
2769
2770         AMDKFD_IOCTL_DEF(AMDKFD_IOC_CRIU_OP,
2771                         kfd_ioctl_criu, KFD_IOC_FLAG_CHECKPOINT_RESTORE),
2772
2773         AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY,
2774                         kfd_ioctl_get_available_memory, 0),
2775 };
2776
2777 #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
2778
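/*
 * Main ioctl dispatcher: validates the ioctl number and the calling process
 * (only the opener or a ptrace-attached checkpoint/restore caller may use the
 * fd), enforces CAP_CHECKPOINT_RESTORE/CAP_SYS_ADMIN for checkpoint/restore
 * ioctls, copies the argument block in and out, and calls the handler from
 * the ioctl table.
 */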
2779 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2780 {
2781         struct kfd_process *process;
2782         amdkfd_ioctl_t *func;
2783         const struct amdkfd_ioctl_desc *ioctl = NULL;
2784         unsigned int nr = _IOC_NR(cmd);
2785         char stack_kdata[128];
2786         char *kdata = NULL;
2787         unsigned int usize, asize;
2788         int retcode = -EINVAL;
2789         bool ptrace_attached = false;
2790
2791         if (nr >= AMDKFD_CORE_IOCTL_COUNT)
2792                 goto err_i1;
2793
2794         if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
2795                 u32 amdkfd_size;
2796
2797                 ioctl = &amdkfd_ioctls[nr];
2798
2799                 amdkfd_size = _IOC_SIZE(ioctl->cmd);
2800                 usize = asize = _IOC_SIZE(cmd);
2801                 if (amdkfd_size > asize)
2802                         asize = amdkfd_size;
2803
2804                 cmd = ioctl->cmd;
2805         } else
2806                 goto err_i1;
2807
2808         dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
2809
2810         /* Get the process struct from the filep. Only the process
2811          * that opened /dev/kfd can use the file descriptor. Child
2812          * processes need to create their own KFD device context.
2813          */
2814         process = filep->private_data;
2815
2816         rcu_read_lock();
2817         if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) &&
2818             ptrace_parent(process->lead_thread) == current)
2819                 ptrace_attached = true;
2820         rcu_read_unlock();
2821
2822         if (process->lead_thread != current->group_leader
2823             && !ptrace_attached) {
2824                 dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
2825                 retcode = -EBADF;
2826                 goto err_i1;
2827         }
2828
2829         /* Do not trust userspace, use our own definition */
2830         func = ioctl->func;
2831
2832         if (unlikely(!func)) {
2833                 dev_dbg(kfd_device, "no function\n");
2834                 retcode = -EINVAL;
2835                 goto err_i1;
2836         }
2837
2838         /*
2839          * Versions of docker shipped in Ubuntu 18.xx and 20.xx do not support
2840          * CAP_CHECKPOINT_RESTORE, so we also allow access with CAP_SYS_ADMIN, which is a
2841          * more privileged capability.
2842          */
2843         if (unlikely(ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE)) {
2844                 if (!capable(CAP_CHECKPOINT_RESTORE) &&
2845                                                 !capable(CAP_SYS_ADMIN)) {
2846                         retcode = -EACCES;
2847                         goto err_i1;
2848                 }
2849         }
2850
2851         if (cmd & (IOC_IN | IOC_OUT)) {
2852                 if (asize <= sizeof(stack_kdata)) {
2853                         kdata = stack_kdata;
2854                 } else {
2855                         kdata = kmalloc(asize, GFP_KERNEL);
2856                         if (!kdata) {
2857                                 retcode = -ENOMEM;
2858                                 goto err_i1;
2859                         }
2860                 }
2861                 if (asize > usize)
2862                         memset(kdata + usize, 0, asize - usize);
2863         }
2864
2865         if (cmd & IOC_IN) {
2866                 if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
2867                         retcode = -EFAULT;
2868                         goto err_i1;
2869                 }
2870         } else if (cmd & IOC_OUT) {
2871                 memset(kdata, 0, usize);
2872         }
2873
2874         retcode = func(filep, process, kdata);
2875
2876         if (cmd & IOC_OUT)
2877                 if (copy_to_user((void __user *)arg, kdata, usize) != 0)
2878                         retcode = -EFAULT;
2879
2880 err_i1:
2881         if (!ioctl)
2882                 dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
2883                           task_pid_nr(current), cmd, nr);
2884
2885         if (kdata != stack_kdata)
2886                 kfree(kdata);
2887
2888         if (retcode)
2889                 dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
2890                                 nr, arg, retcode);
2891
2892         return retcode;
2893 }
2894
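/*
 * Map the device's remapped MMIO page into user space as non-cached I/O
 * memory. The VMA must be exactly one page.
 */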
2895 static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
2896                       struct vm_area_struct *vma)
2897 {
2898         phys_addr_t address;
2899
2900         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2901                 return -EINVAL;
2902
2903         address = dev->adev->rmmio_remap.bus_addr;
2904
2905         vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
2906                                 VM_DONTDUMP | VM_PFNMAP);
2907
2908         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2909
2910         pr_debug("pasid 0x%x mapping mmio page\n"
2911                  "     target user address == 0x%08llX\n"
2912                  "     physical address    == 0x%08llX\n"
2913                  "     vm_flags            == 0x%04lX\n"
2914                  "     size                == 0x%04lX\n",
2915                  process->pasid, (unsigned long long) vma->vm_start,
2916                  address, vma->vm_flags, PAGE_SIZE);
2917
2918         return io_remap_pfn_range(vma,
2919                                 vma->vm_start,
2920                                 address >> PAGE_SHIFT,
2921                                 PAGE_SIZE,
2922                                 vma->vm_page_prot);
2923 }
2924
2925
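/*
 * mmap dispatcher for /dev/kfd: decode the mmap type and GPU id from the
 * offset and forward to the doorbell, event, reserved-memory or MMIO mmap
 * handler.
 */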
2926 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
2927 {
2928         struct kfd_process *process;
2929         struct kfd_dev *dev = NULL;
2930         unsigned long mmap_offset;
2931         unsigned int gpu_id;
2932
2933         process = kfd_get_process(current);
2934         if (IS_ERR(process))
2935                 return PTR_ERR(process);
2936
2937         mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
2938         gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
2939         if (gpu_id)
2940                 dev = kfd_device_by_id(gpu_id);
2941
2942         switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
2943         case KFD_MMAP_TYPE_DOORBELL:
2944                 if (!dev)
2945                         return -ENODEV;
2946                 return kfd_doorbell_mmap(dev, process, vma);
2947
2948         case KFD_MMAP_TYPE_EVENTS:
2949                 return kfd_event_mmap(process, vma);
2950
2951         case KFD_MMAP_TYPE_RESERVED_MEM:
2952                 if (!dev)
2953                         return -ENODEV;
2954                 return kfd_reserved_mem_mmap(dev, process, vma);
2955         case KFD_MMAP_TYPE_MMIO:
2956                 if (!dev)
2957                         return -ENODEV;
2958                 return kfd_mmio_mmap(dev, process, vma);
2959         }
2960
2961         return -EFAULT;
2962 }