// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
#include "kfd_svm.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);

struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

/*
 * Structure for SDMA activity tracking
 */
struct kfd_sdma_activity_handler_workarea {
	struct work_struct sdma_activity_work;
	struct kfd_process_device *pdd;
	uint64_t sdma_activity_counter;
};

struct temp_sdma_queue_list {
	uint64_t __user *rptr;
	uint64_t sdma_val;
	unsigned int queue_id;
	struct list_head list;
};

static void kfd_sdma_activity_worker(struct work_struct *work)
{
	struct kfd_sdma_activity_handler_workarea *workarea;
	struct kfd_process_device *pdd;
	uint64_t val;
	struct mm_struct *mm;
	struct queue *q;
	struct qcm_process_device *qpd;
	struct device_queue_manager *dqm;
	int ret = 0;
	struct temp_sdma_queue_list sdma_q_list;
	struct temp_sdma_queue_list *sdma_q, *next;

	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
				sdma_activity_work);

	pdd = workarea->pdd;
	if (!pdd)
		return;
	dqm = pdd->dev->dqm;
	qpd = &pdd->qpd;
	if (!dqm || !qpd)
		return;

	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity
	 * Past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and get their counts from user-space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency situation. To read the SDMA stats,
	 * we need to do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
	 *    with dqm_lock/dqm_unlock().
	 * 2. Call get_user() for each node in temporary list without dqm_lock.
	 *    Save the SDMA count for each node and also add the count to the total
	 *    SDMA count counter.
	 *    It's possible that, during this step, a few SDMA queue nodes were
	 *    deleted from the qpd->queues_list.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
	 *    If any node got deleted, its SDMA count would be captured in the sdma
	 *    past activity counter. So subtract the SDMA counter stored in step 2
	 *    for this node from the total SDMA count.
	 */
	INIT_LIST_HEAD(&sdma_q_list.list);

	/*
	 * Create the temp list of all SDMA queues
	 */
	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
		if (!sdma_q) {
			dqm_unlock(dqm);
			goto cleanup;
		}

		INIT_LIST_HEAD(&sdma_q->list);
		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
		sdma_q->queue_id = q->properties.queue_id;
		list_add_tail(&sdma_q->list, &sdma_q_list.list);
	}

	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list. Return the past activity count as the total sdma
	 * count.
	 */
	if (list_empty(&sdma_q_list.list)) {
		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
		dqm_unlock(dqm);
		return;
	}

	dqm_unlock(dqm);

	/*
	 * Get the usage count for each SDMA queue in temp_list.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm)
		goto cleanup;

	kthread_use_mm(mm);

	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
		val = 0;
		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
		if (ret) {
			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
				 sdma_q->queue_id);
		} else {
			sdma_q->sdma_val = val;
			workarea->sdma_activity_counter += val;
		}
	}

	kthread_unuse_mm(mm);
	mmput(mm);

	/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
	 * nodes got deleted while fetching the SDMA counters.
	 */
	dqm_lock(dqm);

	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (list_empty(&sdma_q_list.list))
			break;

		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
			    (sdma_q->queue_id == q->properties.queue_id)) {
				list_del(&sdma_q->list);
				kfree(sdma_q);
				break;
			}
		}
	}

	dqm_unlock(dqm);

	/*
	 * If the temp list is not empty, it implies some queues got deleted
	 * from qpd->queues_list during the SDMA usage read. Subtract the SDMA
	 * count for each such node from the total SDMA count.
	 */
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		workarea->sdma_activity_counter -= sdma_q->sdma_val;
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}

	return;

cleanup:
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}
}

/**
 * kfd_get_cu_occupancy - Collect number of waves in flight on this device
 * by current process. Translate acquired wave count into number of compute
 * units that are occupied.
 *
 * @attr: Handle of attribute that allows reporting of wave count. The attribute
 * handle encapsulates GPU device it is associated with, thereby allowing collection
 * of waves in flight, etc
 * @buffer: Handle of user provided buffer updated with wave count
 *
 * Return: Number of bytes written to user buffer or an error value
 */
static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
{
	int cu_cnt;
	int wave_cnt;
	int max_waves_per_cu;
	struct kfd_dev *dev = NULL;
	struct kfd_process *proc = NULL;
	struct kfd_process_device *pdd = NULL;

	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
	dev = pdd->dev;
	if (dev->kfd2kgd->get_cu_occupancy == NULL)
		return -EINVAL;

	cu_cnt = 0;
	proc = pdd->process;
	if (pdd->qpd.queue_count == 0) {
		pr_debug("Gpu-Id: %d has no active queues for process %d\n",
			 dev->id, proc->pasid);
		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
	}

	/* Collect wave count from the device if it supports it */
	wave_cnt = 0;
	max_waves_per_cu = 0;
	dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
			&max_waves_per_cu);

	/* Translate wave count to number of compute units */
	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}

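/*
 * kfd_procfs_show - sysfs .show handler for the per-process attributes,
 * dispatching on the attribute name: "pasid", "vram_<gpuid>" and
 * "sdma_<gpuid>". The SDMA activity counter is gathered by running
 * kfd_sdma_activity_worker synchronously (schedule_work + flush_work):
 * the worker reads user-space read pointers via kthread_use_mm(), which
 * must happen in a kernel worker thread.
 */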
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_vram);
		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_sdma);
		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;

		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
			  kfd_sdma_activity_worker);

		sdma_activity_work_handler.pdd = pdd;
		sdma_activity_work_handler.sdma_activity_counter = 0;

		schedule_work(&sdma_activity_work_handler.sdma_activity_work);

		flush_work(&sdma_activity_work_handler.sdma_activity_work);

		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				(sdma_activity_work_handler.sdma_activity_counter)/
				 SDMA_ACTIVITY_DIVISOR);
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return 0;
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};

void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}

static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
	else
		pr_err("Invalid attribute");

	return 0;
}

static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	if (strcmp(attr->name, "evicted_ms") == 0) {
		struct kfd_process_device *pdd = container_of(attr,
				struct kfd_process_device,
				attr_evict);
		uint64_t evict_jiffies;

		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);

		return snprintf(buffer,
				PAGE_SIZE,
				"%llu\n",
				jiffies64_to_msecs(evict_jiffies));

	/* Sysfs handle that gets CU occupancy is per device */
	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
		return kfd_get_cu_occupancy(attr, buffer);
	} else {
		pr_err("Invalid attribute");
	}

	return 0;
}

static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct kfd_process_device *pdd;

	if (!strcmp(attr->name, "faults")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_faults);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
	}
	if (!strcmp(attr->name, "page_in")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_in);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
	}
	if (!strcmp(attr->name, "page_out")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_out);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
	}
	return 0;
}

static struct attribute attr_queue_size = {
	.name = "size",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};
ATTRIBUTE_GROUPS(procfs_queue);

static const struct sysfs_ops procfs_queue_ops = {
	.show = kfd_procfs_queue_show,
};

static struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_groups = procfs_queue_groups,
};

static const struct sysfs_ops procfs_stats_ops = {
	.show = kfd_procfs_stats_show,
};

static struct kobj_type procfs_stats_type = {
	.sysfs_ops = &procfs_stats_ops,
	.release = kfd_procfs_kobj_release,
};

static const struct sysfs_ops sysfs_counters_ops = {
	.show = kfd_sysfs_counters_show,
};

static struct kobj_type sysfs_counters_type = {
	.sysfs_ops = &sysfs_counters_ops,
	.release = kfd_procfs_kobj_release,
};

int kfd_procfs_add_queue(struct queue *q)
{
	struct kfd_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;
	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
			proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed",
			q->properties.queue_id);
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}

static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
				  char *name)
{
	int ret;

	if (!kobj || !attr || !name)
		return;

	attr->name = name;
	attr->mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(attr);

	ret = sysfs_create_file(kobj, attr);
	if (ret)
		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}

static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
	int ret;
	int i;
	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/stats_<gpuid>/
	 * - proc/<pid>/stats_<gpuid>/evicted_ms
	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
				"stats_%u", pdd->dev->id);
		pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
		if (!pdd->kobj_stats)
			return;

		ret = kobject_init_and_add(pdd->kobj_stats,
					   &procfs_stats_type,
					   p->kobj,
					   stats_dir_filename);

		if (ret) {
			pr_warn("Creating KFD proc/stats_%s folder failed",
				stats_dir_filename);
			kobject_put(pdd->kobj_stats);
			pdd->kobj_stats = NULL;
			return;
		}

		kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
				      "evicted_ms");
		/* Add sysfs file to report compute unit occupancy */
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			kfd_sysfs_create_file(pdd->kobj_stats,
					      &pdd->attr_cu_occupancy,
					      "cu_occupancy");
	}
}

static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
	int ret = 0;
	int i;
	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU which supports SVM
	 * - proc/<pid>/counters_<gpuid>/
	 * - proc/<pid>/counters_<gpuid>/faults
	 * - proc/<pid>/counters_<gpuid>/page_in
	 * - proc/<pid>/counters_<gpuid>/page_out
	 */
	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct kobject *kobj_counters;

		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
			"counters_%u", pdd->dev->id);
		kobj_counters = kfd_alloc_struct(kobj_counters);
		if (!kobj_counters)
			return;

		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
					   p->kobj, counters_dir_filename);
		if (ret) {
			pr_warn("Creating KFD proc/%s folder failed",
				counters_dir_filename);
			kobject_put(kobj_counters);
			return;
		}

		pdd->kobj_counters = kobj_counters;
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
				      "faults");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
				      "page_in");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
				      "page_out");
	}
}

static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
	int i;

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/vram_<gpuid>
	 * - proc/<pid>/sdma_<gpuid>
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
				      pdd->vram_filename);

		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
				      pdd->sdma_filename);
	}
}

void kfd_procfs_del_queue(struct queue *q)
{
	if (!q)
		return;

	kobject_del(&q->kobj);
	kobject_put(&q->kobj);
}

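/*
 * Allocate the process-teardown workqueue and the ordered restore
 * workqueue. See the comment on kfd_restore_wq above for why restore
 * must be single-threaded.
 */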
int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

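/*
 * kfd_process_free_gpuvm - Reverse of kfd_process_alloc_gpuvm: drop the
 * kernel CPU mapping if one was created, then unmap the BO from the GPU
 * VM and free it.
 */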
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
		struct kfd_process_device *pdd, void *kptr)
{
	struct kfd_dev *dev = pdd->dev;

	if (kptr) {
		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->adev, mem);
		kptr = NULL;
	}

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
					       NULL);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 *	This function should be only called right after the process
 *	is created and when kfd_processes_mutex is still being held
 *	to avoid concurrency. Because of that exclusiveness, we do
 *	not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, struct kgd_mem **mem, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
						 pdd->drm_priv, mem, NULL,
						 flags, false);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
			pdd->drm_priv, NULL);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->adev,
				(struct kgd_mem *)*mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto sync_memory_failed;
		}
	}

	return err;

sync_memory_failed:
	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
					       NULL);
err_alloc_mem:
	*mem = NULL;
	if (kptr)
		*kptr = NULL;
	return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *	process for IB usage. The memory reserved is for KFD to submit
 *	IBs to AMDGPU from the kernel. If the memory is reserved
 *	successfully, ib_kaddr will have the CPU/kernel
 *	address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
			KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
			KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &mem, &kaddr);
	if (ret)
		return ret;

	qpd->ib_mem = mem;
	qpd->ib_kaddr = kaddr;

	return 0;
}

static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!qpd->ib_kaddr || !qpd->ib_base)
		return;

	kfd_process_free_gpuvm(qpd->ib_mem, pdd, qpd->ib_kaddr);
}

struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;
	int ret;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd processes mutex before starting process creation
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread, false);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		ret = kfd_process_init_cwsr_apu(process, filep);
		if (ret)
			goto out_destroy;

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed");
			kobject_put(process->kobj);
			goto out;
		}

		kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
				      "pasid");

		process->kobj_queues = kobject_create_and_add("queues",
				process->kobj);
		if (!process->kobj_queues)
			pr_warn("Creating KFD proc/queues folder failed");

		kfd_procfs_add_sysfs_stats(process);
		kfd_procfs_add_sysfs_files(process);
		kfd_procfs_add_sysfs_counters(process);
	}
out:
	if (!IS_ERR(process))
		kref_get(&process->ref);
	mutex_unlock(&kfd_processes_mutex);

	return process;

out_destroy:
	hash_del_rcu(&process->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);
	/* kfd_process_free_notifier will trigger the cleanup */
	mmu_notifier_put(&process->mmu_notifier);
	return ERR_PTR(ret);
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread, false);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

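/* All callers hold kfd_processes_srcu for the RCU hash-table walk below. */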
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	if (p && ref)
		kref_get(&p->ref);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
{
	struct task_struct *task = NULL;
	struct kfd_process *p = NULL;

	if (!pid) {
		task = current->group_leader;
		get_task_struct(task);
	} else {
		task = get_pid_task(pid, PIDTYPE_PID);
	}

	if (task) {
		p = find_process(task, true);
		put_task_struct(task);
	}

	return p;
}

static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;
	int i;

	/*
	 * Remove all handles from idr and release appropriate
	 * local memory object
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *peer_pdd = p->pdds[i];

			if (!peer_pdd->drm_priv)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
						       pdd->drm_priv, NULL);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

/*
 * Just kunmap and unpin signal BO here. It will be freed in
 * kfd_process_free_outstanding_kfd_bos()
 */
static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	struct kfd_dev *kdev;
	void *mem;

	kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
	if (!kdev)
		return;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(kdev, p);
	if (!pdd)
		goto out;

	mem = kfd_process_device_translate_handle(
		pdd, GET_IDR_HANDLE(p->signal_handle));
	if (!mem)
		goto out;

	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->adev, mem);

out:
	mutex_unlock(&p->mutex);
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_device_free_bos(p->pdds[i]);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
				pdd->dev->id, p->pasid);

		kfd_process_device_destroy_cwsr_dgpu(pdd);
		kfd_process_device_destroy_ib_mem(pdd);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
					pdd->dev->adev, pdd->drm_priv);
			fput(pdd->drm_file);
		}

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		bitmap_free(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);

		/*
		 * Before destroying pdd, make sure to report availability
		 * for auto suspend.
		 */
		if (pdd->runtime_inuse) {
			pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
			pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
			pdd->runtime_inuse = false;
		}

		kfree(pdd);
		p->pdds[i] = NULL;
	}
	p->n_pdds = 0;
}

static void kfd_process_remove_sysfs(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int i;

	if (!p->kobj)
		return;

	sysfs_remove_file(p->kobj, &p->attr_pasid);
	kobject_del(p->kobj_queues);
	kobject_put(p->kobj_queues);
	p->kobj_queues = NULL;

	for (i = 0; i < p->n_pdds; i++) {
		pdd = p->pdds[i];

		sysfs_remove_file(p->kobj, &pdd->attr_vram);
		sysfs_remove_file(p->kobj, &pdd->attr_sdma);

		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			sysfs_remove_file(pdd->kobj_stats,
					  &pdd->attr_cu_occupancy);
		kobject_del(pdd->kobj_stats);
		kobject_put(pdd->kobj_stats);
		pdd->kobj_stats = NULL;
	}

	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];

		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
		kobject_del(pdd->kobj_counters);
		kobject_put(pdd->kobj_counters);
		pdd->kobj_counters = NULL;
	}

	kobject_del(p->kobj);
	kobject_put(p->kobj);
	p->kobj = NULL;
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	kfd_process_remove_sysfs(p);
	kfd_iommu_unbind_process(p);

	kfd_process_kunmap_signal_bo(p);
	kfd_process_free_outstanding_kfd_bos(p);
	svm_range_list_fini(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
{
	int idx = srcu_read_lock(&kfd_processes_srcu);
	struct kfd_process *p = find_process_by_mm(mm);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}

static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	mutex_lock(&p->mutex);

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	mutex_unlock(&p->mutex);

	mmu_notifier_put(&p->mmu_notifier);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.alloc_notifier = kfd_process_alloc_notifier,
	.free_notifier = kfd_process_free_notifier,
};

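/*
 * On APUs cwsr_base is not set, so the CWSR TBA/TMA area is mapped into
 * the user address space with vm_mmap() through the KFD mmap handler
 * (KFD_MMAP_TYPE_RESERVED_MEM), which also sets cwsr_kaddr, and the trap
 * handler ISA is then copied into it.
 */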
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;
		struct qcm_process_device *qpd = &p->pdds[i]->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_mem = mem;
	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
		return;

	kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, qpd->cwsr_kaddr);
}

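/*
 * kfd_process_set_trap_handler - Install a user trap handler. While CWSR
 * is active (cwsr_kaddr set), the user handler is recorded as a
 * second-level TBA/TMA inside the first-level TMA; otherwise it becomes
 * the first-level TBA/TMA directly.
 */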
void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr)
{
	if (qpd->cwsr_kaddr) {
		/* KFD trap handler is bound, record as second-level TBA/TMA
		 * in first-level TMA. First-level trap will jump to second.
		 */
		uint64_t *tma =
			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		/* No trap handler bound, bind as first-level TBA/TMA. */
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}
}

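/*
 * kfd_process_xnack_mode - Pick an XNACK (retry fault) mode that every GPU
 * this process may use can support; see the constraints explained below.
 */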
bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
{
	int i;

	/* On most GFXv9 GPUs, the retry mode in the SQ must match the
	 * boot time retry setting. Mixing processes with different
	 * XNACK/retry settings can hang the GPU.
	 *
	 * Different GPUs can have different noretry settings depending
	 * on HW bugs or limitations. We need to find at least one
	 * XNACK mode for this process that's compatible with all GPUs.
	 * Fortunately GPUs with retry enabled (noretry=0) can run code
	 * built for XNACK-off. On GFXv9 it may perform slower.
	 *
	 * Therefore applications built for XNACK-off can always be
	 * supported and will be our fallback if any GPU does not
	 * support retry.
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;

		/* Only consider GFXv9 and higher GPUs. Older GPUs don't
		 * support the SVM APIs and don't need to be considered
		 * for the XNACK mode selection.
		 */
		if (!KFD_IS_SOC15(dev))
			continue;
		/* Aldebaran can always support XNACK because it can support
		 * per-process XNACK mode selection. But let the dev->noretry
		 * setting still influence the default XNACK mode.
		 */
		if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
			continue;

		/* GFXv10 and later GPUs do not support shader preemption
		 * during page faults. This can lead to poor QoS for queue
		 * management and memory-manager-related preemptions or
		 * even deadlocks.
		 */
		if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
			return false;

		if (dev->noretry)
			return false;
	}

	return true;
}

/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released.
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	struct mmu_notifier *mn;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	process->n_pdds = 0;
	process->queues_paused = false;
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	kfd_event_init_process(process);
	process->is_32bit_user_mode = in_compat_syscall();

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures*/
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Check XNACK support after PDDs are created in kfd_init_apertures */
	process->xnack_enabled = kfd_process_xnack_mode(process, false);

	err = svm_range_list_init(process);
	if (err)
		goto err_init_svm_range_list;

	/* alloc_notifier needs to find the process in the hash table */
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	/* MMU notifier registration must be the last call that can fail
	 * because after this point we cannot unwind the process creation.
	 * After this point, mmu_notifier_put will trigger the cleanup by
	 * dropping the last process reference in the free_notifier.
	 */
	mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
	if (IS_ERR(mn)) {
		err = PTR_ERR(mn);
		goto err_register_notifier;
	}
	BUG_ON(mn != &process->mmu_notifier);

	get_task_struct(process->lead_thread);

	return process;

err_register_notifier:
	hash_del_rcu(&process->kfd_processes);
	svm_range_list_fini(process);
err_init_svm_range_list:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

static int init_doorbell_bitmap(struct qcm_process_device *qpd,
			struct kfd_dev *dev)
{
	unsigned int i;
	int range_start = dev->shared_resources.non_cp_doorbells_start;
	int range_end = dev->shared_resources.non_cp_doorbells_end;

	if (!KFD_IS_SOC15(dev))
		return 0;

	qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					     GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
			range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
			range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);

	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
		if (i >= range_start && i <= range_end) {
			__set_bit(i, qpd->doorbell_bitmap);
			__set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
				  qpd->doorbell_bitmap);
		}
	}

	return 0;
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i]->dev == dev)
			return p->pdds[i];

	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
		return NULL;
	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
		pr_err("Failed to alloc doorbell for pdd\n");
		goto err_free_pdd;
	}

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		goto err_free_pdd;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->qpd.mapped_gws_queue = false;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	pdd->runtime_inuse = false;
	pdd->vram_usage = 0;
	pdd->sdma_past_activity_counter = 0;
	pdd->user_gpu_id = dev->id;
	atomic64_set(&pdd->evict_duration_counter, 0);
	p->pdds[p->n_pdds++] = pdd;

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;

err_free_pdd:
	kfree(pdd);
	return NULL;
}

/*
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (!drm_file)
		return -EINVAL;

	if (pdd->drm_priv)
		return -EBUSY;

	p = pdd->process;
	dev = pdd->dev;

	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
		dev->adev, drm_file, p->pasid,
		&p->kgd_process_info, &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}
	pdd->drm_priv = drm_file->private_data;

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	pdd->drm_file = drm_file;

	return 0;

err_init_cwsr:
err_reserve_ib_mem:
	kfd_process_device_free_bos(pdd);
	pdd->drm_priv = NULL;

	return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device. Unbinding occurs when the process dies or the device
 * is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!pdd->drm_priv)
		return ERR_PTR(-ENODEV);

	/*
	 * Signal the runtime-pm system to auto resume and prevent
	 * further runtime suspend once the device pdd is created until
	 * pdd is destroyed.
	 */
	if (!pdd->runtime_inuse) {
		err = pm_runtime_get_sync(dev->ddev->dev);
		if (err < 0) {
			pm_runtime_put_autosuspend(dev->ddev->dev);
			return ERR_PTR(err);
		}
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		goto out;

	/*
	 * Make sure that the runtime_usage counter is incremented just once
	 * per pdd.
	 */
	pdd->runtime_inuse = true;

	return pdd;

out:
	/* balance runpm reference count and exit with error */
	if (!pdd->runtime_inuse) {
		pm_runtime_mark_last_busy(dev->ddev->dev);
		pm_runtime_put_autosuspend(dev->ddev->dev);
	}

	return ERR_PTR(err);
}

/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p)
{
	int r = 0;
	int i;
	unsigned int n_evicted = 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		/* Eviction returns -EIO if HWS is hung or the ASIC is
		 * resetting. In this case we want to mark all queues as
		 * evicted to prevent them from being added back, since
		 * they are not actually saved right now.
		 */
		if (r && r != -EIO) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (n_evicted == 0)
			break;
		n_evicted--;

		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");
	}

	return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	int r, ret = 0;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
			return i;
	return -EINVAL;
}

int
kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
			   uint32_t *gpuid, uint32_t *gpuidx)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
			*gpuid = p->pdds[i]->user_gpu_id;
			*gpuidx = i;
			return 0;
		}
	return -EINVAL;
}

static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves KFD BOs, it is possible to be evicted again. But
	 * restore has a few more steps to finish, so let us wait for any
	 * previous restore work to complete.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}

static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Set last_restore_timestamp before successful restoration.
	 * Otherwise this would have to be set by KGD (restore_process_bos)
	 * before KFD BOs are unreserved. If not, the process can be evicted
	 * again before the timestamp is set.
	 * If restore fails, the timestamp will be set again in the next
	 * attempt. This would mean that the minimum GPU quanta would be
	 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
	 * functions)
	 */

	p->last_restore_timestamp = get_jiffies_64();
	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
						     &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}

void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	WARN(debug_evictions, "Evicting all processes");
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

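/*
 * kfd_flush_tlb - Flush GPU TLBs for this process-device. Without HWS the
 * flush targets the statically assigned VMID (nothing to do before one is
 * assigned); with HWS the flush is issued by PASID.
 */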
void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
	struct kfd_dev *dev = pdd->dev;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
							pdd->qpd.vmid);
	} else {
		amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
					pdd->process->pasid, type);
	}
}

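/* Map a user-visible GPU ID to its process-device data; NULL if absent. */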
struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	if (gpu_id) {
		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *pdd = p->pdds[i];

			if (pdd->user_gpu_id == gpu_id)
				return pdd;
		}
	}
	return NULL;
}

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
{
	int i;

	if (!actual_gpu_id)
		return 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (pdd->dev->id == actual_gpu_id)
			return pdd->user_gpu_id;
	}
	return -EINVAL;
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif