1 // SPDX-License-Identifier: GPL-2.0 OR MIT
3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/mutex.h>
25 #include <linux/log2.h>
26 #include <linux/sched.h>
27 #include <linux/sched/mm.h>
28 #include <linux/sched/task.h>
29 #include <linux/mmu_context.h>
30 #include <linux/slab.h>
31 #include <linux/amd-iommu.h>
32 #include <linux/notifier.h>
33 #include <linux/compat.h>
34 #include <linux/mman.h>
35 #include <linux/file.h>
36 #include <linux/pm_runtime.h>
37 #include "amdgpu_amdkfd.h"
43 #include "kfd_device_queue_manager.h"
44 #include "kfd_iommu.h"
46 #include "kfd_smi_events.h"
49 * List of struct kfd_process (field kfd_processes).
50 * Unique/indexed by mm_struct*
52 DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
53 static DEFINE_MUTEX(kfd_processes_mutex);
55 DEFINE_SRCU(kfd_processes_srcu);
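/*
 * Locking summary (derived from the code below): kfd_processes_mutex
 * serializes insertions into and removals from kfd_processes_table, while
 * kfd_processes_srcu allows lock-free lookups (see find_process_by_mm()).
 * Removal paths call hash_del_rcu() under the mutex and then
 * synchronize_srcu() before the kfd_process can go away.
 */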
57 /* For process termination handling */
58 static struct workqueue_struct *kfd_process_wq;
60 /* Ordered, single-threaded workqueue for restoring evicted
61 * processes. Restoring multiple processes concurrently under memory
62 * pressure can lead to processes blocking each other from validating
63 * their BOs and result in a live-lock situation where processes
64 * remain evicted indefinitely.
66 static struct workqueue_struct *kfd_restore_wq;
68 static struct kfd_process *find_process(const struct task_struct *thread,
70 static void kfd_process_ref_release(struct kref *ref);
71 static struct kfd_process *create_process(const struct task_struct *thread);
72 static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
74 static void evict_process_worker(struct work_struct *work);
75 static void restore_process_worker(struct work_struct *work);
77 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);
79 struct kfd_procfs_tree {
83 static struct kfd_procfs_tree procfs;
86 * Structure for SDMA activity tracking
88 struct kfd_sdma_activity_handler_workarea {
89 struct work_struct sdma_activity_work;
90 struct kfd_process_device *pdd;
91 uint64_t sdma_activity_counter;
94 struct temp_sdma_queue_list {
95 uint64_t __user *rptr;
97 unsigned int queue_id;
98 struct list_head list;
101 static void kfd_sdma_activity_worker(struct work_struct *work)
103 struct kfd_sdma_activity_handler_workarea *workarea;
104 struct kfd_process_device *pdd;
106 struct mm_struct *mm;
108 struct qcm_process_device *qpd;
109 struct device_queue_manager *dqm;
111 struct temp_sdma_queue_list sdma_q_list;
112 struct temp_sdma_queue_list *sdma_q, *next;
114 workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
125 * Total SDMA activity is current SDMA activity + past SDMA activity
126 * Past SDMA count is stored in pdd.
127 * To get the current activity counters for all active SDMA queues,
128 * we loop over all SDMA queues and get their counts from user-space.
130 * We cannot call get_user() with dqm_lock held as it can cause
131 * a circular lock dependency situation. To read the SDMA stats,
132 * we need to do the following:
134 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
135 * with dqm_lock/dqm_unlock().
136 * 2. Call get_user() for each node in temporary list without dqm_lock.
137 * Save the SDMA count for each node and also add the count to the total
138 * SDMA count counter.
139 * It's possible that, during this step, a few SDMA queue nodes get deleted
140 * from the qpd->queues_list.
141 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
142 * If any node got deleted, its SDMA count would be captured in the sdma
143 * past activity counter. So subtract the SDMA counter stored in step 2
144 * for this node from the total SDMA count.
146 INIT_LIST_HEAD(&sdma_q_list.list);
149 * Create the temp list of all SDMA queues
153 list_for_each_entry(q, &qpd->queues_list, list) {
154 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
155 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
158 sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
164 INIT_LIST_HEAD(&sdma_q->list);
165 sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
166 sdma_q->queue_id = q->properties.queue_id;
167 list_add_tail(&sdma_q->list, &sdma_q_list.list);
171 * If the temp list is empty, then no SDMA queue nodes were found in
172 * qpd->queues_list. Return the past activity count as the total SDMA
175 if (list_empty(&sdma_q_list.list)) {
176 workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
184 * Get the usage count for each SDMA queue in temp_list.
186 mm = get_task_mm(pdd->process->lead_thread);
192 list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
194 ret = read_sdma_queue_counter(sdma_q->rptr, &val);
196 pr_debug("Failed to read SDMA queue active counter for queue id: %d",
199 sdma_q->sdma_val = val;
200 workarea->sdma_activity_counter += val;
204 kthread_unuse_mm(mm);
208 * Do a second iteration over qpd->queues_list to check if any SDMA
209 * nodes got deleted while fetching the SDMA counters.
213 workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;
215 list_for_each_entry(q, &qpd->queues_list, list) {
216 if (list_empty(&sdma_q_list.list))
219 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
220 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
223 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
224 if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
225 (sdma_q->queue_id == q->properties.queue_id)) {
226 list_del(&sdma_q->list);
236 * If the temp list is not empty, some queues were deleted from
237 * qpd->queues_list while the SDMA usage was being read. Subtract the
238 * SDMA count of each such node from the total SDMA count.
240 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
241 workarea->sdma_activity_counter -= sdma_q->sdma_val;
242 list_del(&sdma_q->list);
249 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
250 list_del(&sdma_q->list);
256 * kfd_get_cu_occupancy - Collect the number of waves in flight on this device
257 * by the current process and translate the wave count into the number of occupied compute units.
260 * @attr: Handle of the attribute used to report the wave count. The attribute
261 * handle encapsulates the GPU device it is associated with, thereby allowing collection
262 * of the waves in flight, etc.
263 * @buffer: Handle of user provided buffer updated with wave count
265 * Return: Number of bytes written to user buffer or an error value
267 static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
271 int max_waves_per_cu;
272 struct kfd_dev *dev = NULL;
273 struct kfd_process *proc = NULL;
274 struct kfd_process_device *pdd = NULL;
276 pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
278 if (dev->kfd2kgd->get_cu_occupancy == NULL)
283 if (pdd->qpd.queue_count == 0) {
284 pr_debug("Gpu-Id: %d has no active queues for process %d\n",
285 dev->id, proc->pasid);
286 return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
289 /* Collect wave count from the device if it supports it */
291 max_waves_per_cu = 0;
292 dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
295 /* Translate wave count to number of compute units, rounding up */
296 cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
297 return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
300 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
303 if (strcmp(attr->name, "pasid") == 0) {
304 struct kfd_process *p = container_of(attr, struct kfd_process,
307 return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
308 } else if (strncmp(attr->name, "vram_", 5) == 0) {
309 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
311 return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
312 } else if (strncmp(attr->name, "sdma_", 5) == 0) {
313 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
315 struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
317 INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
318 kfd_sdma_activity_worker);
320 sdma_activity_work_handler.pdd = pdd;
321 sdma_activity_work_handler.sdma_activity_counter = 0;
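/*
 * Collect the counters synchronously: queue the worker and wait for it
 * to finish before reporting the accumulated value to the sysfs reader.
 */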
323 schedule_work(&sdma_activity_work_handler.sdma_activity_work);
325 flush_work(&sdma_activity_work_handler.sdma_activity_work);
327 return snprintf(buffer, PAGE_SIZE, "%llu\n",
328 (sdma_activity_work_handler.sdma_activity_counter)/
329 SDMA_ACTIVITY_DIVISOR);
331 pr_err("Invalid attribute");
338 static void kfd_procfs_kobj_release(struct kobject *kobj)
343 static const struct sysfs_ops kfd_procfs_ops = {
344 .show = kfd_procfs_show,
347 static const struct kobj_type procfs_type = {
348 .release = kfd_procfs_kobj_release,
349 .sysfs_ops = &kfd_procfs_ops,
352 void kfd_procfs_init(void)
356 procfs.kobj = kfd_alloc_struct(procfs.kobj);
360 ret = kobject_init_and_add(procfs.kobj, &procfs_type,
361 &kfd_device->kobj, "proc");
363 pr_warn("Could not create procfs proc folder");
364 /* If we fail to create the procfs, clean up */
365 kfd_procfs_shutdown();
369 void kfd_procfs_shutdown(void)
372 kobject_del(procfs.kobj);
373 kobject_put(procfs.kobj);
378 static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
379 struct attribute *attr, char *buffer)
381 struct queue *q = container_of(kobj, struct queue, kobj);
383 if (!strcmp(attr->name, "size"))
384 return snprintf(buffer, PAGE_SIZE, "%llu",
385 q->properties.queue_size);
386 else if (!strcmp(attr->name, "type"))
387 return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
388 else if (!strcmp(attr->name, "gpuid"))
389 return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
391 pr_err("Invalid attribute");
396 static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
397 struct attribute *attr, char *buffer)
399 if (strcmp(attr->name, "evicted_ms") == 0) {
400 struct kfd_process_device *pdd = container_of(attr,
401 struct kfd_process_device,
403 uint64_t evict_jiffies;
405 evict_jiffies = atomic64_read(&pdd->evict_duration_counter);
407 return snprintf(buffer,
410 jiffies64_to_msecs(evict_jiffies));
412 /* Sysfs handle that gets CU occupancy is per device */
413 } else if (strcmp(attr->name, "cu_occupancy") == 0) {
414 return kfd_get_cu_occupancy(attr, buffer);
416 pr_err("Invalid attribute");
422 static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
423 struct attribute *attr, char *buf)
425 struct kfd_process_device *pdd;
427 if (!strcmp(attr->name, "faults")) {
428 pdd = container_of(attr, struct kfd_process_device,
430 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
432 if (!strcmp(attr->name, "page_in")) {
433 pdd = container_of(attr, struct kfd_process_device,
435 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
437 if (!strcmp(attr->name, "page_out")) {
438 pdd = container_of(attr, struct kfd_process_device,
440 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
445 static struct attribute attr_queue_size = {
447 .mode = KFD_SYSFS_FILE_MODE
450 static struct attribute attr_queue_type = {
452 .mode = KFD_SYSFS_FILE_MODE
455 static struct attribute attr_queue_gpuid = {
457 .mode = KFD_SYSFS_FILE_MODE
460 static struct attribute *procfs_queue_attrs[] = {
466 ATTRIBUTE_GROUPS(procfs_queue);
468 static const struct sysfs_ops procfs_queue_ops = {
469 .show = kfd_procfs_queue_show,
472 static const struct kobj_type procfs_queue_type = {
473 .sysfs_ops = &procfs_queue_ops,
474 .default_groups = procfs_queue_groups,
477 static const struct sysfs_ops procfs_stats_ops = {
478 .show = kfd_procfs_stats_show,
481 static const struct kobj_type procfs_stats_type = {
482 .sysfs_ops = &procfs_stats_ops,
483 .release = kfd_procfs_kobj_release,
486 static const struct sysfs_ops sysfs_counters_ops = {
487 .show = kfd_sysfs_counters_show,
490 static const struct kobj_type sysfs_counters_type = {
491 .sysfs_ops = &sysfs_counters_ops,
492 .release = kfd_procfs_kobj_release,
495 int kfd_procfs_add_queue(struct queue *q)
497 struct kfd_process *proc;
500 if (!q || !q->process)
504 /* Create proc/<pid>/queues/<queue id> folder */
505 if (!proc->kobj_queues)
507 ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
508 proc->kobj_queues, "%u", q->properties.queue_id);
510 pr_warn("Creating proc/<pid>/queues/%u failed",
511 q->properties.queue_id);
512 kobject_put(&q->kobj);
519 static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
524 if (!kobj || !attr || !name)
528 attr->mode = KFD_SYSFS_FILE_MODE;
529 sysfs_attr_init(attr);
531 ret = sysfs_create_file(kobj, attr);
533 pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
536 static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
540 char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
546 * Create sysfs files for each GPU:
547 * - proc/<pid>/stats_<gpuid>/
548 * - proc/<pid>/stats_<gpuid>/evicted_ms
549 * - proc/<pid>/stats_<gpuid>/cu_occupancy
551 for (i = 0; i < p->n_pdds; i++) {
552 struct kfd_process_device *pdd = p->pdds[i];
554 snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
555 "stats_%u", pdd->dev->id);
556 pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
557 if (!pdd->kobj_stats)
560 ret = kobject_init_and_add(pdd->kobj_stats,
566 pr_warn("Creating KFD proc/stats_%s folder failed",
568 kobject_put(pdd->kobj_stats);
569 pdd->kobj_stats = NULL;
573 kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
575 /* Add sysfs file to report compute unit occupancy */
576 if (pdd->dev->kfd2kgd->get_cu_occupancy)
577 kfd_sysfs_create_file(pdd->kobj_stats,
578 &pdd->attr_cu_occupancy,
583 static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
587 char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];
593 * Create sysfs files for each GPU which supports SVM
594 * - proc/<pid>/counters_<gpuid>/
595 * - proc/<pid>/counters_<gpuid>/faults
596 * - proc/<pid>/counters_<gpuid>/page_in
597 * - proc/<pid>/counters_<gpuid>/page_out
599 for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
600 struct kfd_process_device *pdd = p->pdds[i];
601 struct kobject *kobj_counters;
603 snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
604 "counters_%u", pdd->dev->id);
605 kobj_counters = kfd_alloc_struct(kobj_counters);
609 ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
610 p->kobj, counters_dir_filename);
612 pr_warn("Creating KFD proc/%s folder failed",
613 counters_dir_filename);
614 kobject_put(kobj_counters);
618 pdd->kobj_counters = kobj_counters;
619 kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
621 kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
623 kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
628 static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
636 * Create sysfs files for each GPU:
637 * - proc/<pid>/vram_<gpuid>
638 * - proc/<pid>/sdma_<gpuid>
640 for (i = 0; i < p->n_pdds; i++) {
641 struct kfd_process_device *pdd = p->pdds[i];
643 snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
645 kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
648 snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
650 kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
655 void kfd_procfs_del_queue(struct queue *q)
660 kobject_del(&q->kobj);
661 kobject_put(&q->kobj);
664 int kfd_process_create_wq(void)
667 kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
669 kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
671 if (!kfd_process_wq || !kfd_restore_wq) {
672 kfd_process_destroy_wq();
679 void kfd_process_destroy_wq(void)
681 if (kfd_process_wq) {
682 destroy_workqueue(kfd_process_wq);
683 kfd_process_wq = NULL;
685 if (kfd_restore_wq) {
686 destroy_workqueue(kfd_restore_wq);
687 kfd_restore_wq = NULL;
691 static void kfd_process_free_gpuvm(struct kgd_mem *mem,
692 struct kfd_process_device *pdd, void **kptr)
694 struct kfd_dev *dev = pdd->dev;
697 amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
701 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
702 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
706 /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
707 * This function should only be called right after the process
708 * is created and when kfd_processes_mutex is still being held
709 * to avoid concurrency. Because of that exclusiveness, we do
710 * not need to take p->mutex.
712 static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
713 uint64_t gpu_va, uint32_t size,
714 uint32_t flags, struct kgd_mem **mem, void **kptr)
716 struct kfd_dev *kdev = pdd->dev;
719 err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
720 pdd->drm_priv, mem, NULL,
725 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
730 err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
732 pr_debug("Sync memory failed, wait interrupted by user signal\n");
733 goto sync_memory_failed;
737 err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
738 (struct kgd_mem *)*mem, kptr, NULL);
740 pr_debug("Map GTT BO to kernel failed\n");
741 goto sync_memory_failed;
748 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);
751 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
759 /* kfd_process_device_reserve_ib_mem - Reserve memory inside the
760 * process for IB usage. The memory reserved is for KFD to submit
761 * IBs to AMDGPU from the kernel. If the memory is reserved
762 * successfully, ib_kaddr will have the CPU/kernel
763 * address. Check ib_kaddr before accessing the memory.
765 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
767 struct qcm_process_device *qpd = &pdd->qpd;
768 uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
769 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
770 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
771 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
776 if (qpd->ib_kaddr || !qpd->ib_base)
779 /* ib_base is only set for dGPU */
780 ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
786 qpd->ib_kaddr = kaddr;
791 static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
793 struct qcm_process_device *qpd = &pdd->qpd;
795 if (!qpd->ib_kaddr || !qpd->ib_base)
798 kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
801 struct kfd_process *kfd_create_process(struct file *filep)
803 struct kfd_process *process;
804 struct task_struct *thread = current;
808 return ERR_PTR(-EINVAL);
810 /* Only the pthreads threading model is supported. */
811 if (thread->group_leader->mm != thread->mm)
812 return ERR_PTR(-EINVAL);
815 * take the kfd_processes_mutex before starting process creation
816 * so there won't be a case where two threads of the same process
817 * create two kfd_process structures
819 mutex_lock(&kfd_processes_mutex);
821 /* A prior open of /dev/kfd could have already created the process. */
822 process = find_process(thread, false);
824 pr_debug("Process already found\n");
826 process = create_process(thread);
830 ret = kfd_process_init_cwsr_apu(process, filep);
837 process->kobj = kfd_alloc_struct(process->kobj);
838 if (!process->kobj) {
839 pr_warn("Creating procfs kobject failed");
842 ret = kobject_init_and_add(process->kobj, &procfs_type,
844 (int)process->lead_thread->pid);
846 pr_warn("Creating procfs pid directory failed");
847 kobject_put(process->kobj);
851 kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
854 process->kobj_queues = kobject_create_and_add("queues",
856 if (!process->kobj_queues)
857 pr_warn("Creating KFD proc/queues folder failed");
859 kfd_procfs_add_sysfs_stats(process);
860 kfd_procfs_add_sysfs_files(process);
861 kfd_procfs_add_sysfs_counters(process);
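/*
 * At this point the per-process sysfs tree (typically visible under
 * /sys/class/kfd/kfd/proc/<pid>/) is populated: pasid, queues/,
 * stats_<gpuid>/, counters_<gpuid>/, vram_<gpuid> and sdma_<gpuid>.
 */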
864 if (!IS_ERR(process))
865 kref_get(&process->ref);
866 mutex_unlock(&kfd_processes_mutex);
871 hash_del_rcu(&process->kfd_processes);
872 mutex_unlock(&kfd_processes_mutex);
873 synchronize_srcu(&kfd_processes_srcu);
874 /* kfd_process_free_notifier will trigger the cleanup */
875 mmu_notifier_put(&process->mmu_notifier);
879 struct kfd_process *kfd_get_process(const struct task_struct *thread)
881 struct kfd_process *process;
884 return ERR_PTR(-EINVAL);
886 /* Only the pthreads threading model is supported. */
887 if (thread->group_leader->mm != thread->mm)
888 return ERR_PTR(-EINVAL);
890 process = find_process(thread, false);
892 return ERR_PTR(-EINVAL);
897 static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
899 struct kfd_process *process;
901 hash_for_each_possible_rcu(kfd_processes_table, process,
902 kfd_processes, (uintptr_t)mm)
903 if (process->mm == mm)
909 static struct kfd_process *find_process(const struct task_struct *thread,
912 struct kfd_process *p;
915 idx = srcu_read_lock(&kfd_processes_srcu);
916 p = find_process_by_mm(thread->mm);
919 srcu_read_unlock(&kfd_processes_srcu, idx);
924 void kfd_unref_process(struct kfd_process *p)
926 kref_put(&p->ref, kfd_process_ref_release);
929 /* This increments the process->ref counter. */
930 struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
932 struct task_struct *task = NULL;
933 struct kfd_process *p = NULL;
937 get_task_struct(task);
939 task = get_pid_task(pid, PIDTYPE_PID);
943 p = find_process(task, true);
944 put_task_struct(task);
950 static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
952 struct kfd_process *p = pdd->process;
958 * Remove all handles from the idr and release the corresponding
959 * local memory objects
961 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
963 for (i = 0; i < p->n_pdds; i++) {
964 struct kfd_process_device *peer_pdd = p->pdds[i];
966 if (!peer_pdd->drm_priv)
968 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
969 peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
972 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
973 pdd->drm_priv, NULL);
974 kfd_process_device_remove_obj_handle(pdd, id);
979 * Just kunmap and unpin signal BO here. It will be freed in
980 * kfd_process_free_outstanding_kfd_bos()
982 static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
984 struct kfd_process_device *pdd;
985 struct kfd_dev *kdev;
988 kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
992 mutex_lock(&p->mutex);
994 pdd = kfd_get_process_device_data(kdev, p);
998 mem = kfd_process_device_translate_handle(
999 pdd, GET_IDR_HANDLE(p->signal_handle));
1003 amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
1006 mutex_unlock(&p->mutex);
1009 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
1013 for (i = 0; i < p->n_pdds; i++)
1014 kfd_process_device_free_bos(p->pdds[i]);
1017 static void kfd_process_destroy_pdds(struct kfd_process *p)
1021 for (i = 0; i < p->n_pdds; i++) {
1022 struct kfd_process_device *pdd = p->pdds[i];
1024 pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
1025 pdd->dev->id, p->pasid);
1027 kfd_process_device_destroy_cwsr_dgpu(pdd);
1028 kfd_process_device_destroy_ib_mem(pdd);
1030 if (pdd->drm_file) {
1031 amdgpu_amdkfd_gpuvm_release_process_vm(
1032 pdd->dev->adev, pdd->drm_priv);
1033 fput(pdd->drm_file);
1036 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
1037 free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
1038 get_order(KFD_CWSR_TBA_TMA_SIZE));
1040 bitmap_free(pdd->qpd.doorbell_bitmap);
1041 idr_destroy(&pdd->alloc_idr);
1043 kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);
1045 if (pdd->dev->shared_resources.enable_mes)
1046 amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
1049 * before destroying pdd, make sure to report availability
1052 if (pdd->runtime_inuse) {
1053 pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
1054 pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
1055 pdd->runtime_inuse = false;
1064 static void kfd_process_remove_sysfs(struct kfd_process *p)
1066 struct kfd_process_device *pdd;
1072 sysfs_remove_file(p->kobj, &p->attr_pasid);
1073 kobject_del(p->kobj_queues);
1074 kobject_put(p->kobj_queues);
1075 p->kobj_queues = NULL;
1077 for (i = 0; i < p->n_pdds; i++) {
1080 sysfs_remove_file(p->kobj, &pdd->attr_vram);
1081 sysfs_remove_file(p->kobj, &pdd->attr_sdma);
1083 sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
1084 if (pdd->dev->kfd2kgd->get_cu_occupancy)
1085 sysfs_remove_file(pdd->kobj_stats,
1086 &pdd->attr_cu_occupancy);
1087 kobject_del(pdd->kobj_stats);
1088 kobject_put(pdd->kobj_stats);
1089 pdd->kobj_stats = NULL;
1092 for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
1095 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
1096 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
1097 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
1098 kobject_del(pdd->kobj_counters);
1099 kobject_put(pdd->kobj_counters);
1100 pdd->kobj_counters = NULL;
1103 kobject_del(p->kobj);
1104 kobject_put(p->kobj);
1108 /* No process locking is needed in this function, because the process
1109 * is not findable any more. We must assume that no other thread is
1110 * using it any more, otherwise we couldn't safely free the process
1111 * structure in the end.
1113 static void kfd_process_wq_release(struct work_struct *work)
1115 struct kfd_process *p = container_of(work, struct kfd_process,
1118 kfd_process_dequeue_from_all_devices(p);
1119 pqm_uninit(&p->pqm);
1121 /* Signal the eviction fence after user mode queues are
1122 * destroyed. This allows any BOs to be freed without
1123 * triggering pointless evictions or waiting for fences.
1125 dma_fence_signal(p->ef);
1127 kfd_process_remove_sysfs(p);
1128 kfd_iommu_unbind_process(p);
1130 kfd_process_kunmap_signal_bo(p);
1131 kfd_process_free_outstanding_kfd_bos(p);
1132 svm_range_list_fini(p);
1134 kfd_process_destroy_pdds(p);
1135 dma_fence_put(p->ef);
1137 kfd_event_free_process(p);
1139 kfd_pasid_free(p->pasid);
1140 mutex_destroy(&p->mutex);
1142 put_task_struct(p->lead_thread);
1147 static void kfd_process_ref_release(struct kref *ref)
1149 struct kfd_process *p = container_of(ref, struct kfd_process, ref);
1151 INIT_WORK(&p->release_work, kfd_process_wq_release);
1152 queue_work(kfd_process_wq, &p->release_work);
1155 static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
1157 int idx = srcu_read_lock(&kfd_processes_srcu);
1158 struct kfd_process *p = find_process_by_mm(mm);
1160 srcu_read_unlock(&kfd_processes_srcu, idx);
1162 return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
1165 static void kfd_process_free_notifier(struct mmu_notifier *mn)
1167 kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
1170 static void kfd_process_notifier_release_internal(struct kfd_process *p)
1172 cancel_delayed_work_sync(&p->eviction_work);
1173 cancel_delayed_work_sync(&p->restore_work);
1175 /* Indicate to other users that MM is no longer valid */
1178 mmu_notifier_put(&p->mmu_notifier);
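/*
 * The mmu_notifier_put() above defers the actual teardown: once the
 * notifier reference count drops to zero, kfd_process_free_notifier()
 * releases the process reference and kfd_process_wq_release() performs
 * the final cleanup from the process workqueue.
 */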
1181 static void kfd_process_notifier_release(struct mmu_notifier *mn,
1182 struct mm_struct *mm)
1184 struct kfd_process *p;
1187 * The kfd_process structure cannot be freed because the
1188 * mmu_notifier SRCU is read-locked
1190 p = container_of(mn, struct kfd_process, mmu_notifier);
1191 if (WARN_ON(p->mm != mm))
1194 mutex_lock(&kfd_processes_mutex);
1196 * Do early return if table is empty.
1198 * This could potentially happen if this function is called concurrently
1199 * by the mmu_notifier and by kfd_cleanup_processes.
1202 if (hash_empty(kfd_processes_table)) {
1203 mutex_unlock(&kfd_processes_mutex);
1206 hash_del_rcu(&p->kfd_processes);
1207 mutex_unlock(&kfd_processes_mutex);
1208 synchronize_srcu(&kfd_processes_srcu);
1210 kfd_process_notifier_release_internal(p);
1213 static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
1214 .release = kfd_process_notifier_release,
1215 .alloc_notifier = kfd_process_alloc_notifier,
1216 .free_notifier = kfd_process_free_notifier,
1220 * This code handles the case when the driver is being unloaded before all
1221 * mm_structs are released. We need to safely free the kfd_process structures and
1222 * avoid race conditions with the mmu_notifier that might try to free them.
1225 void kfd_cleanup_processes(void)
1227 struct kfd_process *p;
1228 struct hlist_node *p_temp;
1230 HLIST_HEAD(cleanup_list);
1233 * Move all remaining kfd_process entries from the process table to a
1234 * temp list for processing. Once done, the mmu_notifier release
1235 * callback will not find the kfd_process in the table and will return early,
1236 * avoiding double-free issues.
1238 mutex_lock(&kfd_processes_mutex);
1239 hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
1240 hash_del_rcu(&p->kfd_processes);
1241 synchronize_srcu(&kfd_processes_srcu);
1242 hlist_add_head(&p->kfd_processes, &cleanup_list);
1244 mutex_unlock(&kfd_processes_mutex);
1246 hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
1247 kfd_process_notifier_release_internal(p);
1250 * Ensure that all outstanding free_notifier callbacks are called, triggering
1251 * the release of the kfd_process structs.
1253 mmu_notifier_synchronize();
1256 static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
1258 unsigned long offset;
1261 for (i = 0; i < p->n_pdds; i++) {
1262 struct kfd_dev *dev = p->pdds[i]->dev;
1263 struct qcm_process_device *qpd = &p->pdds[i]->qpd;
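/*
 * Skip devices without CWSR, devices that are already set up
 * (cwsr_kaddr), and dGPUs (cwsr_base is only set for dGPUs; their CWSR
 * area is allocated in kfd_process_device_init_cwsr_dgpu() instead).
 */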
1265 if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
1268 offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
1269 qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
1270 KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
1271 MAP_SHARED, offset);
1273 if (IS_ERR_VALUE(qpd->tba_addr)) {
1274 int err = qpd->tba_addr;
1276 pr_err("Failure to set tba address. error %d.\n", err);
1278 qpd->cwsr_kaddr = NULL;
1282 memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
1284 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1285 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1286 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1292 static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
1294 struct kfd_dev *dev = pdd->dev;
1295 struct qcm_process_device *qpd = &pdd->qpd;
1296 uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
1297 | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
1298 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1299 struct kgd_mem *mem;
1303 if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
1306 /* cwsr_base is only set for dGPU */
1307 ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
1308 KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
1312 qpd->cwsr_mem = mem;
1313 qpd->cwsr_kaddr = kaddr;
1314 qpd->tba_addr = qpd->cwsr_base;
1316 memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
1318 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1319 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1320 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1325 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
1327 struct kfd_dev *dev = pdd->dev;
1328 struct qcm_process_device *qpd = &pdd->qpd;
1330 if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
1333 kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
1336 void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
1340 if (qpd->cwsr_kaddr) {
1341 /* KFD trap handler is bound, record as second-level TBA/TMA
1342 * in first-level TMA. First-level trap will jump to second.
1345 (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1349 /* No trap handler bound, bind as first-level TBA/TMA. */
1350 qpd->tba_addr = tba_addr;
1351 qpd->tma_addr = tma_addr;
1355 bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
1359 /* On most GFXv9 GPUs, the retry mode in the SQ must match the
1360 * boot time retry setting. Mixing processes with different
1361 * XNACK/retry settings can hang the GPU.
1363 * Different GPUs can have different noretry settings depending
1364 * on HW bugs or limitations. We need to find at least one
1365 * XNACK mode for this process that's compatible with all GPUs.
1366 * Fortunately GPUs with retry enabled (noretry=0) can run code
1367 * built for XNACK-off. On GFXv9 it may perform slower.
1369 * Therefore applications built for XNACK-off can always be
1370 * supported and will be our fallback if any GPU does not
1373 for (i = 0; i < p->n_pdds; i++) {
1374 struct kfd_dev *dev = p->pdds[i]->dev;
1376 /* Only consider GFXv9 and higher GPUs. Older GPUs don't
1377 * support the SVM APIs and don't need to be considered
1378 * for the XNACK mode selection.
1380 if (!KFD_IS_SOC15(dev))
1382 /* Aldebaran can always support XNACK because it can support
1383 * per-process XNACK mode selection. But let the dev->noretry
1384 * setting still influence the default XNACK mode.
1386 if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
1389 /* GFXv10 and later GPUs do not support shader preemption
1390 * during page faults. This can lead to poor QoS for queue
1391 * management and memory-manager-related preemptions or
1394 if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
1405 * On return the kfd_process is fully operational and will be freed when the
1408 static struct kfd_process *create_process(const struct task_struct *thread)
1410 struct kfd_process *process;
1411 struct mmu_notifier *mn;
1414 process = kzalloc(sizeof(*process), GFP_KERNEL);
1416 goto err_alloc_process;
1418 kref_init(&process->ref);
1419 mutex_init(&process->mutex);
1420 process->mm = thread->mm;
1421 process->lead_thread = thread->group_leader;
1422 process->n_pdds = 0;
1423 process->queues_paused = false;
1424 INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
1425 INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
1426 process->last_restore_timestamp = get_jiffies_64();
1427 err = kfd_event_init_process(process);
1429 goto err_event_init;
1430 process->is_32bit_user_mode = in_compat_syscall();
1432 process->pasid = kfd_pasid_alloc();
1433 if (process->pasid == 0) {
1435 goto err_alloc_pasid;
1438 err = pqm_init(&process->pqm, process);
1440 goto err_process_pqm_init;
1442 /* init process apertures */
1443 err = kfd_init_apertures(process);
1445 goto err_init_apertures;
1447 /* Check XNACK support after PDDs are created in kfd_init_apertures */
1448 process->xnack_enabled = kfd_process_xnack_mode(process, false);
1450 err = svm_range_list_init(process);
1452 goto err_init_svm_range_list;
1454 /* alloc_notifier needs to find the process in the hash table */
1455 hash_add_rcu(kfd_processes_table, &process->kfd_processes,
1456 (uintptr_t)process->mm);
1458 * Prevent free_notifier from starting kfd_process_wq_release if
1459 * mmu_notifier_get fails because of a pending signal.
1461 kref_get(&process->ref);
1463 /* MMU notifier registration must be the last call that can fail
1464 * because after this point we cannot unwind the process creation.
1465 * After this point, mmu_notifier_put will trigger the cleanup by
1466 * dropping the last process reference in the free_notifier.
1468 mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
1471 goto err_register_notifier;
1473 BUG_ON(mn != &process->mmu_notifier);
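/* Drop the extra reference taken above to guard against mmu_notifier_get() failure */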
1475 kfd_unref_process(process);
1476 get_task_struct(process->lead_thread);
1480 err_register_notifier:
1481 hash_del_rcu(&process->kfd_processes);
1482 svm_range_list_fini(process);
1483 err_init_svm_range_list:
1484 kfd_process_free_outstanding_kfd_bos(process);
1485 kfd_process_destroy_pdds(process);
1487 pqm_uninit(&process->pqm);
1488 err_process_pqm_init:
1489 kfd_pasid_free(process->pasid);
1491 kfd_event_free_process(process);
1493 mutex_destroy(&process->mutex);
1496 return ERR_PTR(err);
1499 static int init_doorbell_bitmap(struct qcm_process_device *qpd,
1500 struct kfd_dev *dev)
1503 int range_start = dev->shared_resources.non_cp_doorbells_start;
1504 int range_end = dev->shared_resources.non_cp_doorbells_end;
1506 if (!KFD_IS_SOC15(dev))
1509 qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
1511 if (!qpd->doorbell_bitmap)
1514 /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
1515 pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
1516 pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
1517 range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
1518 range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
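/*
 * Reserve both the primary range and its mirror at
 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET so that user queue doorbells are never
 * assigned from doorbells used by SDMA, IH or VCN.
 */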
1520 for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
1521 if (i >= range_start && i <= range_end) {
1522 __set_bit(i, qpd->doorbell_bitmap);
1523 __set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
1524 qpd->doorbell_bitmap);
1531 struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
1532 struct kfd_process *p)
1536 for (i = 0; i < p->n_pdds; i++)
1537 if (p->pdds[i]->dev == dev)
1543 struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
1544 struct kfd_process *p)
1546 struct kfd_process_device *pdd = NULL;
1549 if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
1551 pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
1555 if (init_doorbell_bitmap(&pdd->qpd, dev)) {
1556 pr_err("Failed to init doorbell for process\n");
1561 INIT_LIST_HEAD(&pdd->qpd.queues_list);
1562 INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
1563 pdd->qpd.dqm = dev->dqm;
1564 pdd->qpd.pqm = &p->pqm;
1565 pdd->qpd.evicted = 0;
1566 pdd->qpd.mapped_gws_queue = false;
1568 pdd->bound = PDD_UNBOUND;
1569 pdd->already_dequeued = false;
1570 pdd->runtime_inuse = false;
1571 pdd->vram_usage = 0;
1572 pdd->sdma_past_activity_counter = 0;
1573 pdd->user_gpu_id = dev->id;
1574 atomic64_set(&pdd->evict_duration_counter, 0);
1576 if (dev->shared_resources.enable_mes) {
1577 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
1578 AMDGPU_MES_PROC_CTX_SIZE,
1580 &pdd->proc_ctx_gpu_addr,
1581 &pdd->proc_ctx_cpu_ptr,
1584 pr_err("failed to allocate process context bo\n");
1587 memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
1590 p->pdds[p->n_pdds++] = pdd;
1592 /* Init idr used for memory handle translation */
1593 idr_init(&pdd->alloc_idr);
1603 * kfd_process_device_init_vm - Initialize a VM for a process-device
1605 * @pdd: The process-device
1606 * @drm_file: Optional pointer to a DRM file descriptor
1608 * If @drm_file is specified, it will be used to acquire the VM from
1609 * that file descriptor. If successful, the @pdd takes ownership of
1610 * the file descriptor.
1612 * If @drm_file is NULL, a new VM is created.
1614 * Returns 0 on success, -errno on failure.
1616 int kfd_process_device_init_vm(struct kfd_process_device *pdd,
1617 struct file *drm_file)
1619 struct amdgpu_fpriv *drv_priv;
1620 struct amdgpu_vm *avm;
1621 struct kfd_process *p;
1622 struct kfd_dev *dev;
1631 ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
1634 avm = &drv_priv->vm;
1639 ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
1640 &p->kgd_process_info,
1643 pr_err("Failed to create process VM object\n");
1646 pdd->drm_priv = drm_file->private_data;
1647 atomic64_set(&pdd->tlb_seq, 0);
1649 ret = kfd_process_device_reserve_ib_mem(pdd);
1651 goto err_reserve_ib_mem;
1652 ret = kfd_process_device_init_cwsr_dgpu(pdd);
1656 ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
1660 pdd->drm_file = drm_file;
1665 kfd_process_device_destroy_cwsr_dgpu(pdd);
1667 kfd_process_device_destroy_ib_mem(pdd);
1669 pdd->drm_priv = NULL;
1670 amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);
1676 * Direct the IOMMU to bind the process (specifically the pasid->mm)
1678 * Unbinding occurs when the process dies or the device is removed.
1680 * Assumes that the process lock is held.
1682 struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
1683 struct kfd_process *p)
1685 struct kfd_process_device *pdd;
1688 pdd = kfd_get_process_device_data(dev, p);
1690 pr_err("Process device data doesn't exist\n");
1691 return ERR_PTR(-ENOMEM);
1695 return ERR_PTR(-ENODEV);
1698 * signal runtime-pm system to auto resume and prevent
1699 * further runtime suspend once device pdd is created until
1702 if (!pdd->runtime_inuse) {
1703 err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
1705 pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
1706 return ERR_PTR(err);
1710 err = kfd_iommu_bind_process_to_device(pdd);
1715 * make sure that runtime_usage counter is incremented just once
1718 pdd->runtime_inuse = true;
1723 /* balance runpm reference count and exit with error */
1724 if (!pdd->runtime_inuse) {
1725 pm_runtime_mark_last_busy(adev_to_drm(dev->adev)->dev);
1726 pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
1729 return ERR_PTR(err);
1732 /* Create specific handle mapped to mem from process local memory idr
1733 * Assumes that the process lock is held.
1735 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
1738 return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
1741 /* Translate specific handle from process local memory idr
1742 * Assumes that the process lock is held.
1744 void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
1750 return idr_find(&pdd->alloc_idr, handle);
1753 /* Remove specific handle from process local memory idr
1754 * Assumes that the process lock is held.
1756 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
1760 idr_remove(&pdd->alloc_idr, handle);
1763 /* This increments the process->ref counter. */
1764 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
1766 struct kfd_process *p, *ret_p = NULL;
1769 int idx = srcu_read_lock(&kfd_processes_srcu);
1771 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1772 if (p->pasid == pasid) {
1779 srcu_read_unlock(&kfd_processes_srcu, idx);
1784 /* This increments the process->ref counter. */
1785 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
1787 struct kfd_process *p;
1789 int idx = srcu_read_lock(&kfd_processes_srcu);
1791 p = find_process_by_mm(mm);
1795 srcu_read_unlock(&kfd_processes_srcu, idx);
1800 /* kfd_process_evict_queues - Evict all user queues of a process
1802 * Eviction is reference-counted per process-device. This means multiple
1803 * evictions from different sources can be nested safely.
1805 int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
1809 unsigned int n_evicted = 0;
1811 for (i = 0; i < p->n_pdds; i++) {
1812 struct kfd_process_device *pdd = p->pdds[i];
1814 kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
1817 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
1819 * Eviction returns -EIO if HWS is hung or the ASIC is resetting. In this case
1820 * we still want to mark all queues as evicted to prevent them from being
1821 * added back, since they are not actually saved right now.
1823 if (r && r != -EIO) {
1824 pr_err("Failed to evict process queues\n");
1833 /* To keep state consistent, roll back partial eviction by
1836 for (i = 0; i < p->n_pdds; i++) {
1837 struct kfd_process_device *pdd = p->pdds[i];
1842 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1844 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1846 pr_err("Failed to restore queues\n");
1854 /* kfd_process_restore_queues - Restore all user queues of a process */
1855 int kfd_process_restore_queues(struct kfd_process *p)
1860 for (i = 0; i < p->n_pdds; i++) {
1861 struct kfd_process_device *pdd = p->pdds[i];
1863 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1865 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1868 pr_err("Failed to restore process queues\n");
1877 int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
1881 for (i = 0; i < p->n_pdds; i++)
1882 if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
1888 kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
1889 uint32_t *gpuid, uint32_t *gpuidx)
1893 for (i = 0; i < p->n_pdds; i++)
1894 if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
1895 *gpuid = p->pdds[i]->user_gpu_id;
1902 static void evict_process_worker(struct work_struct *work)
1905 struct kfd_process *p;
1906 struct delayed_work *dwork;
1908 dwork = to_delayed_work(work);
1910 /* Process termination destroys this worker thread. So during the
1911 * lifetime of this thread, kfd_process p will be valid
1913 p = container_of(dwork, struct kfd_process, eviction_work);
1914 WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
1915 "Eviction fence mismatch\n");
1917 * A narrow window of overlap between the restore and evict work
1918 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
1919 * unreserves the KFD BOs, the process can be evicted again. But the
1920 * restore work has a few more steps to finish. So let's wait for any
1921 * previous restore work to complete
1923 flush_delayed_work(&p->restore_work);
1925 pr_debug("Started evicting pasid 0x%x\n", p->pasid);
1926 ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
1928 dma_fence_signal(p->ef);
1929 dma_fence_put(p->ef);
1931 queue_delayed_work(kfd_restore_wq, &p->restore_work,
1932 msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
1934 pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
1936 pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
1939 static void restore_process_worker(struct work_struct *work)
1941 struct delayed_work *dwork;
1942 struct kfd_process *p;
1945 dwork = to_delayed_work(work);
1947 /* Process termination destroys this worker thread. So during the
1948 * lifetime of this thread, kfd_process p will be valid
1950 p = container_of(dwork, struct kfd_process, restore_work);
1951 pr_debug("Started restoring pasid 0x%x\n", p->pasid);
1953 /* Setting last_restore_timestamp before successful restoration.
1954 * Otherwise this would have to be set by KGD (restore_process_bos)
1955 * before KFD BOs are unreserved. If not, the process can be evicted
1956 * again before the timestamp is set.
1957 * If restore fails, the timestamp will be set again in the next
1958 * attempt. This would mean that the minimum GPU quanta would be
1959 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
1963 p->last_restore_timestamp = get_jiffies_64();
1964 ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
1967 pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
1968 p->pasid, PROCESS_BACK_OFF_TIME_MS);
1969 ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
1970 msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
1971 WARN(!ret, "reschedule restore work failed\n");
1975 ret = kfd_process_restore_queues(p);
1977 pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
1979 pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
1982 void kfd_suspend_all_processes(void)
1984 struct kfd_process *p;
1986 int idx = srcu_read_lock(&kfd_processes_srcu);
1988 WARN(debug_evictions, "Evicting all processes");
1989 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1990 cancel_delayed_work_sync(&p->eviction_work);
1991 cancel_delayed_work_sync(&p->restore_work);
1993 if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
1994 pr_err("Failed to suspend process 0x%x\n", p->pasid);
1995 dma_fence_signal(p->ef);
1996 dma_fence_put(p->ef);
1999 srcu_read_unlock(&kfd_processes_srcu, idx);
2002 int kfd_resume_all_processes(void)
2004 struct kfd_process *p;
2006 int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
2008 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2009 if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
2010 pr_err("Restore process %d failed during resume\n",
2015 srcu_read_unlock(&kfd_processes_srcu, idx);
2019 int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
2020 struct vm_area_struct *vma)
2022 struct kfd_process_device *pdd;
2023 struct qcm_process_device *qpd;
2025 if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
2026 pr_err("Incorrect CWSR mapping size.\n");
2030 pdd = kfd_get_process_device_data(dev, process);
2035 qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2036 get_order(KFD_CWSR_TBA_TMA_SIZE));
2037 if (!qpd->cwsr_kaddr) {
2038 pr_err("Error allocating per process CWSR buffer.\n");
2042 vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
2043 | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
2044 /* Mapping pages to user process */
2045 return remap_pfn_range(vma, vma->vm_start,
2046 PFN_DOWN(__pa(qpd->cwsr_kaddr)),
2047 KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
2050 void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
2052 struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
2053 uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
2054 struct kfd_dev *dev = pdd->dev;
2057 * It can be that we race and lose here, but that is extremely unlikely
2058 * and the worst thing that could happen is that we flush the changes
2059 * into the TLB once more, which is harmless.
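* Caching the last flushed sequence number in pdd->tlb_seq lets us skip
* redundant flushes when the VM's page tables have not changed.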
2061 if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
2064 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
2065 /* Nothing to flush until a VMID is assigned, which
2066 * only happens when the first queue is created.
2069 amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
2072 amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
2073 pdd->process->pasid, type);
2077 struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
2082 for (i = 0; i < p->n_pdds; i++) {
2083 struct kfd_process_device *pdd = p->pdds[i];
2085 if (pdd->user_gpu_id == gpu_id)
2092 int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
2099 for (i = 0; i < p->n_pdds; i++) {
2100 struct kfd_process_device *pdd = p->pdds[i];
2102 if (pdd->dev->id == actual_gpu_id)
2103 return pdd->user_gpu_id;
2108 #if defined(CONFIG_DEBUG_FS)
2110 int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
2112 struct kfd_process *p;
2116 int idx = srcu_read_lock(&kfd_processes_srcu);
2118 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2119 seq_printf(m, "Process %d PASID 0x%x:\n",
2120 p->lead_thread->tgid, p->pasid);
2122 mutex_lock(&p->mutex);
2123 r = pqm_debugfs_mqds(m, &p->pqm);
2124 mutex_unlock(&p->mutex);
2130 srcu_read_unlock(&kfd_processes_srcu, idx);