// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include "kfd_iommu.h"
#include <linux/device.h>

/*
 * Wrapper around wait_queue_entry_t
 */
struct kfd_event_waiter {
	wait_queue_entry_t wait;
	struct kfd_event *event; /* Event to wait for */
	bool activated;		 /* Becomes true when event is signaled */
};

/*
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages and map them to the process VA.
 * Individual signal events use their event_id as slot index.
 */
struct kfd_signal_page {
	uint64_t *kernel_address;
	uint64_t __user *user_address;
	bool need_to_free_pages;
};

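/*
 * Slot layout, by way of example: with 8 bytes per slot, the event with
 * event_id N owns the 64-bit word at kernel_address[N] (kernel view) and
 * user_address[N] (user view). A signaler writes its slot and then raises
 * an interrupt, so the interrupt handler only has to find slots that
 * differ from UNSIGNALED_EVENT_SLOT.
 */
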
static uint64_t *page_slots(struct kfd_signal_page *page)
{
	return page->kernel_address;
}

static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
	void *backing_store;
	struct kfd_signal_page *page;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return NULL;

	backing_store = (void *) __get_free_pages(GFP_KERNEL,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Initialize all events to unsignaled */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;
	page->need_to_free_pages = true;
	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);

	return page;

fail_alloc_signal_store:
	kfree(page);
	return NULL;
}

static int allocate_event_notification_slot(struct kfd_process *p,
					    struct kfd_event *ev,
					    const int *restore_id)
{
	int id;

	if (!p->signal_page) {
		p->signal_page = allocate_signal_page(p);
		if (!p->signal_page)
			return -ENOMEM;
		/* Oldest user mode expects 256 event slots */
		p->signal_mapped_size = 256*8;
	}

	if (restore_id) {
		id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1,
				GFP_KERNEL);
	} else {
		/*
		 * Compatibility with old user mode: Only use signal slots
		 * user mode has mapped, may be less than
		 * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
		 * of the event limit without breaking user mode.
		 */
		id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
				GFP_KERNEL);
	}
	if (id < 0)
		return id;

	ev->event_id = id;
	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;

	return 0;
}

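/*
 * Signal slots and event IDs come from the same IDR, so a signal event's ID
 * doubles as its slot index. On CRIU restore the original ID is pinned by
 * passing the one-element range [*restore_id, *restore_id + 1) to
 * idr_alloc(); on normal creation any free slot below the mapped size will
 * do.
 */
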
/*
 * Assumes that p->event_mutex or rcu_readlock is held and of course that p is
 * not going away.
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	return idr_find(&p->event_idr, id);
}

/**
 * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
 * @p:    Pointer to struct kfd_process
 * @id:   ID to look up
 * @bits: Number of valid bits in @id
 *
 * Finds the first signaled event with a matching partial ID. If no
 * matching signaled event is found, returns NULL. In that case the
 * caller should assume that the partial ID is invalid and do an
 * exhaustive search of all signaled events.
 *
 * If multiple events with the same partial ID signal at the same
 * time, they will be found one interrupt at a time, not necessarily
 * in the same order the interrupts occurred. As long as the number of
 * interrupts is correct, all signaled events will be seen by the
 * driver.
 */
static struct kfd_event *lookup_signaled_event_by_partial_id(
	struct kfd_process *p, uint32_t id, uint32_t bits)
{
	struct kfd_event *ev;

	if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
		return NULL;

	/* Fast path for the common case that @id is not a partial ID
	 * and we only need a single lookup.
	 */
	if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			return NULL;

		return idr_find(&p->event_idr, id);
	}

	/* General case for partial IDs: Iterate over all matching IDs
	 * and find the first one that has signaled.
	 */
	for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			continue;

		ev = idr_find(&p->event_idr, id);
	}

	return ev;
}

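/*
 * Worked example of the partial-ID walk: if an interrupt payload carries
 * only the low 20 bits of the event ID, @bits is 20 and the candidates are
 * id, id + (1 << 20), id + (2 << 20), ... up to KFD_SIGNAL_EVENT_LIMIT.
 * The first candidate whose signal slot is set wins.
 */
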
static int create_signal_event(struct file *devkfd, struct kfd_process *p,
				struct kfd_event *ev, const int *restore_id)
{
	int ret;

	if (p->signal_mapped_size &&
	    p->signal_event_count == p->signal_mapped_size / 8) {
		if (!p->signal_event_limit_reached) {
			pr_debug("Signal event wasn't created because limit was reached\n");
			p->signal_event_limit_reached = true;
		}
		return -ENOSPC;
	}

	ret = allocate_event_notification_slot(p, ev, restore_id);
	if (ret) {
		pr_warn("Signal event wasn't created because out of kernel memory\n");
		return ret;
	}

	p->signal_event_count++;

	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}

static int create_other_event(struct kfd_process *p, struct kfd_event *ev, const int *restore_id)
{
	int id;

	if (restore_id)
		id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1,
			GFP_KERNEL);
	else
		/* Cast KFD_LAST_NONSIGNAL_EVENT_ID to uint32_t. This allows an
		 * intentional integer overflow to -1 without a compiler
		 * warning. idr_alloc treats a negative value as "maximum
		 * signed integer".
		 */
		id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
				(uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
				GFP_KERNEL);

	if (id < 0)
		return id;
	ev->event_id = id;

	return 0;
}

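/*
 * Non-signal events (memory exceptions, HW exceptions, etc.) never need a
 * signal slot, so their IDs come from the separate
 * KFD_FIRST_NONSIGNAL_EVENT_ID range and cannot collide with signal slot
 * indices.
 */
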
int kfd_event_init_process(struct kfd_process *p)
{
	int id;

	mutex_init(&p->event_mutex);
	idr_init(&p->event_idr);
	p->signal_page = NULL;
	p->signal_event_count = 1;
	/* Allocate event ID 0. It is used for a fast path to ignore bogus events
	 * that are sent by the CP without a context ID
	 */
	id = idr_alloc(&p->event_idr, NULL, 0, 1, GFP_KERNEL);
	if (id < 0) {
		idr_destroy(&p->event_idr);
		mutex_destroy(&p->event_mutex);
		return id;
	}
	return 0;
}

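/*
 * Reserving ID 0 up front means a spurious interrupt carrying a zero
 * context ID resolves to a NULL IDR entry and is dropped cheaply instead
 * of ever matching a real user event.
 */
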
static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Wake up pending waiters. They will return failure */
	spin_lock(&ev->lock);
	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		WRITE_ONCE(waiter->event, NULL);
	wake_up_all(&ev->wq);
	spin_unlock(&ev->lock);

	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
	    ev->type == KFD_EVENT_TYPE_DEBUG)
		p->signal_event_count--;

	idr_remove(&p->event_idr, ev->event_id);
	kfree_rcu(ev, rcu);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	uint32_t id;

	idr_for_each_entry(&p->event_idr, ev, id)
		if (ev)
			destroy_event(p, ev);
	idr_destroy(&p->event_idr);
	mutex_destroy(&p->event_mutex);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_page(struct kfd_process *p)
{
	struct kfd_signal_page *page = p->signal_page;

	if (page) {
		if (page->need_to_free_pages)
			free_pages((unsigned long)page->kernel_address,
				   get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_page(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
	       ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

static int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
		       uint64_t size, uint64_t user_handle)
{
	struct kfd_signal_page *page;

	if (p->signal_page)
		return -EBUSY;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* Initialize all events to unsignaled */
	memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = kernel_address;

	p->signal_page = page;
	p->signal_mapped_size = size;
	p->signal_handle = user_handle;

	return 0;
}

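/*
 * This path installs a caller-provided backing store (for example a GTT BO
 * mapped by kfd_kmap_event_page() during CRIU restore) instead of pages
 * from allocate_signal_page(); need_to_free_pages stays false, so
 * shutdown_signal_page() will not free memory it does not own.
 */
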
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset)
{
	struct kfd_node *kfd;
	struct kfd_process_device *pdd;
	void *mem, *kern_addr;
	uint64_t size;
	int err = 0;

	if (p->signal_page) {
		pr_err("Event page is already set\n");
		return -EINVAL;
	}

	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(event_page_offset));
	if (!pdd) {
		pr_err("Getting device by id failed in %s\n", __func__);
		return -EINVAL;
	}
	kfd = pdd->dev;

	pdd = kfd_bind_process_to_device(kfd, p);
	if (IS_ERR(pdd))
		return PTR_ERR(pdd);

	mem = kfd_process_device_translate_handle(pdd,
			GET_IDR_HANDLE(event_page_offset));
	if (!mem) {
		pr_err("Can't find BO, offset is 0x%llx\n", event_page_offset);
		return -EINVAL;
	}

	err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(mem, &kern_addr, &size);
	if (err) {
		pr_err("Failed to map event page to kernel\n");
		return err;
	}

	err = kfd_event_page_set(p, kern_addr, size, event_page_offset);
	if (err) {
		pr_err("Failed to set event page\n");
		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
		return err;
	}

	return err;
}

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	spin_lock_init(&ev->lock);
	init_waitqueue_head(&ev->wq);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev, NULL);
		if (!ret) {
			*event_page_offset = KFD_MMAP_TYPE_EVENTS;
			*event_slot_index = ev->event_id;
		}
		break;
	default:
		ret = create_other_event(p, ev, NULL);
		break;
	}

	if (!ret) {
		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}

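/*
 * For signal/debug events, user space is expected to mmap() the signal page
 * using the KFD_MMAP_TYPE_EVENTS offset returned in *event_page_offset and
 * can then access the slot at *event_slot_index directly.
 */
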
int kfd_criu_restore_event(struct file *devkfd,
			   struct kfd_process *p,
			   uint8_t __user *user_priv_ptr,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size)
{
	struct kfd_criu_event_priv_data *ev_priv;
	struct kfd_event *ev = NULL;
	int ret = 0;

	ev_priv = kmalloc(sizeof(*ev_priv), GFP_KERNEL);
	if (!ev_priv)
		return -ENOMEM;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev) {
		ret = -ENOMEM;
		goto exit;
	}

	if (*priv_data_offset + sizeof(*ev_priv) > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	ret = copy_from_user(ev_priv, user_priv_ptr + *priv_data_offset, sizeof(*ev_priv));
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}
	*priv_data_offset += sizeof(*ev_priv);

	if (ev_priv->user_handle) {
		ret = kfd_kmap_event_page(p, ev_priv->user_handle);
		if (ret)
			goto exit;
	}

	ev->type = ev_priv->type;
	ev->auto_reset = ev_priv->auto_reset;
	ev->signaled = ev_priv->signaled;

	spin_lock_init(&ev->lock);
	init_waitqueue_head(&ev->wq);

	mutex_lock(&p->event_mutex);
	switch (ev->type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev, &ev_priv->event_id);
		break;
	case KFD_EVENT_TYPE_MEMORY:
		memcpy(&ev->memory_exception_data,
			&ev_priv->memory_exception_data,
			sizeof(struct kfd_hsa_memory_exception_data));
		ret = create_other_event(p, ev, &ev_priv->event_id);
		break;
	case KFD_EVENT_TYPE_HW_EXCEPTION:
		memcpy(&ev->hw_exception_data,
			&ev_priv->hw_exception_data,
			sizeof(struct kfd_hsa_hw_exception_data));
		ret = create_other_event(p, ev, &ev_priv->event_id);
		break;
	}
	mutex_unlock(&p->event_mutex);

exit:
	if (ret)
		kfree(ev);
	kfree(ev_priv);
	return ret;
}

int kfd_criu_checkpoint_events(struct kfd_process *p,
			 uint8_t __user *user_priv_data,
			 uint64_t *priv_data_offset)
{
	struct kfd_criu_event_priv_data *ev_privs;
	int i = 0;
	int ret = 0;
	struct kfd_event *ev;
	uint32_t ev_id;

	uint32_t num_events = kfd_get_num_events(p);

	if (!num_events)
		return 0;

	ev_privs = kvzalloc(num_events * sizeof(*ev_privs), GFP_KERNEL);
	if (!ev_privs)
		return -ENOMEM;

	idr_for_each_entry(&p->event_idr, ev, ev_id) {
		struct kfd_criu_event_priv_data *ev_priv;

		/*
		 * Currently, all events have same size of private_data, but the current ioctl's
		 * and CRIU plugin supports private_data of variable sizes
		 */
		ev_priv = &ev_privs[i];

		ev_priv->object_type = KFD_CRIU_OBJECT_TYPE_EVENT;

		/* We store the user_handle with the first event */
		if (i == 0 && p->signal_page)
			ev_priv->user_handle = p->signal_handle;

		ev_priv->event_id = ev->event_id;
		ev_priv->auto_reset = ev->auto_reset;
		ev_priv->type = ev->type;
		ev_priv->signaled = ev->signaled;

		if (ev_priv->type == KFD_EVENT_TYPE_MEMORY)
			memcpy(&ev_priv->memory_exception_data,
				&ev->memory_exception_data,
				sizeof(struct kfd_hsa_memory_exception_data));
		else if (ev_priv->type == KFD_EVENT_TYPE_HW_EXCEPTION)
			memcpy(&ev_priv->hw_exception_data,
				&ev->hw_exception_data,
				sizeof(struct kfd_hsa_hw_exception_data));

		pr_debug("Checkpointed event[%d] id = 0x%08x auto_reset = %x type = %x signaled = %x\n",
			  i, ev_priv->event_id, ev_priv->auto_reset,
			  ev_priv->type, ev_priv->signaled);
		i++;
	}

	ret = copy_to_user(user_priv_data + *priv_data_offset,
			   ev_privs, num_events * sizeof(*ev_privs));
	if (ret) {
		pr_err("Failed to copy events priv to user\n");
		ret = -EFAULT;
	}

	*priv_data_offset += num_events * sizeof(*ev_privs);

	kvfree(ev_privs);
	return ret;
}

int kfd_get_num_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	uint32_t id;
	u32 num_events = 0;

	idr_for_each_entry(&p->event_idr, ev, id)
		num_events++;

	return num_events;
}

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Auto reset if the list is non-empty and we're waking
	 * someone. waitqueue_active is safe here because we're
	 * protected by the ev->lock, which is also held when
	 * updating the wait queues in kfd_wait_on_events.
	 */
	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
	if (!(++ev->event_age)) {
		/* Never wrap back to reserved/default event age 0/1 */
		ev->event_age = 2;
		WARN_ONCE(1, "event_age wrap back!");
	}

	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		WRITE_ONCE(waiter->activated, true);

	wake_up_all(&ev->wq);
}

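/*
 * Auto-reset semantics, in short: if at least one waiter is queued, the
 * wake-up consumes the signal and the event stays unsignaled; only when
 * nobody is waiting does an auto-reset event remain signaled for a later
 * waiter. Manual-reset events stay signaled until explicitly reset.
 */
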
/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	rcu_read_lock();

	ev = lookup_event_by_id(p, event_id);
	if (!ev) {
		ret = -EINVAL;
		goto unlock_rcu;
	}
	spin_lock(&ev->lock);

	if (event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	spin_unlock(&ev->lock);
unlock_rcu:
	rcu_read_unlock();
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	rcu_read_lock();

	ev = lookup_event_by_id(p, event_id);
	if (!ev) {
		ret = -EINVAL;
		goto unlock_rcu;
	}
	spin_lock(&ev->lock);

	if (event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	spin_unlock(&ev->lock);
unlock_rcu:
	rcu_read_unlock();
	return ret;
}

static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	WRITE_ONCE(page_slots(p->signal_page)[ev->event_id], UNSIGNALED_EVENT_SLOT);
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		spin_lock(&ev->lock);
		set_event(ev);
		spin_unlock(&ev->lock);
	}
}

void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev = NULL;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	rcu_read_lock();

	if (valid_id_bits)
		ev = lookup_signaled_event_by_partial_id(p, partial_id,
							 valid_id_bits);
	if (ev) {
		set_event_from_interrupt(p, ev);
	} else if (p->signal_page) {
		/*
		 * Partial ID lookup failed. Assume that the event ID
		 * in the interrupt payload was invalid and do an
		 * exhaustive search of signaled events.
		 */
		uint64_t *slots = page_slots(p->signal_page);
		uint32_t id;

		if (valid_id_bits)
			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
					     partial_id, valid_id_bits);

		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) {
			/* With relatively few events, it's faster to
			 * iterate over the event IDR
			 */
			idr_for_each_entry(&p->event_idr, ev, id) {
				if (id >= KFD_SIGNAL_EVENT_LIMIT)
					break;

				if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT)
					set_event_from_interrupt(p, ev);
			}
		} else {
			/* With relatively many events, it's faster to
			 * iterate over the signal slots and lookup
			 * only signaled events from the IDR.
			 */
			for (id = 1; id < KFD_SIGNAL_EVENT_LIMIT; id++)
				if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT) {
					ev = lookup_event_by_id(p, id);
					set_event_from_interrupt(p, ev);
				}
		}
	}

	rcu_read_unlock();
	kfd_unref_process(p);
}

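/*
 * The KFD_SIGNAL_EVENT_LIMIT / 64 threshold above is a heuristic: with few
 * events it is cheaper to walk the sparse IDR and test each event's slot,
 * while with many events scanning the dense slot array and doing IDR
 * lookups only for set slots does less work overall.
 */
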
static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter),
				GFP_KERNEL);
	if (!event_waiters)
		return NULL;

	for (i = 0; i < num_events; i++)
		init_wait(&event_waiters[i].wait);

	return event_waiters;
}

static int init_event_waiter(struct kfd_process *p,
			     struct kfd_event_waiter *waiter,
			     uint32_t event_id)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	spin_lock(&ev->lock);
	waiter->event = ev;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;
	if (!waiter->activated)
		add_wait_queue(&ev->wq, &waiter->wait);
	spin_unlock(&ev->lock);

	return 0;
}

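/*
 * Note that an already-signaled auto-reset event is consumed right here:
 * the waiter starts out activated and ev->signaled is cleared, so the same
 * signal cannot also satisfy a second concurrent waiter.
 */
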
/* test_event_condition - Test condition of events being waited for
 * @all:           Return completion only if all events have signaled
 * @num_events:    Number of events to wait for
 * @event_waiters: Array of event waiters, one per event
 *
 * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
 * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
 * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
 * the events have been destroyed.
 */
static uint32_t test_event_condition(bool all, uint32_t num_events,
				struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (!READ_ONCE(event_waiters[i].event))
			return KFD_IOC_WAIT_RESULT_FAIL;

		if (READ_ONCE(event_waiters[i].activated)) {
			if (!all)
				return KFD_IOC_WAIT_RESULT_COMPLETE;

			activated_count++;
		}
	}

	return activated_count == num_events ?
		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}

/*
 * Copy event specific data, if defined.
 * Currently only memory exception events have additional data to copy to user
 */
static int copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (!event)
			return -EINVAL; /* event was destroyed */
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[i].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return -EFAULT;
		}
	}

	return 0;
}

static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}

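/*
 * The "+ 1" presumably adds one jiffy of slack so that schedule_timeout()
 * cannot return before the full requested interval has elapsed when the
 * current tick is already partly over; 0 and MAX_SCHEDULE_TIMEOUT keep
 * their special meanings from the checks above.
 */
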
static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters,
			 bool undo_auto_reset)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		if (waiters[i].event) {
			spin_lock(&waiters[i].event->lock);
			remove_wait_queue(&waiters[i].event->wq,
					  &waiters[i].wait);
			if (undo_auto_reset && waiters[i].activated &&
			    waiters[i].event && waiters[i].event->auto_reset)
				set_event(waiters[i].event);
			spin_unlock(&waiters[i].event->lock);
		}

	kfree(waiters);
}

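/*
 * undo_auto_reset is passed as true on -ERESTARTSYS: the syscall will be
 * restarted, so an auto-reset signal this waiter already consumed is
 * re-asserted with set_event() to avoid losing it across the restart.
 */
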
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t *user_timeout_ms,
		       uint32_t *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;

	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(*user_timeout_ms);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto out;
	}

	/* Use p->event_mutex here to protect against concurrent creation and
	 * destruction of events while we initialize event_waiters.
	 */
	mutex_lock(&p->event_mutex);

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto out_unlock;
		}

		ret = init_event_waiter(p, &event_waiters[i],
				event_data.event_id);
		if (ret)
			goto out_unlock;
	}

	/* Check condition once. */
	*wait_result = test_event_condition(all, num_events, event_waiters);
	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);
		goto out_unlock;
	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
		/* This should not happen. Events shouldn't be
		 * destroyed while we're holding the event_mutex
		 */
		goto out_unlock;
	}

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			if (*user_timeout_ms != KFD_EVENT_TIMEOUT_IMMEDIATE &&
			    *user_timeout_ms != KFD_EVENT_TIMEOUT_INFINITE)
				*user_timeout_ms = jiffies_to_msecs(
					max(0l, timeout-1));
			break;
		}

		/* Set task state to interruptible sleep before
		 * checking wake-up conditions. A concurrent wake-up
		 * will put the task back into runnable state. In that
		 * case schedule_timeout will not put the task to
		 * sleep and we'll get a chance to re-check the
		 * updated conditions almost immediately. Otherwise,
		 * this race condition would lead to a soft hang or a
		 * very long sleep.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		*wait_result = test_event_condition(all, num_events,
						    event_waiters);
		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
			break;

		if (timeout <= 0)
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	mutex_lock(&p->event_mutex);
	/* copy_signaled_event_data may sleep. So this has to happen
	 * after the task state is set back to RUNNING.
	 *
	 * The event may also have been destroyed after signaling. So
	 * copy_signaled_event_data also must confirm that the event
	 * still exists. Therefore this must be under the p->event_mutex
	 * which is also held when events are destroyed.
	 */
	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);

out_unlock:
	free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
	mutex_unlock(&p->event_mutex);
out:
	if (ret)
		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
		ret = -EIO;

	return ret;
}

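/*
 * Wait flow in short: register all waiters under the event mutex, drop the
 * mutex for the sleep loop (waiters are woken through their wait queues),
 * then retake the mutex so copy_signaled_event_data() can rely on the
 * events not being destroyed while it copies exception data to user space.
 */
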
int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned long pfn;
	struct kfd_signal_page *page;
	int ret;

	/* check required size doesn't exceed the allocated size */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
			get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page = p->signal_page;
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found\n");
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP);

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* mapping the page to user process */
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (!ret)
		p->signal_mapped_size = vma->vm_end - vma->vm_start;

	return ret;
}

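/*
 * The whole signal slot area is remapped into the process with
 * remap_pfn_range(); p->signal_mapped_size then records how many slots
 * user space can actually see, which bounds signal event IDs in
 * allocate_event_notification_slot().
 */
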
/*
 * Assumes that p is not going away.
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	uint32_t id;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	rcu_read_lock();

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			spin_lock(&ev->lock);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
			spin_unlock(&ev->lock);
		}

	if (type == KFD_EVENT_TYPE_MEMORY) {
		dev_warn(kfd_device,
			"Sending SIGSEGV to process %d (pasid 0x%x)",
				p->lead_thread->pid, p->pasid);
		send_sig(SIGSEGV, p->lead_thread, 0);
	}

	/* Send SIGTERM if no event of type "type" has been found */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to process %d (pasid 0x%x)",
					p->lead_thread->pid, p->pasid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"Process %d (pasid 0x%x) got unhandled exception",
				p->lead_thread->pid, p->pasid);
		}
	}

	rcu_read_unlock();
}

#ifdef KFD_SUPPORT_IOMMU_V2
void kfd_signal_iommu_event(struct kfd_node *dev, u32 pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;
	int user_gpu_id;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct mm_struct *mm;

	if (!p)
		return; /* Presumably process exited. */

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		kfd_unref_process(p);
		return; /* Process is exiting */
	}

	user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
	if (unlikely(user_gpu_id == -EINVAL)) {
		WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
		return;
	}
	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	mmap_read_lock(mm);
	vma = find_vma(mm, address);

	memory_exception_data.gpu_id = user_gpu_id;
	memory_exception_data.va = address;
	/* Set failure reason */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma && address >= vma->vm_start) {
		memory_exception_data.failure.NotPresent = 0;

		if (is_write_requested && !(vma->vm_flags & VM_WRITE))
			memory_exception_data.failure.ReadOnly = 1;
		else
			memory_exception_data.failure.ReadOnly = 0;

		if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
			memory_exception_data.failure.NoExecute = 1;
		else
			memory_exception_data.failure.NoExecute = 0;
	}

	mmap_read_unlock(mm);
	mmput(mm);

	pr_debug("notpresent %d, noexecute %d, readonly %d\n",
			memory_exception_data.failure.NotPresent,
			memory_exception_data.failure.NoExecute,
			memory_exception_data.failure.ReadOnly);

	/* Workaround on Raven to not kill the process when memory is freed
	 * before IOMMU is able to finish processing all the excessive PPRs
	 */
	if (KFD_GC_VERSION(dev) != IP_VERSION(9, 1, 0) &&
	    KFD_GC_VERSION(dev) != IP_VERSION(9, 2, 2) &&
	    KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0))
		lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
				&memory_exception_data);

	kfd_unref_process(p);
}
#endif /* KFD_SUPPORT_IOMMU_V2 */

void kfd_signal_hw_exception_event(u32 pasid)
{
	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);
	kfd_unref_process(p);
}

void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
				struct kfd_vm_fault_info *info,
				struct kfd_hsa_memory_exception_data *data)
{
	struct kfd_event *ev;
	uint32_t id;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct kfd_hsa_memory_exception_data memory_exception_data;
	int user_gpu_id;

	if (!p)
		return; /* Presumably process exited. */

	user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
	if (unlikely(user_gpu_id == -EINVAL)) {
		WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
		return;
	}

	/* SoC15 chips and onwards will pass in data from now on. */
	if (!data) {
		memset(&memory_exception_data, 0, sizeof(memory_exception_data));
		memory_exception_data.gpu_id = user_gpu_id;
		memory_exception_data.failure.imprecise = true;

		/* Set failure reason */
		if (info) {
			memory_exception_data.va = (info->page_addr) <<
								PAGE_SHIFT;
			memory_exception_data.failure.NotPresent =
				info->prot_valid ? 1 : 0;
			memory_exception_data.failure.NoExecute =
				info->prot_exec ? 1 : 0;
			memory_exception_data.failure.ReadOnly =
				info->prot_write ? 1 : 0;
			memory_exception_data.failure.imprecise = 0;
		}
	}

	rcu_read_lock();

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == KFD_EVENT_TYPE_MEMORY) {
			spin_lock(&ev->lock);
			ev->memory_exception_data = data ? *data :
							memory_exception_data;
			set_event(ev);
			spin_unlock(&ev->lock);
		}

	rcu_read_unlock();
	kfd_unref_process(p);
}

void kfd_signal_reset_event(struct kfd_node *dev)
{
	struct kfd_hsa_hw_exception_data hw_exception_data;
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct kfd_process *p;
	struct kfd_event *ev;
	unsigned int temp;
	uint32_t id, idx;
	int reset_cause = atomic_read(&dev->sram_ecc_flag) ?
			KFD_HW_EXCEPTION_ECC :
			KFD_HW_EXCEPTION_GPU_HANG;

	/* Whole gpu reset caused by GPU hang and memory is lost */
	memset(&hw_exception_data, 0, sizeof(hw_exception_data));
	hw_exception_data.memory_lost = 1;
	hw_exception_data.reset_cause = reset_cause;

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));
	memory_exception_data.ErrorType = KFD_MEM_ERR_SRAM_ECC;
	memory_exception_data.failure.imprecise = true;

	idx = srcu_read_lock(&kfd_processes_srcu);
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		int user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);

		if (unlikely(user_gpu_id == -EINVAL)) {
			WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
			continue;
		}

		rcu_read_lock();

		id = KFD_FIRST_NONSIGNAL_EVENT_ID;
		idr_for_each_entry_continue(&p->event_idr, ev, id) {
			if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
				spin_lock(&ev->lock);
				ev->hw_exception_data = hw_exception_data;
				ev->hw_exception_data.gpu_id = user_gpu_id;
				set_event(ev);
				spin_unlock(&ev->lock);
			}
			if (ev->type == KFD_EVENT_TYPE_MEMORY &&
			    reset_cause == KFD_HW_EXCEPTION_ECC) {
				spin_lock(&ev->lock);
				ev->memory_exception_data = memory_exception_data;
				ev->memory_exception_data.gpu_id = user_gpu_id;
				set_event(ev);
				spin_unlock(&ev->lock);
			}
		}

		rcu_read_unlock();
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
{
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct kfd_hsa_hw_exception_data hw_exception_data;
	struct kfd_event *ev;
	uint32_t id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	int user_gpu_id;

	if (!p)
		return; /* Presumably process exited. */

	user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
	if (unlikely(user_gpu_id == -EINVAL)) {
		WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
		return;
	}

	memset(&hw_exception_data, 0, sizeof(hw_exception_data));
	hw_exception_data.gpu_id = user_gpu_id;
	hw_exception_data.memory_lost = 1;
	hw_exception_data.reset_cause = KFD_HW_EXCEPTION_ECC;

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));
	memory_exception_data.ErrorType = KFD_MEM_ERR_POISON_CONSUMED;
	memory_exception_data.gpu_id = user_gpu_id;
	memory_exception_data.failure.imprecise = true;

	rcu_read_lock();

	idr_for_each_entry_continue(&p->event_idr, ev, id) {
		if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
			spin_lock(&ev->lock);
			ev->hw_exception_data = hw_exception_data;
			set_event(ev);
			spin_unlock(&ev->lock);
		}

		if (ev->type == KFD_EVENT_TYPE_MEMORY) {
			spin_lock(&ev->lock);
			ev->memory_exception_data = memory_exception_data;
			set_event(ev);
			spin_unlock(&ev->lock);
		}
	}

	rcu_read_unlock();

	/* user application will handle SIGBUS signal */
	send_sig(SIGBUS, p->lead_thread, 0);

	kfd_unref_process(p);
}