/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include "kfd_iommu.h"
#include <linux/device.h>

/*
 * Wrapper around wait_queue_entry_t
 */
struct kfd_event_waiter {
        wait_queue_entry_t wait;
        struct kfd_event *event; /* Event to wait for */
        bool activated;          /* Becomes true when event is signaled */
};

/*
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages and map them to the process VA.
 * Individual signal events use their event_id as slot index.
 */
struct kfd_signal_page {
        uint64_t *kernel_address;
        uint64_t __user *user_address;
        bool need_to_free_pages;
};
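
/*
 * Illustrative sketch (not part of the driver logic): the expected
 * signaling sequence for a signal event with event_id N is roughly
 *
 *      signal_page[N] = 1;     // signaler writes the slot
 *      send interrupt;         // may carry only part of N
 *
 * On the receiving side, kfd_signal_event_interrupt() below uses the
 * (possibly partial) ID from the interrupt payload to find the slot,
 * resets it to UNSIGNALED_EVENT_SLOT and wakes any waiters.
 */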

static uint64_t *page_slots(struct kfd_signal_page *page)
{
        return page->kernel_address;
}

static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
        void *backing_store;
        struct kfd_signal_page *page;

        page = kzalloc(sizeof(*page), GFP_KERNEL);
        if (!page)
                return NULL;

        backing_store = (void *) __get_free_pages(GFP_KERNEL,
                                get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
        if (!backing_store)
                goto fail_alloc_signal_store;

        /* Initialize all events to unsignaled */
        memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
               KFD_SIGNAL_EVENT_LIMIT * 8);

        page->kernel_address = backing_store;
        page->need_to_free_pages = true;
        pr_debug("Allocated new event signal page at %p, for process %p\n",
                 page, p);

        return page;

fail_alloc_signal_store:
        kfree(page);
        return NULL;
}

static int allocate_event_notification_slot(struct kfd_process *p,
                                            struct kfd_event *ev)
{
        int id;

        if (!p->signal_page) {
                p->signal_page = allocate_signal_page(p);
                if (!p->signal_page)
                        return -ENOMEM;
                /* Oldest user mode expects 256 event slots */
                p->signal_mapped_size = 256*8;
        }

        /*
         * Compatibility with old user mode: Only use signal slots
         * user mode has mapped, may be less than
         * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
         * of the event limit without breaking user mode.
         */
        id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
                       GFP_KERNEL);
        if (id < 0)
                return id;

        ev->event_id = id;
        page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;

        return 0;
}

/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
        return idr_find(&p->event_idr, id);
}

/**
 * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
 * @p:     Pointer to struct kfd_process
 * @id:    ID to look up
 * @bits:  Number of valid bits in @id
 *
 * Finds the first signaled event with a matching partial ID. If no
 * matching signaled event is found, returns NULL. In that case the
 * caller should assume that the partial ID is invalid and do an
 * exhaustive search of all signaled events.
 *
 * If multiple events with the same partial ID signal at the same
 * time, they will be found one interrupt at a time, not necessarily
 * in the same order the interrupts occurred. As long as the number of
 * interrupts is correct, all signaled events will be seen by the
 * driver.
 */
static struct kfd_event *lookup_signaled_event_by_partial_id(
        struct kfd_process *p, uint32_t id, uint32_t bits)
{
        struct kfd_event *ev;

        if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
                return NULL;

        /* Fast path for the common case that @id is not a partial ID
         * and we only need a single lookup.
         */
        if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
                if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
                        return NULL;

                return idr_find(&p->event_idr, id);
        }

        /* General case for partial IDs: Iterate over all matching IDs
         * and find the first one that has signaled.
         */
        for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
                if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
                        continue;

                ev = idr_find(&p->event_idr, id);
        }

        return ev;
}
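
/*
 * Worked example (illustrative only): if the interrupt payload carries
 * bits = 10 valid ID bits and partial_id = 5, the candidates examined by
 * the loop above are 5, 5 + 1024, 5 + 2048, ... up to
 * KFD_SIGNAL_EVENT_LIMIT, and the first one whose signal slot is not
 * UNSIGNALED_EVENT_SLOT is returned.
 */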

static int create_signal_event(struct file *devkfd,
                               struct kfd_process *p,
                               struct kfd_event *ev)
{
        int ret;

        if (p->signal_mapped_size &&
            p->signal_event_count == p->signal_mapped_size / 8) {
                if (!p->signal_event_limit_reached) {
                        pr_warn("Signal event wasn't created because limit was reached\n");
                        p->signal_event_limit_reached = true;
                }
                return -ENOSPC;
        }

        ret = allocate_event_notification_slot(p, ev);
        if (ret) {
                pr_warn("Signal event wasn't created because out of kernel memory\n");
                return ret;
        }

        p->signal_event_count++;

        ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
        pr_debug("Signal event number %zu created with id %d, address %p\n",
                 p->signal_event_count, ev->event_id,
                 ev->user_signal_address);

        return 0;
}

static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
        /* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
         * intentional integer overflow to -1 without a compiler
         * warning. idr_alloc treats a negative value as "maximum
         * signed integer".
         */
        int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
                           (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
                           GFP_KERNEL);

        if (id < 0)
                return id;
        ev->event_id = id;

        return 0;
}
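
/*
 * Illustrative note on the ID space (inferred from the two idr_alloc()
 * ranges above, not spelled out elsewhere in this file): signal and
 * debug events get IDs below p->signal_mapped_size / 8 so the ID can
 * double as a signal-slot index, while all other event types are
 * allocated from KFD_FIRST_NONSIGNAL_EVENT_ID upwards and never occupy
 * a signal slot.
 */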

void kfd_event_init_process(struct kfd_process *p)
{
        mutex_init(&p->event_mutex);
        idr_init(&p->event_idr);
        p->signal_page = NULL;
        p->signal_event_count = 0;
}

static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
        struct kfd_event_waiter *waiter;

        /* Wake up pending waiters. They will return failure */
        list_for_each_entry(waiter, &ev->wq.head, wait.entry)
                waiter->event = NULL;
        wake_up_all(&ev->wq);

        if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
            ev->type == KFD_EVENT_TYPE_DEBUG)
                p->signal_event_count--;

        idr_remove(&p->event_idr, ev->event_id);
        kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
        struct kfd_event *ev;
        uint32_t id;

        idr_for_each_entry(&p->event_idr, ev, id)
                destroy_event(p, ev);
        idr_destroy(&p->event_idr);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_page(struct kfd_process *p)
{
        struct kfd_signal_page *page = p->signal_page;

        if (page) {
                if (page->need_to_free_pages)
                        free_pages((unsigned long)page->kernel_address,
                                   get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
                kfree(page);
        }
}

void kfd_event_free_process(struct kfd_process *p)
{
        destroy_events(p);
        shutdown_signal_page(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
        return ev->type == KFD_EVENT_TYPE_SIGNAL ||
               ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
        return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
                       uint64_t size)
{
        struct kfd_signal_page *page;

        if (p->signal_page)
                return -EBUSY;

        page = kzalloc(sizeof(*page), GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        /* Initialize all events to unsignaled */
        memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
               KFD_SIGNAL_EVENT_LIMIT * 8);

        page->kernel_address = kernel_address;

        p->signal_page = page;
        p->signal_mapped_size = size;

        return 0;
}

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
                     uint32_t event_type, bool auto_reset, uint32_t node_id,
                     uint32_t *event_id, uint32_t *event_trigger_data,
                     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
        int ret = 0;
        struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

        if (!ev)
                return -ENOMEM;

        ev->type = event_type;
        ev->auto_reset = auto_reset;
        ev->signaled = false;

        init_waitqueue_head(&ev->wq);

        *event_page_offset = 0;

        mutex_lock(&p->event_mutex);

        switch (event_type) {
        case KFD_EVENT_TYPE_SIGNAL:
        case KFD_EVENT_TYPE_DEBUG:
                ret = create_signal_event(devkfd, p, ev);
                if (!ret) {
                        *event_page_offset = KFD_MMAP_TYPE_EVENTS;
                        *event_page_offset <<= PAGE_SHIFT;
                        *event_slot_index = ev->event_id;
                }
                break;
        default:
                ret = create_other_event(p, ev);
                break;
        }

        if (!ret) {
                *event_id = ev->event_id;
                *event_trigger_data = ev->event_id;
        } else {
                kfree(ev);
        }

        mutex_unlock(&p->event_mutex);

        return ret;
}

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
        struct kfd_event *ev;
        int ret = 0;

        mutex_lock(&p->event_mutex);

        ev = lookup_event_by_id(p, event_id);

        if (ev)
                destroy_event(p, ev);
        else
                ret = -EINVAL;

        mutex_unlock(&p->event_mutex);
        return ret;
}

static void set_event(struct kfd_event *ev)
{
        struct kfd_event_waiter *waiter;

        /* Auto reset if the list is non-empty and we're waking
         * someone. waitqueue_active is safe here because we're
         * protected by the p->event_mutex, which is also held when
         * updating the wait queues in kfd_wait_on_events.
         */
        ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);

        list_for_each_entry(waiter, &ev->wq.head, wait.entry)
                waiter->activated = true;

        wake_up_all(&ev->wq);
}
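
/*
 * Illustrative note: for an auto-reset event the assignment above leaves
 * ev->signaled false whenever at least one waiter is queued, so the
 * wake-up itself consumes the signal. With no waiters, or for a
 * non-auto-reset event, the signal is latched in ev->signaled and a
 * later kfd_wait_on_events() call will see it immediately.
 */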

/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
        int ret = 0;
        struct kfd_event *ev;

        mutex_lock(&p->event_mutex);

        ev = lookup_event_by_id(p, event_id);

        if (ev && event_can_be_cpu_signaled(ev))
                set_event(ev);
        else
                ret = -EINVAL;

        mutex_unlock(&p->event_mutex);
        return ret;
}

static void reset_event(struct kfd_event *ev)
{
        ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
        int ret = 0;
        struct kfd_event *ev;

        mutex_lock(&p->event_mutex);

        ev = lookup_event_by_id(p, event_id);

        if (ev && event_can_be_cpu_signaled(ev))
                reset_event(ev);
        else
                ret = -EINVAL;

        mutex_unlock(&p->event_mutex);
        return ret;
}

static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
        page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
                                     struct kfd_event *ev)
{
        if (ev && event_can_be_gpu_signaled(ev)) {
                acknowledge_signal(p, ev);
                set_event(ev);
        }
}

void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
                                uint32_t valid_id_bits)
{
        struct kfd_event *ev = NULL;

        /*
         * Because we are called from arbitrary context (workqueue) as opposed
         * to process context, kfd_process could attempt to exit while we are
         * running so the lookup function increments the process ref count.
         */
        struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

        if (!p)
                return; /* Presumably process exited. */

        mutex_lock(&p->event_mutex);

        if (valid_id_bits)
                ev = lookup_signaled_event_by_partial_id(p, partial_id,
                                                         valid_id_bits);
        if (ev) {
                set_event_from_interrupt(p, ev);
        } else if (p->signal_page) {
                /*
                 * Partial ID lookup failed. Assume that the event ID
                 * in the interrupt payload was invalid and do an
                 * exhaustive search of signaled events.
                 */
                uint64_t *slots = page_slots(p->signal_page);
                uint32_t id;

                if (valid_id_bits)
                        pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
                                             partial_id, valid_id_bits);

                if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) {
                        /* With relatively few events, it's faster to
                         * iterate over the event IDR
                         */
                        idr_for_each_entry(&p->event_idr, ev, id) {
                                if (id >= KFD_SIGNAL_EVENT_LIMIT)
                                        break;

                                if (slots[id] != UNSIGNALED_EVENT_SLOT)
                                        set_event_from_interrupt(p, ev);
                        }
                } else {
                        /* With relatively many events, it's faster to
                         * iterate over the signal slots and lookup
                         * only signaled events from the IDR.
                         */
                        for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
                                if (slots[id] != UNSIGNALED_EVENT_SLOT) {
                                        ev = lookup_event_by_id(p, id);
                                        set_event_from_interrupt(p, ev);
                                }
                }
        }

        mutex_unlock(&p->event_mutex);
        kfd_unref_process(p);
}

static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
        struct kfd_event_waiter *event_waiters;
        uint32_t i;

        event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter),
                                GFP_KERNEL);
        if (!event_waiters)
                return NULL;

        for (i = 0; i < num_events; i++)
                init_wait(&event_waiters[i].wait);

        return event_waiters;
}

static int init_event_waiter_get_status(struct kfd_process *p,
                                        struct kfd_event_waiter *waiter,
                                        uint32_t event_id)
{
        struct kfd_event *ev = lookup_event_by_id(p, event_id);

        if (!ev)
                return -EINVAL;

        waiter->event = ev;
        waiter->activated = ev->signaled;
        ev->signaled = ev->signaled && !ev->auto_reset;

        return 0;
}

static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
{
        struct kfd_event *ev = waiter->event;

        /* Only add to the wait list if we actually need to
         * wait on this event.
         */
        if (!waiter->activated)
                add_wait_queue(&ev->wq, &waiter->wait);
}

/* test_event_condition - Test condition of events being waited for
 * @all:           Return completion only if all events have signaled
 * @num_events:    Number of events to wait for
 * @event_waiters: Array of event waiters, one per event
 *
 * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
 * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
 * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
 * the events have been destroyed.
 */
static uint32_t test_event_condition(bool all, uint32_t num_events,
                                     struct kfd_event_waiter *event_waiters)
{
        uint32_t i;
        uint32_t activated_count = 0;

        for (i = 0; i < num_events; i++) {
                if (!event_waiters[i].event)
                        return KFD_IOC_WAIT_RESULT_FAIL;

                if (event_waiters[i].activated) {
                        if (!all)
                                return KFD_IOC_WAIT_RESULT_COMPLETE;

                        activated_count++;
                }
        }

        return activated_count == num_events ?
                KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}
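
/*
 * Worked example (illustrative only): waiting on 3 events with all=true
 * while only 2 waiters are activated returns KFD_IOC_WAIT_RESULT_TIMEOUT;
 * with all=false the same situation returns KFD_IOC_WAIT_RESULT_COMPLETE
 * as soon as the first activated waiter is found.
 */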

/*
 * Copy event specific data, if defined.
 * Currently only memory exception events have additional data to copy to user
 */
static int copy_signaled_event_data(uint32_t num_events,
                                    struct kfd_event_waiter *event_waiters,
                                    struct kfd_event_data __user *data)
{
        struct kfd_hsa_memory_exception_data *src;
        struct kfd_hsa_memory_exception_data __user *dst;
        struct kfd_event_waiter *waiter;
        struct kfd_event *event;
        uint32_t i;

        for (i = 0; i < num_events; i++) {
                waiter = &event_waiters[i];
                event = waiter->event;
                if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
                        dst = &data[i].memory_exception_data;
                        src = &event->memory_exception_data;
                        if (copy_to_user(dst, src,
                                sizeof(struct kfd_hsa_memory_exception_data)))
                                return -EFAULT;
                }
        }

        return 0;
}

static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
        if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
                return 0;

        if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
                return MAX_SCHEDULE_TIMEOUT;

        /*
         * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
         * but we consider them finite.
         * This hack is wrong, but nobody is likely to notice.
         */
        user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

        return msecs_to_jiffies(user_timeout_ms) + 1;
}
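
/*
 * Illustrative examples: KFD_EVENT_TIMEOUT_IMMEDIATE maps to a 0-jiffy
 * poll, KFD_EVENT_TIMEOUT_INFINITE maps to MAX_SCHEDULE_TIMEOUT, and a
 * finite value such as 1000 ms becomes msecs_to_jiffies(1000) + 1 so the
 * wait never rounds down below the requested duration.
 */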

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
        uint32_t i;

        for (i = 0; i < num_events; i++)
                if (waiters[i].event)
                        remove_wait_queue(&waiters[i].event->wq,
                                          &waiters[i].wait);

        kfree(waiters);
}

int kfd_wait_on_events(struct kfd_process *p,
                       uint32_t num_events, void __user *data,
                       bool all, uint32_t user_timeout_ms,
                       uint32_t *wait_result)
{
        struct kfd_event_data __user *events =
                        (struct kfd_event_data __user *) data;
        uint32_t i;
        int ret = 0;

        struct kfd_event_waiter *event_waiters = NULL;
        long timeout = user_timeout_to_jiffies(user_timeout_ms);

        event_waiters = alloc_event_waiters(num_events);
        if (!event_waiters) {
                ret = -ENOMEM;
                goto out;
        }

        mutex_lock(&p->event_mutex);

        for (i = 0; i < num_events; i++) {
                struct kfd_event_data event_data;

                if (copy_from_user(&event_data, &events[i],
                                   sizeof(struct kfd_event_data))) {
                        ret = -EFAULT;
                        goto out_unlock;
                }

                ret = init_event_waiter_get_status(p, &event_waiters[i],
                                                   event_data.event_id);
                if (ret)
                        goto out_unlock;
        }

        /* Check condition once. */
        *wait_result = test_event_condition(all, num_events, event_waiters);
        if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
                ret = copy_signaled_event_data(num_events,
                                               event_waiters, events);
                goto out_unlock;
        } else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
                /* This should not happen. Events shouldn't be
                 * destroyed while we're holding the event_mutex
                 */
                goto out_unlock;
        }

        /* Add to wait lists if we need to wait. */
        for (i = 0; i < num_events; i++)
                init_event_waiter_add_to_waitlist(&event_waiters[i]);

        mutex_unlock(&p->event_mutex);

        while (true) {
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                if (signal_pending(current)) {
                        /*
                         * This is wrong when a nonzero, non-infinite timeout
                         * is specified. We need to use
                         * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
                         * contains a union with data for each user and it's
                         * in generic kernel code that I don't want to
                         * touch yet.
                         */
                        ret = -ERESTARTSYS;
                        break;
                }

                /* Set task state to interruptible sleep before
                 * checking wake-up conditions. A concurrent wake-up
                 * will put the task back into runnable state. In that
                 * case schedule_timeout will not put the task to
                 * sleep and we'll get a chance to re-check the
                 * updated conditions almost immediately. Otherwise,
                 * this race condition would lead to a soft hang or a
                 * very long sleep.
                 */
                set_current_state(TASK_INTERRUPTIBLE);

                *wait_result = test_event_condition(all, num_events,
                                                    event_waiters);
                if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
                        break;

                if (timeout <= 0)
                        break;

                timeout = schedule_timeout(timeout);
        }
        __set_current_state(TASK_RUNNING);

        /* copy_signaled_event_data may sleep. So this has to happen
         * after the task state is set back to RUNNING.
         */
        if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
                ret = copy_signaled_event_data(num_events,
                                               event_waiters, events);

        mutex_lock(&p->event_mutex);
out_unlock:
        free_waiters(num_events, event_waiters);
        mutex_unlock(&p->event_mutex);
out:
        if (ret)
                *wait_result = KFD_IOC_WAIT_RESULT_FAIL;
        else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
                ret = -EIO;

        return ret;
}

int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
        unsigned long pfn;
        struct kfd_signal_page *page;
        int ret;

        /* check required size doesn't exceed the allocated size */
        if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
                        get_order(vma->vm_end - vma->vm_start)) {
                pr_err("Event page mmap requested illegal size\n");
                return -EINVAL;
        }

        page = p->signal_page;
        if (!page) {
                /* Probably KFD bug, but mmap is user-accessible. */
                pr_debug("Signal page could not be found\n");
                return -EINVAL;
        }

        pfn = __pa(page->kernel_address);
        pfn >>= PAGE_SHIFT;

        vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
                       | VM_DONTDUMP | VM_PFNMAP;

        pr_debug("Mapping signal page\n");
        pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
        pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
        pr_debug("     pfn                 == 0x%016lX\n", pfn);
        pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
        pr_debug("     size                == 0x%08lX\n",
                 vma->vm_end - vma->vm_start);

        page->user_address = (uint64_t __user *)vma->vm_start;

        /* mapping the page to user process */
        ret = remap_pfn_range(vma, vma->vm_start, pfn,
                              vma->vm_end - vma->vm_start, vma->vm_page_prot);
        if (!ret)
                p->signal_mapped_size = vma->vm_end - vma->vm_start;

        return ret;
}
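
/*
 * Illustrative sketch of the expected user-mode usage (an assumption, not
 * part of this file): after kfd_event_create() returns event_page_offset
 * and event_slot_index, user mode would do something like
 *
 *      slots = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                   kfd_fd, event_page_offset);
 *      ... later, read or wait on slots[event_slot_index] ...
 *
 * which is what makes the remap_pfn_range() mapping above visible to the
 * process.
 */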

/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
                                             int type, void *event_data)
{
        struct kfd_hsa_memory_exception_data *ev_data;
        struct kfd_event *ev;
        uint32_t id;
        bool send_signal = true;

        ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

        id = KFD_FIRST_NONSIGNAL_EVENT_ID;
        idr_for_each_entry_continue(&p->event_idr, ev, id)
                if (ev->type == type) {
                        send_signal = false;
                        dev_dbg(kfd_device,
                                "Event found: id %X type %d",
                                ev->event_id, ev->type);
                        set_event(ev);
                        if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
                                ev->memory_exception_data = *ev_data;
                }

        if (type == KFD_EVENT_TYPE_MEMORY) {
                dev_warn(kfd_device,
                        "Sending SIGSEGV to HSA Process with PID %d ",
                        p->lead_thread->pid);
                send_sig(SIGSEGV, p->lead_thread, 0);
        }

        /* Send SIGTERM if no event of type "type" has been found */
        if (send_signal) {
                if (send_sigterm) {
                        dev_warn(kfd_device,
                                "Sending SIGTERM to HSA Process with PID %d ",
                                p->lead_thread->pid);
                        send_sig(SIGTERM, p->lead_thread, 0);
                } else {
                        dev_err(kfd_device,
                                "HSA Process (PID %d) got unhandled exception",
                                p->lead_thread->pid);
                }
        }
}

#ifdef KFD_SUPPORT_IOMMU_V2
void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
                            unsigned long address, bool is_write_requested,
                            bool is_execute_requested)
{
        struct kfd_hsa_memory_exception_data memory_exception_data;
        struct vm_area_struct *vma;

        /*
         * Because we are called from arbitrary context (workqueue) as opposed
         * to process context, kfd_process could attempt to exit while we are
         * running so the lookup function increments the process ref count.
         */
        struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
        struct mm_struct *mm;

        if (!p)
                return; /* Presumably process exited. */

        /* Take a safe reference to the mm_struct, which may otherwise
         * disappear even while the kfd_process is still referenced.
         */
        mm = get_task_mm(p->lead_thread);
        if (!mm) {
                kfd_unref_process(p);
                return; /* Process is exiting */
        }

        memset(&memory_exception_data, 0, sizeof(memory_exception_data));

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);

        memory_exception_data.gpu_id = dev->id;
        memory_exception_data.va = address;
        /* Set failure reason */
        memory_exception_data.failure.NotPresent = 1;
        memory_exception_data.failure.NoExecute = 0;
        memory_exception_data.failure.ReadOnly = 0;
        if (vma && address >= vma->vm_start) {
                memory_exception_data.failure.NotPresent = 0;

                if (is_write_requested && !(vma->vm_flags & VM_WRITE))
                        memory_exception_data.failure.ReadOnly = 1;
                else
                        memory_exception_data.failure.ReadOnly = 0;

                if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
                        memory_exception_data.failure.NoExecute = 1;
                else
                        memory_exception_data.failure.NoExecute = 0;
        }

        up_read(&mm->mmap_sem);
        mmput(mm);

        pr_debug("notpresent %d, noexecute %d, readonly %d\n",
                 memory_exception_data.failure.NotPresent,
                 memory_exception_data.failure.NoExecute,
                 memory_exception_data.failure.ReadOnly);

        /* Workaround on Raven to not kill the process when memory is freed
         * before IOMMU is able to finish processing all the excessive PPRs
         */
        if (dev->device_info->asic_family != CHIP_RAVEN) {
                mutex_lock(&p->event_mutex);

                /* Lookup events by type and signal them */
                lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
                                                 &memory_exception_data);

                mutex_unlock(&p->event_mutex);
        }

        kfd_unref_process(p);
}
#endif /* KFD_SUPPORT_IOMMU_V2 */

void kfd_signal_hw_exception_event(unsigned int pasid)
{
        /*
         * Because we are called from arbitrary context (workqueue) as opposed
         * to process context, kfd_process could attempt to exit while we are
         * running so the lookup function increments the process ref count.
         */
        struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

        if (!p)
                return; /* Presumably process exited. */

        mutex_lock(&p->event_mutex);

        /* Lookup events by type and signal them */
        lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

        mutex_unlock(&p->event_mutex);
        kfd_unref_process(p);
}

void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
                               struct kfd_vm_fault_info *info)
{
        struct kfd_event *ev;
        uint32_t id;
        struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
        struct kfd_hsa_memory_exception_data memory_exception_data;

        if (!p)
                return; /* Presumably process exited. */
        memset(&memory_exception_data, 0, sizeof(memory_exception_data));
        memory_exception_data.gpu_id = dev->id;
        memory_exception_data.failure.imprecise = 1;
        /* Set failure reason */
        if (info) {
                memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
                memory_exception_data.failure.NotPresent =
                        info->prot_valid ? 1 : 0;
                memory_exception_data.failure.NoExecute =
                        info->prot_exec ? 1 : 0;
                memory_exception_data.failure.ReadOnly =
                        info->prot_write ? 1 : 0;
                memory_exception_data.failure.imprecise = 0;
        }
        mutex_lock(&p->event_mutex);

        id = KFD_FIRST_NONSIGNAL_EVENT_ID;
        idr_for_each_entry_continue(&p->event_idr, ev, id)
                if (ev->type == KFD_EVENT_TYPE_MEMORY) {
                        ev->memory_exception_data = memory_exception_data;
                        set_event(ev);
                }

        mutex_unlock(&p->event_mutex);
        kfd_unref_process(p);
}

void kfd_signal_reset_event(struct kfd_dev *dev)
{
        struct kfd_hsa_hw_exception_data hw_exception_data;
        struct kfd_process *p;
        struct kfd_event *ev;
        unsigned int temp;
        uint32_t id, idx;

        /* Whole gpu reset caused by GPU hang and memory is lost */
        memset(&hw_exception_data, 0, sizeof(hw_exception_data));
        hw_exception_data.gpu_id = dev->id;
        hw_exception_data.memory_lost = 1;

        idx = srcu_read_lock(&kfd_processes_srcu);
        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                mutex_lock(&p->event_mutex);
                id = KFD_FIRST_NONSIGNAL_EVENT_ID;
                idr_for_each_entry_continue(&p->event_idr, ev, id)
                        if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
                                ev->hw_exception_data = hw_exception_data;
                                set_event(ev);
                        }
                mutex_unlock(&p->event_mutex);
        }
        srcu_read_unlock(&kfd_processes_srcu, idx);
}