Lines Matching +full:signal +full:- +full:id
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
27 #include <linux/sched/signal.h>
43 bool event_age_enabled; /* set to true when last_event_age is non-zero */
47 * Each signal event needs a 64-bit signal slot where the signaler will write
51 * Individual signal events use their event_id as slot index.
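A minimal user-space sketch of the slot protocol described above (the UNSIGNALED_EVENT_SLOT sentinel value and the 64-bit slot width are assumptions inferred from this file; only the event_id-as-index scheme is taken directly from it):

#include <stdint.h>
#include <stdbool.h>

#define UNSIGNALED_EVENT_SLOT	((uint64_t)-1)	/* assumed sentinel value */

/* The signaler writes any non-sentinel value into the event's slot;
 * the driver tests the slot and re-arms it with the sentinel.
 */
static inline void signal_event_slot(uint64_t *slots, uint32_t event_id)
{
	slots[event_id] = (uint64_t)event_id;
}

static inline bool event_slot_signaled(const uint64_t *slots, uint32_t event_id)
{
	return slots[event_id] != UNSIGNALED_EVENT_SLOT;
}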
61 return page->kernel_address; in page_slots()
82 page->kernel_address = backing_store; in allocate_signal_page()
83 page->need_to_free_pages = true; in allocate_signal_page()
84 pr_debug("Allocated new event signal page at %p, for process %p\n", in allocate_signal_page()
98 int id; in allocate_event_notification_slot() local
100 if (!p->signal_page) { in allocate_event_notification_slot()
101 p->signal_page = allocate_signal_page(p); in allocate_event_notification_slot()
102 if (!p->signal_page) in allocate_event_notification_slot()
103 return -ENOMEM; in allocate_event_notification_slot()
105 p->signal_mapped_size = 256*8;	/* legacy user mode: 256 8-byte slots */ in allocate_event_notification_slot()
109 id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1, in allocate_event_notification_slot()
113 * Compatibility with old user mode: Only use signal slots in allocate_event_notification_slot()
118 id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8, in allocate_event_notification_slot()
121 if (id < 0) in allocate_event_notification_slot()
122 return id; in allocate_event_notification_slot()
124 ev->event_id = id; in allocate_event_notification_slot()
125 page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT; in allocate_event_notification_slot()
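For reference, idr_alloc() hands out the lowest free ID in the half-open range [start, end) and binds it to the given pointer. That is why the restore path above can pin one exact ID while the compatibility path confines new IDs to the mapped 8-byte slots (shown with a plain value instead of the pointer dereference used above):

/* Restore: [id, id + 1) contains exactly one ID, so the call either
 * returns the checkpointed ID or fails.
 */
ret = idr_alloc(&p->event_idr, ev, id, id + 1, GFP_KERNEL);

/* Compat: any free ID below the number of mapped 8-byte signal slots. */
ret = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8, GFP_KERNEL);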
131 * Assumes that p->event_mutex or rcu_read_lock() is held and of course that p is
134 static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id) in lookup_event_by_id() argument
136 return idr_find(&p->event_idr, id); in lookup_event_by_id()
140 * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
142 * @id: ID to look up
143 * @bits: Number of valid bits in @id
145 * Finds the first signaled event with a matching partial ID. If no
146 * matching signaled event is found, returns NULL. In that case the
147 * caller should assume that the partial ID is invalid and do an
148 * exhaustive search of all signaled events.
149 *
150 * If multiple events with the same partial ID signal at the same
151 * time, they will be found one interrupt at a time, not necessarily
152 * in the same order the interrupts occurred. As long as the number
153 * of interrupts is correct, all signaled events will be seen by the
154 * driver.
157 struct kfd_process *p, uint32_t id, uint32_t bits) in lookup_signaled_event_by_partial_id() argument
161 if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT) in lookup_signaled_event_by_partial_id()
164 /* Fast path for the common case that @id is not a partial ID in lookup_signaled_event_by_partial_id()
168 if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT) in lookup_signaled_event_by_partial_id()
171 return idr_find(&p->event_idr, id); in lookup_signaled_event_by_partial_id()
177 for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) { in lookup_signaled_event_by_partial_id()
178 if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT) in lookup_signaled_event_by_partial_id()
181 ev = idr_find(&p->event_idr, id); in lookup_signaled_event_by_partial_id()
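To make the stride concrete: only the low bits of the ID delivered with the interrupt are trusted, so every candidate full ID is congruent to id modulo 1 << bits, and the loop above visits exactly those candidates. A standalone illustration (the limit is a stand-in, not the real KFD_SIGNAL_EVENT_LIMIT):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t limit = 4096;	/* stand-in for KFD_SIGNAL_EVENT_LIMIT */
	uint32_t bits = 8;		/* number of valid low bits */
	uint32_t id = 0x2a;		/* partial ID from the interrupt */

	/* Prints 0x02a, 0x12a, 0x22a, ... -- all IDs sharing the low 8 bits. */
	for (; id < limit; id += 1U << bits)
		printf("candidate full ID: 0x%03x\n", id);
	return 0;
}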
192 if (p->signal_mapped_size && in create_signal_event()
193 p->signal_event_count == p->signal_mapped_size / 8) { in create_signal_event()
194 if (!p->signal_event_limit_reached) { in create_signal_event()
195 pr_debug("Signal event wasn't created because limit was reached\n"); in create_signal_event()
196 p->signal_event_limit_reached = true; in create_signal_event()
198 return -ENOSPC; in create_signal_event()
203 pr_warn("Signal event wasn't created because the kernel is out of memory\n"); in create_signal_event()
207 p->signal_event_count++; in create_signal_event()
209 ev->user_signal_address = &p->signal_page->user_address[ev->event_id]; in create_signal_event()
210 pr_debug("Signal event number %zu created with id %d, address %p\n", in create_signal_event()
211 p->signal_event_count, ev->event_id, in create_signal_event()
212 ev->user_signal_address); in create_signal_event()
219 int id; in create_other_event() local
222 id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1, in create_other_event()
226 * intentional integer overflow to -1 without a compiler in create_other_event()
230 id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID, in create_other_event()
234 if (id < 0) in create_other_event()
235 return id; in create_other_event()
236 ev->event_id = id; in create_other_event()
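The "intentional integer overflow" in the comment refers to the exclusive upper bound handed to idr_alloc(): one past the last nonsignal event ID ends up non-positive in idr_alloc()'s int end parameter, and idr_alloc() documents end <= 0 as "no upper limit". A hedged sketch of the idea (the macro values are assumptions, not quoted from the real headers):

/* Assumed for illustration; see the driver headers for the real values. */
#define KFD_FIRST_NONSIGNAL_EVENT_ID	KFD_SIGNAL_EVENT_LIMIT
#define KFD_LAST_NONSIGNAL_EVENT_ID	UINT_MAX

id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
	       (uint64_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,	/* wraps to <= 0 */
	       GFP_KERNEL);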
243 int id; in kfd_event_init_process() local
245 mutex_init(&p->event_mutex); in kfd_event_init_process()
246 idr_init(&p->event_idr); in kfd_event_init_process()
247 p->signal_page = NULL; in kfd_event_init_process()
248 p->signal_event_count = 1; in kfd_event_init_process()
249 /* Allocate event ID 0. It is used for a fast path to ignore bogus events in kfd_event_init_process()
250 * that are sent by the CP without a context ID in kfd_event_init_process()
252 id = idr_alloc(&p->event_idr, NULL, 0, 1, GFP_KERNEL); in kfd_event_init_process()
253 if (id < 0) { in kfd_event_init_process()
254 idr_destroy(&p->event_idr); in kfd_event_init_process()
255 mutex_destroy(&p->event_mutex); in kfd_event_init_process()
256 return id; in kfd_event_init_process()
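Reserving ID 0 against a NULL pointer makes the fast path trivial: an interrupt that arrives without a context ID looks like event 0, and idr_find() resolves it to NULL in one cheap lookup, so there is never a real event to signal:

/* Done once per process above: ID 0 is allocated but maps to NULL,
 * so no real event can ever be created with that ID.
 */
id = idr_alloc(&p->event_idr, NULL, 0, 1, GFP_KERNEL);	/* id == 0 */

/* Interrupt path: bogus CP events without a context ID land here. */
ev = idr_find(&p->event_idr, 0);	/* NULL -- nothing to signal, drop it */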
266 spin_lock(&ev->lock); in destroy_event()
267 list_for_each_entry(waiter, &ev->wq.head, wait.entry) in destroy_event()
268 WRITE_ONCE(waiter->event, NULL); in destroy_event()
269 wake_up_all(&ev->wq); in destroy_event()
270 spin_unlock(&ev->lock); in destroy_event()
272 if (ev->type == KFD_EVENT_TYPE_SIGNAL || in destroy_event()
273 ev->type == KFD_EVENT_TYPE_DEBUG) in destroy_event()
274 p->signal_event_count--; in destroy_event()
276 idr_remove(&p->event_idr, ev->event_id); in destroy_event()
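Clearing waiter->event with WRITE_ONCE() under ev->lock is what lets sleeping waiters detect destruction: after waking they re-read their event pointer and bail out if it is NULL (compare the "/* event was destroyed */" check in copy_signaled_event_data() further down). A sketch of the reader half, with the struct shape inferred from the fields used throughout this file:

struct kfd_event_waiter {
	wait_queue_entry_t wait;
	struct kfd_event *event;	/* NULLed under ev->lock on destroy */
	bool activated;
	bool event_age_enabled;	/* set to true when last_event_age is non-zero */
};

/* Waiter side, after being woken: */
struct kfd_event *event = READ_ONCE(waiter->event);

if (!event)
	return -EINVAL;	/* event was destroyed while we slept */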
283 uint32_t id; in destroy_events() local
285 idr_for_each_entry(&p->event_idr, ev, id) in destroy_events()
288 idr_destroy(&p->event_idr); in destroy_events()
289 mutex_destroy(&p->event_mutex); in destroy_events()
298 struct kfd_signal_page *page = p->signal_page; in shutdown_signal_page()
301 if (page->need_to_free_pages) in shutdown_signal_page()
302 free_pages((unsigned long)page->kernel_address, in shutdown_signal_page()
316 return ev->type == KFD_EVENT_TYPE_SIGNAL || in event_can_be_gpu_signaled()
317 ev->type == KFD_EVENT_TYPE_DEBUG; in event_can_be_gpu_signaled()
322 return ev->type == KFD_EVENT_TYPE_SIGNAL; in event_can_be_cpu_signaled()
330 if (p->signal_page) in kfd_event_page_set()
331 return -EBUSY; in kfd_event_page_set()
335 return -ENOMEM; in kfd_event_page_set()
341 page->kernel_address = kernel_address; in kfd_event_page_set()
343 p->signal_page = page; in kfd_event_page_set()
344 p->signal_mapped_size = size; in kfd_event_page_set()
345 p->signal_handle = user_handle; in kfd_event_page_set()
357 if (p->signal_page) { in kfd_kmap_event_page()
359 return -EINVAL; in kfd_kmap_event_page()
364 pr_err("Getting device by id failed in %s\n", __func__); in kfd_kmap_event_page()
365 return -EINVAL; in kfd_kmap_event_page()
367 kfd = pdd->dev; in kfd_kmap_event_page()
377 return -EINVAL; in kfd_kmap_event_page()
404 return -ENOMEM; in kfd_event_create()
406 ev->type = event_type; in kfd_event_create()
407 ev->auto_reset = auto_reset; in kfd_event_create()
408 ev->signaled = false; in kfd_event_create()
410 spin_lock_init(&ev->lock); in kfd_event_create()
411 init_waitqueue_head(&ev->wq); in kfd_event_create()
415 mutex_lock(&p->event_mutex); in kfd_event_create()
423 *event_slot_index = ev->event_id; in kfd_event_create()
432 *event_id = ev->event_id; in kfd_event_create()
433 *event_trigger_data = ev->event_id; in kfd_event_create()
434 ev->event_age = 1; in kfd_event_create()
439 mutex_unlock(&p->event_mutex); in kfd_event_create()
456 return -ENOMEM; in kfd_criu_restore_event()
460 ret = -ENOMEM; in kfd_criu_restore_event()
465 ret = -EINVAL; in kfd_criu_restore_event()
471 ret = -EFAULT; in kfd_criu_restore_event()
476 if (ev_priv->user_handle) { in kfd_criu_restore_event()
477 ret = kfd_kmap_event_page(p, ev_priv->user_handle); in kfd_criu_restore_event()
482 ev->type = ev_priv->type; in kfd_criu_restore_event()
483 ev->auto_reset = ev_priv->auto_reset; in kfd_criu_restore_event()
484 ev->signaled = ev_priv->signaled; in kfd_criu_restore_event()
486 spin_lock_init(&ev->lock); in kfd_criu_restore_event()
487 init_waitqueue_head(&ev->wq); in kfd_criu_restore_event()
489 mutex_lock(&p->event_mutex); in kfd_criu_restore_event()
490 switch (ev->type) { in kfd_criu_restore_event()
493 ret = create_signal_event(devkfd, p, ev, &ev_priv->event_id); in kfd_criu_restore_event()
496 memcpy(&ev->memory_exception_data, in kfd_criu_restore_event()
497 &ev_priv->memory_exception_data, in kfd_criu_restore_event()
500 ret = create_other_event(p, ev, &ev_priv->event_id); in kfd_criu_restore_event()
503 memcpy(&ev->hw_exception_data, in kfd_criu_restore_event()
504 &ev_priv->hw_exception_data, in kfd_criu_restore_event()
507 ret = create_other_event(p, ev, &ev_priv->event_id); in kfd_criu_restore_event()
510 mutex_unlock(&p->event_mutex); in kfd_criu_restore_event()
538 return -ENOMEM; in kfd_criu_checkpoint_events()
541 idr_for_each_entry(&p->event_idr, ev, ev_id) { in kfd_criu_checkpoint_events()
550 ev_priv->object_type = KFD_CRIU_OBJECT_TYPE_EVENT; in kfd_criu_checkpoint_events()
553 if (i == 0 && p->signal_page) in kfd_criu_checkpoint_events()
554 ev_priv->user_handle = p->signal_handle; in kfd_criu_checkpoint_events()
556 ev_priv->event_id = ev->event_id; in kfd_criu_checkpoint_events()
557 ev_priv->auto_reset = ev->auto_reset; in kfd_criu_checkpoint_events()
558 ev_priv->type = ev->type; in kfd_criu_checkpoint_events()
559 ev_priv->signaled = ev->signaled; in kfd_criu_checkpoint_events()
561 if (ev_priv->type == KFD_EVENT_TYPE_MEMORY) in kfd_criu_checkpoint_events()
562 memcpy(&ev_priv->memory_exception_data, in kfd_criu_checkpoint_events()
563 &ev->memory_exception_data, in kfd_criu_checkpoint_events()
565 else if (ev_priv->type == KFD_EVENT_TYPE_HW_EXCEPTION) in kfd_criu_checkpoint_events()
566 memcpy(&ev_priv->hw_exception_data, in kfd_criu_checkpoint_events()
567 &ev->hw_exception_data, in kfd_criu_checkpoint_events()
570 pr_debug("Checkpointed event[%d] id = 0x%08x auto_reset = %x type = %x signaled = %x\n", in kfd_criu_checkpoint_events()
572 ev_priv->event_id, in kfd_criu_checkpoint_events()
573 ev_priv->auto_reset, in kfd_criu_checkpoint_events()
574 ev_priv->type, in kfd_criu_checkpoint_events()
575 ev_priv->signaled); in kfd_criu_checkpoint_events()
583 ret = -EFAULT; in kfd_criu_checkpoint_events()
595 uint32_t id; in kfd_get_num_events() local
598 idr_for_each_entry(&p->event_idr, ev, id) in kfd_get_num_events()
610 mutex_lock(&p->event_mutex); in kfd_event_destroy()
617 ret = -EINVAL; in kfd_event_destroy()
619 mutex_unlock(&p->event_mutex); in kfd_event_destroy()
627 /* Auto reset if the list is non-empty and we're waking in set_event()
628 * someone. waitqueue_active is safe here because we're in set_event()
629 * protected by the ev->lock, which is also held when in set_event()
630 * updating the wait queues in kfd_wait_on_events. in set_event()
632 ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq); in set_event()
633 if (!(++ev->event_age)) { in set_event()
635 ev->event_age = 2; in set_event()
639 list_for_each_entry(waiter, &ev->wq.head, wait.entry) in set_event()
640 WRITE_ONCE(waiter->activated, true); in set_event()
642 wake_up_all(&ev->wq); in set_event()
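Two details in set_event() are easy to miss. An auto-reset event stays signaled only if nobody is waiting, because the waiters being woken consume the signal. And event_age must never revisit its reserved values: events start at age 1 (ev->event_age is initialized to 1 in kfd_event_create() above), and a last_event_age of 0 from user space disables age checking entirely (see the event_age_enabled comment near the top of this listing), so on 64-bit wraparound the counter restarts at 2:

#include <stdint.h>

static uint64_t bump_event_age(uint64_t event_age)
{
	if (!(++event_age))	/* the increment yields 0 only on wraparound */
		event_age = 2;	/* skip 0 (age disabled) and 1 (initial age) */
	return event_age;
}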
655 ret = -EINVAL; in kfd_set_event()
658 spin_lock(&ev->lock); in kfd_set_event()
663 ret = -EINVAL; in kfd_set_event()
665 spin_unlock(&ev->lock); in kfd_set_event()
673 ev->signaled = false; in reset_event()
686 ret = -EINVAL; in kfd_reset_event()
689 spin_lock(&ev->lock); in kfd_reset_event()
694 ret = -EINVAL; in kfd_reset_event()
696 spin_unlock(&ev->lock); in kfd_reset_event()
705 WRITE_ONCE(page_slots(p->signal_page)[ev->event_id], UNSIGNALED_EVENT_SLOT); in acknowledge_signal()
713 spin_lock(&ev->lock); in set_event_from_interrupt()
715 spin_unlock(&ev->lock); in set_event_from_interrupt()
741 } else if (p->signal_page) { in kfd_signal_event_interrupt()
743 * Partial ID lookup failed. Assume that the event ID in kfd_signal_event_interrupt()
747 uint64_t *slots = page_slots(p->signal_page); in kfd_signal_event_interrupt()
748 uint32_t id; in kfd_signal_event_interrupt() local
751 pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n", in kfd_signal_event_interrupt()
754 if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) { in kfd_signal_event_interrupt()
758 idr_for_each_entry(&p->event_idr, ev, id) { in kfd_signal_event_interrupt()
759 if (id >= KFD_SIGNAL_EVENT_LIMIT) in kfd_signal_event_interrupt()
762 if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT) in kfd_signal_event_interrupt()
767 * iterate over the signal slots and lookup in kfd_signal_event_interrupt()
770 for (id = 1; id < KFD_SIGNAL_EVENT_LIMIT; id++) in kfd_signal_event_interrupt()
771 if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT) { in kfd_signal_event_interrupt()
772 ev = lookup_event_by_id(p, id); in kfd_signal_event_interrupt()
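The fallback above picks between two exhaustive scans with a simple density heuristic: walking the IDR costs one slot probe per registered event, while walking the slot array costs one read per slot no matter how many events exist. Restated as an illustrative helper (the function name is invented; the threshold is the one used above):

/* N = registered signal events, S = KFD_SIGNAL_EVENT_LIMIT slots.
 * IDR walk:  O(N) slot probes -- wins when events are sparse.
 * Slot walk: O(S) slot reads  -- wins when events are dense.
 */
static bool prefer_idr_walk(unsigned int signal_event_count)
{
	return signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64;
}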
802 struct kfd_event *ev = lookup_event_by_id(p, event_data->event_id); in init_event_waiter()
805 return -EINVAL; in init_event_waiter()
807 spin_lock(&ev->lock); in init_event_waiter()
808 waiter->event = ev; in init_event_waiter()
809 waiter->activated = ev->signaled; in init_event_waiter()
810 ev->signaled = ev->signaled && !ev->auto_reset; in init_event_waiter()
813 if (waiter->event->type == KFD_EVENT_TYPE_SIGNAL && in init_event_waiter()
814 event_data->signal_event_data.last_event_age) { in init_event_waiter()
815 waiter->event_age_enabled = true; in init_event_waiter()
816 if (ev->event_age != event_data->signal_event_data.last_event_age) in init_event_waiter()
817 waiter->activated = true; in init_event_waiter()
820 if (!waiter->activated) in init_event_waiter()
821 add_wait_queue(&ev->wq, &waiter->wait); in init_event_waiter()
822 spin_unlock(&ev->lock); in init_event_waiter()
827 /* test_event_condition - Test condition of events being waited for
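The body of test_event_condition() is elided by the match filter. A plausible reconstruction from the waiter fields used elsewhere in this listing (treat it as a sketch, not the quoted upstream body; KFD_IOC_WAIT_RESULT_* are the uAPI wait result codes):

static uint32_t test_event_condition(bool all, uint32_t num_events,
				     struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (!READ_ONCE(event_waiters[i].event))
			return KFD_IOC_WAIT_RESULT_FAIL;	/* event destroyed */

		if (READ_ONCE(event_waiters[i].activated)) {
			if (!all)
				return KFD_IOC_WAIT_RESULT_COMPLETE;
			activated_count++;
		}
	}

	return activated_count == num_events ?
	       KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}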
875 event = waiter->event; in copy_signaled_event_data()
877 return -EINVAL; /* event was destroyed */ in copy_signaled_event_data()
878 if (waiter->activated) { in copy_signaled_event_data()
879 if (event->type == KFD_EVENT_TYPE_MEMORY) { in copy_signaled_event_data()
881 src = &event->memory_exception_data; in copy_signaled_event_data()
883 } else if (event->type == KFD_EVENT_TYPE_HW_EXCEPTION) { in copy_signaled_event_data()
885 src = &event->hw_exception_data; in copy_signaled_event_data()
887 } else if (event->type == KFD_EVENT_TYPE_SIGNAL && in copy_signaled_event_data()
888 waiter->event_age_enabled) { in copy_signaled_event_data()
890 src = &event->event_age; in copy_signaled_event_data()
894 return -EFAULT; in copy_signaled_event_data()
910 * msecs_to_jiffies interprets all values above 2^31-1 as infinite, in user_timeout_to_jiffies()
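The rest of user_timeout_to_jiffies() is elided; a hedged sketch of the conversion this comment guards (KFD_EVENT_TIMEOUT_IMMEDIATE and KFD_EVENT_TIMEOUT_INFINITE are the uAPI sentinels 0 and 0xFFFFFFFF, which pass through untouched; the clamp is an assumption consistent with the comment, not the quoted upstream body):

static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE ||
	    user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return user_timeout_ms;	/* sentinels are handled by the caller */

	/* Keep a huge-but-finite user timeout finite: clamp below the
	 * 2^31-1 ms threshold where msecs_to_jiffies() means "infinite".
	 */
	if (user_timeout_ms > INT_MAX)
		user_timeout_ms = INT_MAX;

	return msecs_to_jiffies(user_timeout_ms);
}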
926 spin_lock(&waiters[i].event->lock); in free_waiters()
927 remove_wait_queue(&waiters[i].event->wq, in free_waiters()
930 waiters[i].event && waiters[i].event->auto_reset) in free_waiters()
932 spin_unlock(&waiters[i].event->lock); in free_waiters()
953 ret = -ENOMEM; in kfd_wait_on_events()
957 /* Use p->event_mutex here to protect against concurrent creation and in kfd_wait_on_events()
960 mutex_lock(&p->event_mutex); in kfd_wait_on_events()
967 ret = -EFAULT; in kfd_wait_on_events()
989 mutex_unlock(&p->event_mutex); in kfd_wait_on_events()
993 ret = -EINTR; in kfd_wait_on_events()
998 ret = -ERESTARTSYS; in kfd_wait_on_events()
1002 max(0l, timeout-1)); in kfd_wait_on_events()
1007 * checking wake-up conditions. A concurrent wake-up in kfd_wait_on_events()
1010 * sleep and we'll get a chance to re-check the in kfd_wait_on_events()
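This is the canonical lost-wakeup guard: the task state changes before the wait condition is re-tested, so a wake_up() that slips in between simply resets the task to TASK_RUNNING and the subsequent schedule_timeout() returns promptly instead of sleeping through the wakeup. The usual shape of the pattern (wait_condition_true() is a placeholder, not a function from this file):

set_current_state(TASK_INTERRUPTIBLE);

if (!wait_condition_true())	/* re-test AFTER changing the task state */
	timeout = schedule_timeout(timeout);

__set_current_state(TASK_RUNNING);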
1029 mutex_lock(&p->event_mutex); in kfd_wait_on_events()
1035 * still exists. Therefore this must be under the p->event_mutex in kfd_wait_on_events()
1043 free_waiters(num_events, event_waiters, ret == -ERESTARTSYS); in kfd_wait_on_events()
1044 mutex_unlock(&p->event_mutex); in kfd_wait_on_events()
1049 ret = -EIO; in kfd_wait_on_events()
1062 get_order(vma->vm_end - vma->vm_start)) { in kfd_event_mmap()
1064 return -EINVAL; in kfd_event_mmap()
1067 page = p->signal_page; in kfd_event_mmap()
1069 /* Probably KFD bug, but mmap is user-accessible. */ in kfd_event_mmap()
1070 pr_debug("Signal page could not be found\n"); in kfd_event_mmap()
1071 return -EINVAL; in kfd_event_mmap()
1074 pfn = __pa(page->kernel_address); in kfd_event_mmap()
1080 pr_debug("Mapping signal page\n"); in kfd_event_mmap()
1081 pr_debug(" start user address == 0x%08lx\n", vma->vm_start); in kfd_event_mmap()
1082 pr_debug(" end user address == 0x%08lx\n", vma->vm_end); in kfd_event_mmap()
1084 pr_debug(" vm_flags == 0x%08lX\n", vma->vm_flags); in kfd_event_mmap()
1086 vma->vm_end - vma->vm_start); in kfd_event_mmap()
1088 page->user_address = (uint64_t __user *)vma->vm_start; in kfd_event_mmap()
1091 ret = remap_pfn_range(vma, vma->vm_start, pfn, in kfd_event_mmap()
1092 vma->vm_end - vma->vm_start, vma->vm_page_prot); in kfd_event_mmap()
1094 p->signal_mapped_size = vma->vm_end - vma->vm_start; in kfd_event_mmap()
1107 uint32_t id; in lookup_events_by_type_and_signal() local
1114 id = KFD_FIRST_NONSIGNAL_EVENT_ID; in lookup_events_by_type_and_signal()
1115 idr_for_each_entry_continue(&p->event_idr, ev, id) in lookup_events_by_type_and_signal()
1116 if (ev->type == type) { in lookup_events_by_type_and_signal()
1119 "Event found: id %X type %d", in lookup_events_by_type_and_signal()
1120 ev->event_id, ev->type); in lookup_events_by_type_and_signal()
1121 spin_lock(&ev->lock); in lookup_events_by_type_and_signal()
1123 if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data) in lookup_events_by_type_and_signal()
1124 ev->memory_exception_data = *ev_data; in lookup_events_by_type_and_signal()
1125 spin_unlock(&ev->lock); in lookup_events_by_type_and_signal()
1131 p->lead_thread->pid, p->pasid); in lookup_events_by_type_and_signal()
1132 send_sig(SIGSEGV, p->lead_thread, 0); in lookup_events_by_type_and_signal()
1140 p->lead_thread->pid, p->pasid); in lookup_events_by_type_and_signal()
1141 send_sig(SIGTERM, p->lead_thread, 0); in lookup_events_by_type_and_signal()
1145 p->lead_thread->pid, p->pasid); in lookup_events_by_type_and_signal()
1173 uint32_t id; in kfd_signal_vm_fault_event() local
1181 user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id); in kfd_signal_vm_fault_event()
1182 if (unlikely(user_gpu_id == -EINVAL)) { in kfd_signal_vm_fault_event()
1183 WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id); in kfd_signal_vm_fault_event()
1195 memory_exception_data.va = (info->page_addr) << in kfd_signal_vm_fault_event()
1198 info->prot_valid ? 1 : 0; in kfd_signal_vm_fault_event()
1200 info->prot_exec ? 1 : 0; in kfd_signal_vm_fault_event()
1202 info->prot_write ? 1 : 0; in kfd_signal_vm_fault_event()
1209 id = KFD_FIRST_NONSIGNAL_EVENT_ID; in kfd_signal_vm_fault_event()
1210 idr_for_each_entry_continue(&p->event_idr, ev, id) in kfd_signal_vm_fault_event()
1211 if (ev->type == KFD_EVENT_TYPE_MEMORY) { in kfd_signal_vm_fault_event()
1212 spin_lock(&ev->lock); in kfd_signal_vm_fault_event()
1213 ev->memory_exception_data = data ? *data : in kfd_signal_vm_fault_event()
1216 spin_unlock(&ev->lock); in kfd_signal_vm_fault_event()
1230 uint32_t id, idx; in kfd_signal_reset_event() local
1231 int reset_cause = atomic_read(&dev->sram_ecc_flag) ? in kfd_signal_reset_event()
1246 int user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id); in kfd_signal_reset_event()
1248 if (unlikely(user_gpu_id == -EINVAL)) { in kfd_signal_reset_event()
1249 WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id); in kfd_signal_reset_event()
1255 id = KFD_FIRST_NONSIGNAL_EVENT_ID; in kfd_signal_reset_event()
1256 idr_for_each_entry_continue(&p->event_idr, ev, id) { in kfd_signal_reset_event()
1257 if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) { in kfd_signal_reset_event()
1258 spin_lock(&ev->lock); in kfd_signal_reset_event()
1259 ev->hw_exception_data = hw_exception_data; in kfd_signal_reset_event()
1260 ev->hw_exception_data.gpu_id = user_gpu_id; in kfd_signal_reset_event()
1262 spin_unlock(&ev->lock); in kfd_signal_reset_event()
1264 if (ev->type == KFD_EVENT_TYPE_MEMORY && in kfd_signal_reset_event()
1266 spin_lock(&ev->lock); in kfd_signal_reset_event()
1267 ev->memory_exception_data = memory_exception_data; in kfd_signal_reset_event()
1268 ev->memory_exception_data.gpu_id = user_gpu_id; in kfd_signal_reset_event()
1270 spin_unlock(&ev->lock); in kfd_signal_reset_event()
1285 uint32_t id = KFD_FIRST_NONSIGNAL_EVENT_ID; in kfd_signal_poison_consumed_event() local
1291 user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id); in kfd_signal_poison_consumed_event()
1292 if (unlikely(user_gpu_id == -EINVAL)) { in kfd_signal_poison_consumed_event()
1293 WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id); in kfd_signal_poison_consumed_event()
1309 idr_for_each_entry_continue(&p->event_idr, ev, id) { in kfd_signal_poison_consumed_event()
1310 if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) { in kfd_signal_poison_consumed_event()
1311 spin_lock(&ev->lock); in kfd_signal_poison_consumed_event()
1312 ev->hw_exception_data = hw_exception_data; in kfd_signal_poison_consumed_event()
1314 spin_unlock(&ev->lock); in kfd_signal_poison_consumed_event()
1317 if (ev->type == KFD_EVENT_TYPE_MEMORY) { in kfd_signal_poison_consumed_event()
1318 spin_lock(&ev->lock); in kfd_signal_poison_consumed_event()
1319 ev->memory_exception_data = memory_exception_data; in kfd_signal_poison_consumed_event()
1321 spin_unlock(&ev->lock); in kfd_signal_poison_consumed_event()
1327 /* user application will handle SIGBUS signal */ in kfd_signal_poison_consumed_event()
1328 send_sig(SIGBUS, p->lead_thread, 0); in kfd_signal_poison_consumed_event()