Lines Matching +full:signal +full:-id
26 #include <linux/sched/signal.h>
46 * Each signal event needs a 64-bit signal slot where the signaler will write
47 * a 1 before sending an interrupt. (This is needed because some interrupts
48 * do not contain enough spare data bits to identify an event.)
50 * Individual signal events use their event_id as slot index.
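
The layout the comment describes can be sketched in a few lines of userspace C; the KFD_SIGNAL_EVENT_LIMIT and UNSIGNALED_EVENT_SLOT values below are assumptions for illustration, not taken from the matched lines:

    #include <stdint.h>
    #include <stdio.h>

    #define KFD_SIGNAL_EVENT_LIMIT 4096            /* assumed slot count per process */
    #define UNSIGNALED_EVENT_SLOT  ((uint64_t)-1)  /* assumed sentinel value */

    int main(void)
    {
        static uint64_t slots[KFD_SIGNAL_EVENT_LIMIT]; /* stands in for the signal page */
        uint32_t event_id = 42;                        /* event_id doubles as slot index */

        for (uint32_t i = 0; i < KFD_SIGNAL_EVENT_LIMIT; i++)
            slots[i] = UNSIGNALED_EVENT_SLOT;

        slots[event_id] = 1; /* the signaler writes a 1 before raising the interrupt */
        printf("slot %u signaled: %d\n", event_id,
               slots[event_id] != UNSIGNALED_EVENT_SLOT);
        return 0;
    }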
61 return page->kernel_address; in page_slots()
82 page->kernel_address = backing_store; in allocate_signal_page()
83 page->need_to_free_pages = true; in allocate_signal_page()
84 pr_debug("Allocated new event signal page at %p, for process %p\n", in allocate_signal_page()
97 int id; in allocate_event_notification_slot() local
99 if (!p->signal_page) { in allocate_event_notification_slot()
100 p->signal_page = allocate_signal_page(p); in allocate_event_notification_slot()
101 if (!p->signal_page) in allocate_event_notification_slot()
102 return -ENOMEM; in allocate_event_notification_slot()
104 p->signal_mapped_size = 256*8; in allocate_event_notification_slot()
108 * Compatibility with old user mode: Only use signal slots in allocate_event_notification_slot()
109 * that fit in the mapped size. in allocate_event_notification_slot()
113 id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8, in allocate_event_notification_slot()
115 if (id < 0) in allocate_event_notification_slot()
116 return id; in allocate_event_notification_slot()
118 ev->event_id = id; in allocate_event_notification_slot()
119 page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT; in allocate_event_notification_slot()
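
Each signal slot is one 64-bit word, so the idr_alloc range above caps new IDs at signal_mapped_size / 8. A quick sketch of that arithmetic; usable_slots is a hypothetical helper, not a driver function:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the "signal_mapped_size / 8" bound
     * passed to idr_alloc above: one 64-bit slot per 8 mapped bytes. */
    static uint32_t usable_slots(uint64_t signal_mapped_size)
    {
        return (uint32_t)(signal_mapped_size / sizeof(uint64_t));
    }

    int main(void)
    {
        printf("%u\n", usable_slots(256 * 8));  /* oldest user mode: 256 slots */
        printf("%u\n", usable_slots(4096 * 8)); /* full-size mapping: 4096 slots */
        return 0;
    }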
125 * Assumes that p->event_mutex is held and of course that p is not going
126 * away (current or locked).
128 static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id) in lookup_event_by_id() argument
130 return idr_find(&p->event_idr, id); in lookup_event_by_id()
134 * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
136 * @id: ID to look up
137 * @bits: Number of valid bits in @id
139 * Finds the first signaled event with a matching partial ID. If no
140 * matching signaled event is found, returns NULL. In that case the
141 * caller should assume that the partial ID is invalid and do an
142 * exhaustive search of all signaled events.
143 *
144 * If multiple events with the same partial ID signal at the same
151 struct kfd_process *p, uint32_t id, uint32_t bits) in lookup_signaled_event_by_partial_id() argument
155 if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT) in lookup_signaled_event_by_partial_id()
158 /* Fast path for the common case that @id is not a partial ID in lookup_signaled_event_by_partial_id()
159 * and we only need a single lookup. in lookup_signaled_event_by_partial_id()
162 if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT) in lookup_signaled_event_by_partial_id()
165 return idr_find(&p->event_idr, id); in lookup_signaled_event_by_partial_id()
171 for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) { in lookup_signaled_event_by_partial_id()
172 if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT) in lookup_signaled_event_by_partial_id()
175 ev = idr_find(&p->event_idr, id); in lookup_signaled_event_by_partial_id()
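
The candidate walk above reproduces in plain C: given only the low @bits of an ID, every candidate differs by a multiple of 1 << bits. A minimal sketch, with KFD_SIGNAL_EVENT_LIMIT assumed as before:

    #include <stdint.h>
    #include <stdio.h>

    #define KFD_SIGNAL_EVENT_LIMIT 4096 /* assumed, as above */

    int main(void)
    {
        uint32_t partial_id = 5; /* low bits delivered with the interrupt */
        uint32_t bits = 8;       /* number of valid bits in partial_id */

        /* Same stride as the driver loop: candidates differ by 1 << bits,
         * here 5, 261, 517, ... up to the slot limit. */
        for (uint32_t id = partial_id; id < KFD_SIGNAL_EVENT_LIMIT; id += 1U << bits)
            printf("candidate event id: %u\n", id);
        return 0;
    }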
187 if (p->signal_mapped_size && in create_signal_event()
188 p->signal_event_count == p->signal_mapped_size / 8) { in create_signal_event()
189 if (!p->signal_event_limit_reached) { in create_signal_event()
190 pr_debug("Signal event wasn't created because limit was reached\n"); in create_signal_event()
191 p->signal_event_limit_reached = true; in create_signal_event()
193 return -ENOSPC; in create_signal_event()
198 pr_warn("Signal event wasn't created because out of kernel memory\n"); in create_signal_event()
202 p->signal_event_count++; in create_signal_event()
204 ev->user_signal_address = &p->signal_page->user_address[ev->event_id]; in create_signal_event()
205 pr_debug("Signal event number %zu created with id %d, address %p\n", in create_signal_event()
206 p->signal_event_count, ev->event_id, in create_signal_event()
207 ev->user_signal_address); in create_signal_event()
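
create_signal_event() derives the user-visible slot address by plain array indexing into the mmap'ed page; a sketch with a fabricated base address, illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Fake base of the mmap'ed signal page; purely illustrative. */
        uint64_t *user_address = (uint64_t *)0x7f0000000000ULL;
        uint32_t event_id = 7;

        /* &user_address[event_id]: base plus event_id * 8 bytes. */
        uint64_t *user_signal_address = &user_address[event_id];
        printf("%p\n", (void *)user_signal_address); /* base + 0x38 */
        return 0;
    }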
215 * intentional integer overflow to -1 without a compiler in create_other_event()
216 * warning. in create_other_event()
219 int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID, in create_other_event() local
223 if (id < 0) in create_other_event()
224 return id; in create_other_event()
225 ev->event_id = id; in create_other_event()
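
The overflow the comment refers to fits in two lines of portable C; the exact KFD_LAST_NONSIGNAL_EVENT_ID value is an assumption here, chosen so the increment wraps:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed value, chosen so the +1 below wraps to 0xFFFFFFFF. */
    #define KFD_LAST_NONSIGNAL_EVENT_ID 0xFFFFFFFEu

    int main(void)
    {
        /* The uint32_t arithmetic wraps without a compiler warning;
         * converting the result to int yields -1 on Linux targets, and
         * idr_alloc treats end <= 0 as "no upper bound". */
        int end = (int)((uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1u);
        printf("%d\n", end); /* -1 */
        return 0;
    }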
232 mutex_init(&p->event_mutex); in kfd_event_init_process()
233 idr_init(&p->event_idr); in kfd_event_init_process()
234 p->signal_page = NULL; in kfd_event_init_process()
235 p->signal_event_count = 0; in kfd_event_init_process()
243 list_for_each_entry(waiter, &ev->wq.head, wait.entry) in destroy_event()
244 waiter->event = NULL; in destroy_event()
245 wake_up_all(&ev->wq); in destroy_event()
247 if (ev->type == KFD_EVENT_TYPE_SIGNAL || in destroy_event()
248 ev->type == KFD_EVENT_TYPE_DEBUG) in destroy_event()
249 p->signal_event_count--; in destroy_event()
251 idr_remove(&p->event_idr, ev->event_id); in destroy_event()
258 uint32_t id; in destroy_events() local
260 idr_for_each_entry(&p->event_idr, ev, id) in destroy_events()
262 idr_destroy(&p->event_idr); in destroy_events()
271 struct kfd_signal_page *page = p->signal_page; in shutdown_signal_page()
274 if (page->need_to_free_pages) in shutdown_signal_page()
275 free_pages((unsigned long)page->kernel_address, in shutdown_signal_page()
289 return ev->type == KFD_EVENT_TYPE_SIGNAL || in event_can_be_gpu_signaled()
290 ev->type == KFD_EVENT_TYPE_DEBUG; in event_can_be_gpu_signaled()
295 return ev->type == KFD_EVENT_TYPE_SIGNAL; in event_can_be_cpu_signaled()
303 if (p->signal_page) in kfd_event_page_set()
304 return -EBUSY; in kfd_event_page_set()
308 return -ENOMEM; in kfd_event_page_set()
314 page->kernel_address = kernel_address; in kfd_event_page_set()
316 p->signal_page = page; in kfd_event_page_set()
317 p->signal_mapped_size = size; in kfd_event_page_set()
331 return -ENOMEM; in kfd_event_create()
333 ev->type = event_type; in kfd_event_create()
334 ev->auto_reset = auto_reset; in kfd_event_create()
335 ev->signaled = false; in kfd_event_create()
337 init_waitqueue_head(&ev->wq); in kfd_event_create()
341 mutex_lock(&p->event_mutex); in kfd_event_create()
349 *event_slot_index = ev->event_id; in kfd_event_create()
358 *event_id = ev->event_id; in kfd_event_create()
359 *event_trigger_data = ev->event_id; in kfd_event_create()
364 mutex_unlock(&p->event_mutex); in kfd_event_create()
375 mutex_lock(&p->event_mutex); in kfd_event_destroy()
382 ret = -EINVAL; in kfd_event_destroy()
384 mutex_unlock(&p->event_mutex); in kfd_event_destroy()
392 /* Auto reset if the list is non-empty and we're waking in set_event()
393 * someone. waitqueue_active is safe here because we're in set_event()
394 * protected by the p->event_mutex, which is also held when in set_event()
395 * updating the wait queues in kfd_wait_on_events. in set_event()
397 ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq); in set_event()
399 list_for_each_entry(waiter, &ev->wq.head, wait.entry) in set_event()
400 waiter->activated = true; in set_event()
402 wake_up_all(&ev->wq); in set_event()
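
The signaled-state rule in set_event() reduces to one boolean expression; a quick truth-table check in plain C, with waitqueue_active modeled by a flag:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        /* signaled = !auto_reset || !waitqueue_active: an auto-reset event
         * that wakes at least one waiter is consumed immediately. */
        for (int auto_reset = 0; auto_reset <= 1; auto_reset++)
            for (int has_waiters = 0; has_waiters <= 1; has_waiters++) {
                bool signaled = !auto_reset || !has_waiters;
                printf("auto_reset=%d waiters=%d -> stays signaled=%d\n",
                       auto_reset, has_waiters, (int)signaled);
            }
        return 0;
    }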
411 mutex_lock(&p->event_mutex); in kfd_set_event()
418 ret = -EINVAL; in kfd_set_event()
420 mutex_unlock(&p->event_mutex); in kfd_set_event()
426 ev->signaled = false; in reset_event()
435 mutex_lock(&p->event_mutex); in kfd_reset_event()
442 ret = -EINVAL; in kfd_reset_event()
444 mutex_unlock(&p->event_mutex); in kfd_reset_event()
451 page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT; in acknowledge_signal()
478 mutex_lock(&p->event_mutex); in kfd_signal_event_interrupt()
485 } else if (p->signal_page) { in kfd_signal_event_interrupt()
487 * Partial ID lookup failed. Assume that the event ID in kfd_signal_event_interrupt()
488 * in the interrupt payload was invalid and do an in kfd_signal_event_interrupt()
489 * exhaustive search of signaled events. in kfd_signal_event_interrupt()
491 uint64_t *slots = page_slots(p->signal_page); in kfd_signal_event_interrupt()
492 uint32_t id; in kfd_signal_event_interrupt() local
495 pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n", in kfd_signal_event_interrupt()
498 if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) { in kfd_signal_event_interrupt()
502 idr_for_each_entry(&p->event_idr, ev, id) { in kfd_signal_event_interrupt()
503 if (id >= KFD_SIGNAL_EVENT_LIMIT) in kfd_signal_event_interrupt()
506 if (slots[id] != UNSIGNALED_EVENT_SLOT) in kfd_signal_event_interrupt()
511 * iterate over the signal slots and lookup in kfd_signal_event_interrupt()
514 for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++) in kfd_signal_event_interrupt()
515 if (slots[id] != UNSIGNALED_EVENT_SLOT) { in kfd_signal_event_interrupt()
516 ev = lookup_event_by_id(p, id); in kfd_signal_event_interrupt()
522 mutex_unlock(&p->event_mutex); in kfd_signal_event_interrupt()
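
When the partial ID cannot be matched, the code above picks between two exhaustive scans using the signal_event_count threshold. A sketch of that heuristic, with the limit assumed as before and the cost rationale paraphrased:

    #include <stdint.h>
    #include <stdio.h>

    #define KFD_SIGNAL_EVENT_LIMIT 4096 /* assumed, as above */

    int main(void)
    {
        uint32_t signal_event_count = 32; /* illustrative */

        /* Same threshold as the driver: with few events, walking the IDR
         * and testing each event's slot touches less memory than scanning
         * every slot; with many events the full slot scan wins. */
        if (signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64)
            puts("strategy: walk the event IDR, test each event's slot");
        else
            puts("strategy: scan all signal slots, look up signaled IDs");
        return 0;
    }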
550 return -EINVAL; in init_event_waiter_get_status()
552 waiter->event = ev; in init_event_waiter_get_status()
553 waiter->activated = ev->signaled; in init_event_waiter_get_status()
554 ev->signaled = ev->signaled && !ev->auto_reset; in init_event_waiter_get_status()
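
init_event_waiter_get_status() snapshots the signaled state and, for auto-reset events, consumes it in the same step; the same two assignments in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    struct event  { bool signaled, auto_reset; };
    struct waiter { bool activated; };

    int main(void)
    {
        struct event  ev = { .signaled = true, .auto_reset = true };
        struct waiter w;

        w.activated = ev.signaled;                   /* waiter starts pre-activated */
        ev.signaled = ev.signaled && !ev.auto_reset; /* auto-reset consumes the state */

        printf("activated=%d, event still signaled=%d\n",
               (int)w.activated, (int)ev.signaled);
        return 0;
    }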
561 struct kfd_event *ev = waiter->event; in init_event_waiter_add_to_waitlist()
566 if (!waiter->activated) in init_event_waiter_add_to_waitlist()
567 add_wait_queue(&ev->wq, &waiter->wait); in init_event_waiter_add_to_waitlist()
570 /* test_event_condition - Test condition of events being waited for
618 event = waiter->event; in copy_signaled_event_data()
619 if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) { in copy_signaled_event_data()
621 src = &event->memory_exception_data; in copy_signaled_event_data()
624 return -EFAULT; in copy_signaled_event_data()
643 * msecs_to_jiffies interprets all values above 2^31-1 as infinite, in user_timeout_to_jiffies()
644 * but we consider them finite. in user_timeout_to_jiffies()
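
A sketch of the clamp the comment describes, with the IMMEDIATE/INFINITE special values assumed and msecs_to_jiffies stubbed out for userspace:

    #include <stdint.h>
    #include <stdio.h>

    #define KFD_EVENT_TIMEOUT_IMMEDIATE 0           /* assumed special value */
    #define KFD_EVENT_TIMEOUT_INFINITE  0xFFFFFFFFu /* assumed special value */

    /* Stand-in for the kernel's msecs_to_jiffies at HZ = 1000. */
    static long msecs_to_jiffies_stub(uint32_t ms) { return (long)ms; }

    static long user_timeout_to_jiffies_sketch(uint32_t user_timeout_ms)
    {
        if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE ||
            user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
            return (long)user_timeout_ms; /* passed through unchanged */

        /* msecs_to_jiffies would read anything above 2^31-1 as infinite,
         * so finite requests are clamped below that boundary. */
        if (user_timeout_ms > 0x7FFFFFFFu)
            user_timeout_ms = 0x7FFFFFFFu;
        return msecs_to_jiffies_stub(user_timeout_ms) + 1;
    }

    int main(void)
    {
        printf("%ld\n", user_timeout_to_jiffies_sketch(10));          /* 11 */
        printf("%ld\n", user_timeout_to_jiffies_sketch(0x80000000u)); /* clamped (LP64) */
        return 0;
    }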
658 remove_wait_queue(&waiters[i].event->wq, in free_waiters()
679 ret = -ENOMEM; in kfd_wait_on_events()
683 mutex_lock(&p->event_mutex); in kfd_wait_on_events()
690 ret = -EFAULT; in kfd_wait_on_events()
717 mutex_unlock(&p->event_mutex); in kfd_wait_on_events()
721 ret = -EINTR; in kfd_wait_on_events()
727 * This is wrong when a nonzero, non-infinite timeout in kfd_wait_on_events()
728 * is specified. We need to use in kfd_wait_on_events()
729 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block in kfd_wait_on_events()
730 * contains a union with data for each user and it's in kfd_wait_on_events()
731 * in generic kernel code that I don't want to in kfd_wait_on_events()
732 * touch yet. in kfd_wait_on_events()
734 ret = -ERESTARTSYS; in kfd_wait_on_events()
739 * checking wake-up conditions. A concurrent wake-up in kfd_wait_on_events()
740 * will put the task back into runnable state. In that in kfd_wait_on_events()
741 * case schedule_timeout will not put the task to in kfd_wait_on_events()
742 * sleep and we'll get a chance to re-check the in kfd_wait_on_events()
743 * updated conditions almost immediately. in kfd_wait_on_events()
768 mutex_lock(&p->event_mutex); in kfd_wait_on_events()
771 mutex_unlock(&p->event_mutex); in kfd_wait_on_events()
776 ret = -EIO; in kfd_wait_on_events()
789 get_order(vma->vm_end - vma->vm_start)) { in kfd_event_mmap()
791 return -EINVAL; in kfd_event_mmap()
794 page = p->signal_page; in kfd_event_mmap()
796 /* Probably KFD bug, but mmap is user-accessible. */ in kfd_event_mmap()
797 pr_debug("Signal page could not be found\n"); in kfd_event_mmap()
798 return -EINVAL; in kfd_event_mmap()
801 pfn = __pa(page->kernel_address); in kfd_event_mmap()
804 vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE in kfd_event_mmap()
807 pr_debug("Mapping signal page\n"); in kfd_event_mmap()
808 pr_debug(" start user address == 0x%08lx\n", vma->vm_start); in kfd_event_mmap()
809 pr_debug(" end user address == 0x%08lx\n", vma->vm_end); in kfd_event_mmap()
811 pr_debug(" vm_flags == 0x%08lX\n", vma->vm_flags); in kfd_event_mmap()
813 vma->vm_end - vma->vm_start); in kfd_event_mmap()
815 page->user_address = (uint64_t __user *)vma->vm_start; in kfd_event_mmap()
818 ret = remap_pfn_range(vma, vma->vm_start, pfn, in kfd_event_mmap()
819 vma->vm_end - vma->vm_start, vma->vm_page_prot); in kfd_event_mmap()
821 p->signal_mapped_size = vma->vm_end - vma->vm_start; in kfd_event_mmap()
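
The size check in kfd_event_mmap() compares allocation orders; get_order rounds a byte size up to a power-of-two number of pages. A rough userspace equivalent, with a 4 KiB page size assumed:

    #include <stdio.h>

    #define PAGE_SHIFT 12 /* 4 KiB pages assumed */

    /* Rough userspace equivalent of the kernel's get_order(). */
    static int get_order_sketch(unsigned long size)
    {
        int order = 0;
        unsigned long pages = (size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

        while ((1UL << order) < pages)
            order++;
        return order;
    }

    int main(void)
    {
        printf("%d\n", get_order_sketch(4096));     /* 0: one page  */
        printf("%d\n", get_order_sketch(8192));     /* 1: two pages */
        printf("%d\n", get_order_sketch(4096 * 8)); /* 3: eight pages */
        return 0;
    }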
827 * Assumes that p->event_mutex is held and of course
828 * that p is not going away (current or locked).
835 uint32_t id; in lookup_events_by_type_and_signal() local
840 id = KFD_FIRST_NONSIGNAL_EVENT_ID; in lookup_events_by_type_and_signal()
841 idr_for_each_entry_continue(&p->event_idr, ev, id) in lookup_events_by_type_and_signal()
842 if (ev->type == type) { in lookup_events_by_type_and_signal()
845 "Event found: id %X type %d", in lookup_events_by_type_and_signal()
846 ev->event_id, ev->type); in lookup_events_by_type_and_signal()
848 if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data) in lookup_events_by_type_and_signal()
849 ev->memory_exception_data = *ev_data; in lookup_events_by_type_and_signal()
855 p->lead_thread->pid, p->pasid); in lookup_events_by_type_and_signal()
856 send_sig(SIGSEGV, p->lead_thread, 0); in lookup_events_by_type_and_signal()
864 p->lead_thread->pid, p->pasid); in lookup_events_by_type_and_signal()
865 send_sig(SIGTERM, p->lead_thread, 0); in lookup_events_by_type_and_signal()
869 p->lead_thread->pid, p->pasid); in lookup_events_by_type_and_signal()
896 mm = get_task_mm(p->lead_thread); in kfd_signal_iommu_event()
907 memory_exception_data.gpu_id = dev->id; in kfd_signal_iommu_event()
913 if (vma && address >= vma->vm_start) { in kfd_signal_iommu_event()
916 if (is_write_requested && !(vma->vm_flags & VM_WRITE)) in kfd_signal_iommu_event()
921 if (is_execute_requested && !(vma->vm_flags & VM_EXEC)) in kfd_signal_iommu_event()
938 if (dev->device_info->asic_family != CHIP_RAVEN && in kfd_signal_iommu_event()
939 dev->device_info->asic_family != CHIP_RENOIR) { in kfd_signal_iommu_event()
940 mutex_lock(&p->event_mutex); in kfd_signal_iommu_event()
942 /* Lookup events by type and signal them */ in kfd_signal_iommu_event()
946 mutex_unlock(&p->event_mutex); in kfd_signal_iommu_event()
965 mutex_lock(&p->event_mutex); in kfd_signal_hw_exception_event()
967 /* Lookup events by type and signal them */ in kfd_signal_hw_exception_event()
970 mutex_unlock(&p->event_mutex); in kfd_signal_hw_exception_event()
978 uint32_t id; in kfd_signal_vm_fault_event() local
985 memory_exception_data.gpu_id = dev->id; in kfd_signal_vm_fault_event()
989 memory_exception_data.va = (info->page_addr) << PAGE_SHIFT; in kfd_signal_vm_fault_event()
991 info->prot_valid ? 1 : 0; in kfd_signal_vm_fault_event()
993 info->prot_exec ? 1 : 0; in kfd_signal_vm_fault_event()
995 info->prot_write ? 1 : 0; in kfd_signal_vm_fault_event()
998 mutex_lock(&p->event_mutex); in kfd_signal_vm_fault_event()
1000 id = KFD_FIRST_NONSIGNAL_EVENT_ID; in kfd_signal_vm_fault_event()
1001 idr_for_each_entry_continue(&p->event_idr, ev, id) in kfd_signal_vm_fault_event()
1002 if (ev->type == KFD_EVENT_TYPE_MEMORY) { in kfd_signal_vm_fault_event()
1003 ev->memory_exception_data = memory_exception_data; in kfd_signal_vm_fault_event()
1007 mutex_unlock(&p->event_mutex); in kfd_signal_vm_fault_event()
1018 uint32_t id, idx; in kfd_signal_reset_event() local
1019 int reset_cause = atomic_read(&dev->sram_ecc_flag) ? in kfd_signal_reset_event()
1025 hw_exception_data.gpu_id = dev->id; in kfd_signal_reset_event()
1031 memory_exception_data.gpu_id = dev->id; in kfd_signal_reset_event()
1036 mutex_lock(&p->event_mutex); in kfd_signal_reset_event()
1037 id = KFD_FIRST_NONSIGNAL_EVENT_ID; in kfd_signal_reset_event()
1038 idr_for_each_entry_continue(&p->event_idr, ev, id) { in kfd_signal_reset_event()
1039 if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) { in kfd_signal_reset_event()
1040 ev->hw_exception_data = hw_exception_data; in kfd_signal_reset_event()
1043 if (ev->type == KFD_EVENT_TYPE_MEMORY && in kfd_signal_reset_event()
1045 ev->memory_exception_data = memory_exception_data; in kfd_signal_reset_event()
1049 mutex_unlock(&p->event_mutex); in kfd_signal_reset_event()