Lines matching refs:args — cross-reference listing over the amdkfd ioctl handlers (drivers/gpu/drm/amd/amdkfd/kfd_chardev.c); each entry shows the source line number, the matching code, and the enclosing function.
176 struct kfd_ioctl_get_version_args *args = data; in kfd_ioctl_get_version() local
178 args->major_version = KFD_IOCTL_MAJOR_VERSION; in kfd_ioctl_get_version()
179 args->minor_version = KFD_IOCTL_MINOR_VERSION; in kfd_ioctl_get_version()
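The handler above only fills in two output fields, so the whole round-trip fits in a few lines. A minimal userspace sketch, assuming the AMDKFD_IOC_GET_VERSION request and struct layout from the kernel UAPI header <linux/kfd_ioctl.h>:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
        struct kfd_ioctl_get_version_args args = {0};
        int fd = open("/dev/kfd", O_RDWR);

        if (fd < 0)
                return 1;
        /* The handler fills major/minor from KFD_IOCTL_*_VERSION. */
        if (ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
                printf("KFD ioctl interface %u.%u\n",
                       args.major_version, args.minor_version);
        return 0;
}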
185 struct kfd_ioctl_create_queue_args *args) in set_queue_properties_from_user() argument
192 if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) { in set_queue_properties_from_user()
197 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { in set_queue_properties_from_user()
202 if ((args->ring_base_address) && in set_queue_properties_from_user()
203 (!access_ok((const void __user *) args->ring_base_address, in set_queue_properties_from_user()
209 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { in set_queue_properties_from_user()
214 if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) { in set_queue_properties_from_user()
215 args->ring_size = KFD_MIN_QUEUE_RING_SIZE; in set_queue_properties_from_user()
219 if (!access_ok((const void __user *) args->read_pointer_address, in set_queue_properties_from_user()
225 if (!access_ok((const void __user *) args->write_pointer_address, in set_queue_properties_from_user()
231 if (args->eop_buffer_address && in set_queue_properties_from_user()
232 !access_ok((const void __user *) args->eop_buffer_address, in set_queue_properties_from_user()
238 if (args->ctx_save_restore_address && in set_queue_properties_from_user()
239 !access_ok((const void __user *) args->ctx_save_restore_address, in set_queue_properties_from_user()
247 q_properties->queue_percent = args->queue_percentage & 0xFF; in set_queue_properties_from_user()
249 q_properties->pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF; in set_queue_properties_from_user()
250 q_properties->priority = args->queue_priority; in set_queue_properties_from_user()
251 q_properties->queue_address = args->ring_base_address; in set_queue_properties_from_user()
252 q_properties->queue_size = args->ring_size; in set_queue_properties_from_user()
253 q_properties->read_ptr = (void __user *)args->read_pointer_address; in set_queue_properties_from_user()
254 q_properties->write_ptr = (void __user *)args->write_pointer_address; in set_queue_properties_from_user()
255 q_properties->eop_ring_buffer_address = args->eop_buffer_address; in set_queue_properties_from_user()
256 q_properties->eop_ring_buffer_size = args->eop_buffer_size; in set_queue_properties_from_user()
258 args->ctx_save_restore_address; in set_queue_properties_from_user()
259 q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size; in set_queue_properties_from_user()
260 q_properties->ctl_stack_size = args->ctl_stack_size; in set_queue_properties_from_user()
261 q_properties->sdma_engine_id = args->sdma_engine_id; in set_queue_properties_from_user()
262 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE || in set_queue_properties_from_user()
263 args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) in set_queue_properties_from_user()
265 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA) in set_queue_properties_from_user()
267 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI) in set_queue_properties_from_user()
269 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID) in set_queue_properties_from_user()
274 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) in set_queue_properties_from_user()
280 q_properties->queue_percent, args->queue_percentage); in set_queue_properties_from_user()
283 q_properties->priority, args->queue_priority); in set_queue_properties_from_user()
286 q_properties->queue_address, args->ring_base_address); in set_queue_properties_from_user()
289 q_properties->queue_size, args->ring_size); in set_queue_properties_from_user()
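The checks at lines 192 and 247-249 imply a packed layout for queue_percentage: the low byte carries the percentage (capped at KFD_MAX_QUEUE_PERCENTAGE) and the next byte selects the PM4 target XCC. A small helper, inferred from those checks rather than from any documented UAPI contract:

#include <stdint.h>

/* Low byte = queue percentage, next byte = PM4 target XCC
 * (layout inferred from the validator above). */
static inline uint32_t kfd_pack_queue_percentage(uint8_t percent, uint8_t xcc)
{
        return (uint32_t)percent | ((uint32_t)xcc << 8);
}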
308 struct kfd_ioctl_create_queue_args *args = data; in kfd_ioctl_create_queue() local
320 err = set_queue_properties_from_user(&q_properties, args); in kfd_ioctl_create_queue()
324 pr_debug("Looking for gpu id 0x%x\n", args->gpu_id); in kfd_ioctl_create_queue()
328 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_create_queue()
330 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id); in kfd_ioctl_create_queue()
377 args->queue_id = queue_id; in kfd_ioctl_create_queue()
381 args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL; in kfd_ioctl_create_queue()
382 args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); in kfd_ioctl_create_queue()
387 args->doorbell_offset |= doorbell_offset_in_process; in kfd_ioctl_create_queue()
391 pr_debug("Queue id %d was created successfully\n", args->queue_id); in kfd_ioctl_create_queue()
394 args->ring_base_address); in kfd_ioctl_create_queue()
397 args->read_pointer_address); in kfd_ioctl_create_queue()
400 args->write_pointer_address); in kfd_ioctl_create_queue()
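kfd_ioctl_create_queue() returns a doorbell_offset that already encodes KFD_MMAP_TYPE_DOORBELL, the GPU id, and the in-process offset (lines 381-387), so userspace can pass it straight to mmap() on the same /dev/kfd fd. A hedged sketch: the 4 KiB length is an assumption (the real doorbell slice size is device-dependent), and the ring/read/write-pointer buffers in args must already be valid user memory per set_queue_properties_from_user():

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kfd_ioctl.h>

void *create_queue_and_map_doorbell(int kfd_fd,
                                    struct kfd_ioctl_create_queue_args *args)
{
        if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, args))
                return MAP_FAILED;
        /* doorbell_offset already carries the KFD_MMAP_TYPE_DOORBELL |
         * KFD_MMAP_GPU_ID(gpu_id) | in-process-offset encoding built above. */
        return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                    kfd_fd, args->doorbell_offset);
}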
420 struct kfd_ioctl_destroy_queue_args *args = data; in kfd_ioctl_destroy_queue() local
423 args->queue_id, in kfd_ioctl_destroy_queue()
428 retval = pqm_destroy_queue(&p->pqm, args->queue_id); in kfd_ioctl_destroy_queue()
438 struct kfd_ioctl_update_queue_args *args = data; in kfd_ioctl_update_queue() local
446 if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) { in kfd_ioctl_update_queue()
451 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { in kfd_ioctl_update_queue()
456 if ((args->ring_base_address) && in kfd_ioctl_update_queue()
457 (!access_ok((const void __user *) args->ring_base_address, in kfd_ioctl_update_queue()
463 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { in kfd_ioctl_update_queue()
468 if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) { in kfd_ioctl_update_queue()
469 args->ring_size = KFD_MIN_QUEUE_RING_SIZE; in kfd_ioctl_update_queue()
473 properties.queue_address = args->ring_base_address; in kfd_ioctl_update_queue()
474 properties.queue_size = args->ring_size; in kfd_ioctl_update_queue()
475 properties.queue_percent = args->queue_percentage & 0xFF; in kfd_ioctl_update_queue()
477 properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF; in kfd_ioctl_update_queue()
478 properties.priority = args->queue_priority; in kfd_ioctl_update_queue()
481 args->queue_id, p->lead_thread->pid); in kfd_ioctl_update_queue()
485 retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties); in kfd_ioctl_update_queue()
497 struct kfd_ioctl_set_cu_mask_args *args = data; in kfd_ioctl_set_cu_mask() local
499 uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr; in kfd_ioctl_set_cu_mask()
500 size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32); in kfd_ioctl_set_cu_mask()
502 if ((args->num_cu_mask % 32) != 0) { in kfd_ioctl_set_cu_mask()
504 args->num_cu_mask); in kfd_ioctl_set_cu_mask()
508 minfo.cu_mask.count = args->num_cu_mask; in kfd_ioctl_set_cu_mask()
532 retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo); in kfd_ioctl_set_cu_mask()
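Lines 499-504 pin down the CU-mask ABI: num_cu_mask must be a multiple of 32, and cu_mask_ptr points at num_cu_mask/32 dwords, one bit per compute unit. A sketch that enables every CU (request macro from <linux/kfd_ioctl.h>):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

/* num_cus must be a positive multiple of 32, as the handler enforces. */
int enable_all_cus(int kfd_fd, uint32_t queue_id, uint32_t num_cus)
{
        uint32_t mask[num_cus / 32];
        struct kfd_ioctl_set_cu_mask_args args = {
                .queue_id = queue_id,
                .num_cu_mask = num_cus,
                .cu_mask_ptr = (uintptr_t)mask,
        };

        for (uint32_t i = 0; i < num_cus / 32; i++)
                mask[i] = 0xffffffff; /* one bit per compute unit */
        return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
}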
543 struct kfd_ioctl_get_queue_wave_state_args *args = data; in kfd_ioctl_get_queue_wave_state() local
548 r = pqm_get_wave_state(&p->pqm, args->queue_id, in kfd_ioctl_get_queue_wave_state()
549 (void __user *)args->ctl_stack_address, in kfd_ioctl_get_queue_wave_state()
550 &args->ctl_stack_used_size, in kfd_ioctl_get_queue_wave_state()
551 &args->save_area_used_size); in kfd_ioctl_get_queue_wave_state()
561 struct kfd_ioctl_set_memory_policy_args *args = data; in kfd_ioctl_set_memory_policy() local
566 if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT in kfd_ioctl_set_memory_policy()
567 && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { in kfd_ioctl_set_memory_policy()
571 if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT in kfd_ioctl_set_memory_policy()
572 && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { in kfd_ioctl_set_memory_policy()
577 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_set_memory_policy()
579 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id); in kfd_ioctl_set_memory_policy()
590 default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT) in kfd_ioctl_set_memory_policy()
594 (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) in kfd_ioctl_set_memory_policy()
601 (void __user *)args->alternate_aperture_base, in kfd_ioctl_set_memory_policy()
602 args->alternate_aperture_size, in kfd_ioctl_set_memory_policy()
603 args->misc_process_flag)) in kfd_ioctl_set_memory_policy()
616 struct kfd_ioctl_set_trap_handler_args *args = data; in kfd_ioctl_set_trap_handler() local
622 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_set_trap_handler()
634 kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr); in kfd_ioctl_set_trap_handler()
671 struct kfd_ioctl_get_clock_counters_args *args = data; in kfd_ioctl_get_clock_counters() local
675 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_get_clock_counters()
679 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev); in kfd_ioctl_get_clock_counters()
682 args->gpu_clock_counter = 0; in kfd_ioctl_get_clock_counters()
685 args->cpu_clock_counter = ktime_get_raw_ns(); in kfd_ioctl_get_clock_counters()
686 args->system_clock_counter = ktime_get_boottime_ns(); in kfd_ioctl_get_clock_counters()
689 args->system_clock_freq = 1000000000; in kfd_ioctl_get_clock_counters()
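The handler pairs one GPU counter read with CPU-side raw and boottime timestamps, and advertises the system counter at a fixed 1 GHz (i.e. nanoseconds). A sketch of sampling all four values:

#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

void sample_clocks(int kfd_fd, uint32_t gpu_id)
{
        struct kfd_ioctl_get_clock_counters_args args = { .gpu_id = gpu_id };

        if (ioctl(kfd_fd, AMDKFD_IOC_GET_CLOCK_COUNTERS, &args) == 0)
                printf("gpu=%llu cpu=%llu sys=%llu freq=%llu\n",
                       (unsigned long long)args.gpu_clock_counter,
                       (unsigned long long)args.cpu_clock_counter,
                       (unsigned long long)args.system_clock_counter,
                       (unsigned long long)args.system_clock_freq);
}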
698 struct kfd_ioctl_get_process_apertures_args *args = data; in kfd_ioctl_get_process_apertures() local
704 args->num_of_nodes = 0; in kfd_ioctl_get_process_apertures()
712 &args->process_apertures[args->num_of_nodes]; in kfd_ioctl_get_process_apertures()
722 "node id %u\n", args->num_of_nodes); in kfd_ioctl_get_process_apertures()
738 if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS) in kfd_ioctl_get_process_apertures()
749 struct kfd_ioctl_get_process_apertures_new_args *args = data; in kfd_ioctl_get_process_apertures_new() local
757 if (args->num_of_nodes == 0) { in kfd_ioctl_get_process_apertures_new()
762 args->num_of_nodes = p->n_pdds; in kfd_ioctl_get_process_apertures_new()
770 pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures), in kfd_ioctl_get_process_apertures_new()
778 args->num_of_nodes = 0; in kfd_ioctl_get_process_apertures_new()
784 for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) { in kfd_ioctl_get_process_apertures_new()
812 args->num_of_nodes = i; in kfd_ioctl_get_process_apertures_new()
814 (void __user *)args->kfd_process_device_apertures_ptr, in kfd_ioctl_get_process_apertures_new()
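The num_of_nodes == 0 branch at line 757 is a size query: the handler reports p->n_pdds and returns without copying anything. Userspace therefore calls twice, as in this sketch:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

struct kfd_process_device_apertures *get_apertures(int kfd_fd, uint32_t *n)
{
        struct kfd_ioctl_get_process_apertures_new_args args = {0};
        struct kfd_process_device_apertures *pa;

        /* First call: num_of_nodes == 0 just asks for the count. */
        if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args))
                return NULL;
        pa = calloc(args.num_of_nodes, sizeof(*pa));
        if (!pa)
                return NULL;
        args.kfd_process_device_apertures_ptr = (uintptr_t)pa;
        if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args)) {
                free(pa);
                return NULL;
        }
        *n = args.num_of_nodes; /* may be clamped to what was filled */
        return pa;
}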
828 struct kfd_ioctl_create_event_args *args = data; in kfd_ioctl_create_event() local
835 if (args->event_page_offset) { in kfd_ioctl_create_event()
837 err = kfd_kmap_event_page(p, args->event_page_offset); in kfd_ioctl_create_event()
843 err = kfd_event_create(filp, p, args->event_type, in kfd_ioctl_create_event()
844 args->auto_reset != 0, args->node_id, in kfd_ioctl_create_event()
845 &args->event_id, &args->event_trigger_data, in kfd_ioctl_create_event()
846 &args->event_page_offset, in kfd_ioctl_create_event()
847 &args->event_slot_index); in kfd_ioctl_create_event()
849 pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__); in kfd_ioctl_create_event()
856 struct kfd_ioctl_destroy_event_args *args = data; in kfd_ioctl_destroy_event() local
858 return kfd_event_destroy(p, args->event_id); in kfd_ioctl_destroy_event()
864 struct kfd_ioctl_set_event_args *args = data; in kfd_ioctl_set_event() local
866 return kfd_set_event(p, args->event_id); in kfd_ioctl_set_event()
872 struct kfd_ioctl_reset_event_args *args = data; in kfd_ioctl_reset_event() local
874 return kfd_reset_event(p, args->event_id); in kfd_ioctl_reset_event()
880 struct kfd_ioctl_wait_events_args *args = data; in kfd_ioctl_wait_events() local
882 return kfd_wait_on_events(p, args->num_events, in kfd_ioctl_wait_events()
883 (void __user *)args->events_ptr, in kfd_ioctl_wait_events()
884 (args->wait_for_all != 0), in kfd_ioctl_wait_events()
885 &args->timeout, &args->wait_result); in kfd_ioctl_wait_events()
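kfd_wait_on_events() consumes an array of per-event records, so even the single-event case goes through a small descriptor. A sketch, assuming struct kfd_event_data and AMDKFD_IOC_WAIT_EVENTS from <linux/kfd_ioctl.h>:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int wait_one_event(int kfd_fd, uint32_t event_id, uint32_t timeout_ms)
{
        struct kfd_event_data ev = { .event_id = event_id };
        struct kfd_ioctl_wait_events_args args = {
                .events_ptr = (uintptr_t)&ev,
                .num_events = 1,
                .wait_for_all = 1,
                .timeout = timeout_ms,
        };

        if (ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &args))
                return -1;
        return args.wait_result; /* e.g. KFD_IOC_WAIT_RESULT_COMPLETE */
}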
890 struct kfd_ioctl_set_scratch_backing_va_args *args = data; in kfd_ioctl_set_scratch_backing_va() local
896 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_set_scratch_backing_va()
909 pdd->qpd.sh_hidden_private_base = args->va_addr; in kfd_ioctl_set_scratch_backing_va()
916 dev->adev, args->va_addr, pdd->qpd.vmid); in kfd_ioctl_set_scratch_backing_va()
929 struct kfd_ioctl_get_tile_config_args *args = data; in kfd_ioctl_get_tile_config() local
935 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_get_tile_config()
942 args->gb_addr_config = config.gb_addr_config; in kfd_ioctl_get_tile_config()
943 args->num_banks = config.num_banks; in kfd_ioctl_get_tile_config()
944 args->num_ranks = config.num_ranks; in kfd_ioctl_get_tile_config()
946 if (args->num_tile_configs > config.num_tile_configs) in kfd_ioctl_get_tile_config()
947 args->num_tile_configs = config.num_tile_configs; in kfd_ioctl_get_tile_config()
948 err = copy_to_user((void __user *)args->tile_config_ptr, in kfd_ioctl_get_tile_config()
950 args->num_tile_configs * sizeof(uint32_t)); in kfd_ioctl_get_tile_config()
952 args->num_tile_configs = 0; in kfd_ioctl_get_tile_config()
956 if (args->num_macro_tile_configs > config.num_macro_tile_configs) in kfd_ioctl_get_tile_config()
957 args->num_macro_tile_configs = in kfd_ioctl_get_tile_config()
959 err = copy_to_user((void __user *)args->macro_tile_config_ptr, in kfd_ioctl_get_tile_config()
961 args->num_macro_tile_configs * sizeof(uint32_t)); in kfd_ioctl_get_tile_config()
963 args->num_macro_tile_configs = 0; in kfd_ioctl_get_tile_config()
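Lines 946-957 implement a clamp-and-copy contract: userspace advertises its array capacity in num_tile_configs / num_macro_tile_configs and reads back how many entries were actually written. A sketch for the tile-config half (capacity value is arbitrary):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

#define MAX_TILE_CFG 64 /* arbitrary capacity for this sketch */

int read_tile_configs(int kfd_fd, uint32_t gpu_id,
                      uint32_t cfg[MAX_TILE_CFG], uint32_t *count)
{
        struct kfd_ioctl_get_tile_config_args args = {
                .gpu_id = gpu_id,
                .tile_config_ptr = (uintptr_t)cfg,
                .num_tile_configs = MAX_TILE_CFG, /* clamped by the kernel */
        };

        if (ioctl(kfd_fd, AMDKFD_IOC_GET_TILE_CONFIG, &args))
                return -1;
        *count = args.num_tile_configs; /* how many were actually copied */
        return 0;
}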
973 struct kfd_ioctl_acquire_vm_args *args = data; in kfd_ioctl_acquire_vm() local
978 drm_file = fget(args->drm_fd); in kfd_ioctl_acquire_vm()
983 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_acquire_vm()
1034 struct kfd_ioctl_get_available_memory_args *args = data; in kfd_ioctl_get_available_memory() local
1035 struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id); in kfd_ioctl_get_available_memory()
1039 args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev, in kfd_ioctl_get_available_memory()
1048 struct kfd_ioctl_alloc_memory_of_gpu_args *args = data; in kfd_ioctl_alloc_memory_of_gpu() local
1054 uint64_t offset = args->mmap_offset; in kfd_ioctl_alloc_memory_of_gpu()
1055 uint32_t flags = args->flags; in kfd_ioctl_alloc_memory_of_gpu()
1057 if (args->size == 0) in kfd_ioctl_alloc_memory_of_gpu()
1071 if (!(!args->va_addr && (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) && in kfd_ioctl_alloc_memory_of_gpu()
1073 args->va_addr >> PAGE_SHIFT, in kfd_ioctl_alloc_memory_of_gpu()
1074 (args->va_addr + args->size - 1) >> PAGE_SHIFT)) { in kfd_ioctl_alloc_memory_of_gpu()
1076 args->va_addr); in kfd_ioctl_alloc_memory_of_gpu()
1086 args->mmap_offset >> PAGE_SHIFT, in kfd_ioctl_alloc_memory_of_gpu()
1087 (args->mmap_offset + args->size - 1) >> PAGE_SHIFT)) { in kfd_ioctl_alloc_memory_of_gpu()
1089 args->mmap_offset); in kfd_ioctl_alloc_memory_of_gpu()
1097 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_alloc_memory_of_gpu()
1120 if (args->size != kfd_doorbell_process_slice(dev->kfd)) { in kfd_ioctl_alloc_memory_of_gpu()
1130 if (args->size != PAGE_SIZE) { in kfd_ioctl_alloc_memory_of_gpu()
1142 dev->adev, args->va_addr, args->size, in kfd_ioctl_alloc_memory_of_gpu()
1157 uint64_t size = args->size; in kfd_ioctl_alloc_memory_of_gpu()
1166 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); in kfd_ioctl_alloc_memory_of_gpu()
1167 args->mmap_offset = offset; in kfd_ioctl_alloc_memory_of_gpu()
1173 args->mmap_offset = KFD_MMAP_TYPE_MMIO in kfd_ioctl_alloc_memory_of_gpu()
1174 | KFD_MMAP_GPU_ID(args->gpu_id); in kfd_ioctl_alloc_memory_of_gpu()
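On success the handler packs args->handle with MAKE_HANDLE(gpu_id, idr_handle): GPU id in the upper 32 bits, per-device IDR handle in the lower 32, which is exactly what the free/map/unmap paths later split apart with GET_GPU_ID()/GET_IDR_HANDLE(). A minimal VRAM allocation sketch (flag names from <linux/kfd_ioctl.h>; the VA must satisfy the SVM-range check at lines 1071-1074):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int alloc_vram(int kfd_fd, uint32_t gpu_id, uint64_t va, uint64_t size,
               uint64_t *handle, uint64_t *mmap_off)
{
        struct kfd_ioctl_alloc_memory_of_gpu_args args = {
                .va_addr = va,   /* must pass the SVM-range check above */
                .size = size,    /* rejected if zero (line 1057) */
                .gpu_id = gpu_id,
                .flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
                         KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE,
        };

        if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &args))
                return -1;
        *handle = args.handle;        /* gpu_id << 32 | IDR handle */
        *mmap_off = args.mmap_offset;
        return 0;
}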
1191 struct kfd_ioctl_free_memory_of_gpu_args *args = data; in kfd_ioctl_free_memory_of_gpu() local
1202 if (p->signal_handle && (p->signal_handle == args->handle)) { in kfd_ioctl_free_memory_of_gpu()
1208 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1216 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1230 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1243 struct kfd_ioctl_map_memory_to_gpu_args *args = data; in kfd_ioctl_map_memory_to_gpu() local
1251 if (!args->n_devices) { in kfd_ioctl_map_memory_to_gpu()
1255 if (args->n_success > args->n_devices) { in kfd_ioctl_map_memory_to_gpu()
1260 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr), in kfd_ioctl_map_memory_to_gpu()
1266 (void __user *)args->device_ids_array_ptr, in kfd_ioctl_map_memory_to_gpu()
1267 args->n_devices * sizeof(*devices_arr)); in kfd_ioctl_map_memory_to_gpu()
1274 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle)); in kfd_ioctl_map_memory_to_gpu()
1288 GET_IDR_HANDLE(args->handle)); in kfd_ioctl_map_memory_to_gpu()
1294 for (i = args->n_success; i < args->n_devices; i++) { in kfd_ioctl_map_memory_to_gpu()
1324 args->n_success = i+1; in kfd_ioctl_map_memory_to_gpu()
1336 for (i = 0; i < args->n_devices; i++) { in kfd_ioctl_map_memory_to_gpu()
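The n_success field is a resume cursor, not just an output: mapping starts at index n_success (line 1294) and the handler bumps it after each device (line 1324), so a partially failed call can be retried without remapping devices that already succeeded. A sketch of that retry contract:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int map_to_gpus(int kfd_fd, uint64_t handle, uint32_t *gpu_ids, uint32_t n)
{
        struct kfd_ioctl_map_memory_to_gpu_args args = {
                .handle = handle,
                .device_ids_array_ptr = (uintptr_t)gpu_ids,
                .n_devices = n,
                .n_success = 0, /* must not exceed n_devices (line 1255) */
        };
        int tries = 3;

        /* On failure, args.n_success tells the kernel where to resume. */
        while (args.n_success < n && tries--)
                if (ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args) == 0)
                        break;
        return args.n_success == n ? 0 : -1;
}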
1361 struct kfd_ioctl_unmap_memory_from_gpu_args *args = data; in kfd_ioctl_unmap_memory_from_gpu() local
1368 if (!args->n_devices) { in kfd_ioctl_unmap_memory_from_gpu()
1372 if (args->n_success > args->n_devices) { in kfd_ioctl_unmap_memory_from_gpu()
1377 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr), in kfd_ioctl_unmap_memory_from_gpu()
1383 (void __user *)args->device_ids_array_ptr, in kfd_ioctl_unmap_memory_from_gpu()
1384 args->n_devices * sizeof(*devices_arr)); in kfd_ioctl_unmap_memory_from_gpu()
1391 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle)); in kfd_ioctl_unmap_memory_from_gpu()
1398 GET_IDR_HANDLE(args->handle)); in kfd_ioctl_unmap_memory_from_gpu()
1404 for (i = args->n_success; i < args->n_devices; i++) { in kfd_ioctl_unmap_memory_from_gpu()
1413 pr_debug("Failed to unmap from gpu %d/%d\n", i, args->n_devices); in kfd_ioctl_unmap_memory_from_gpu()
1416 args->n_success = i+1; in kfd_ioctl_unmap_memory_from_gpu()
1430 for (i = 0; i < args->n_devices; i++) { in kfd_ioctl_unmap_memory_from_gpu()
1463 struct kfd_ioctl_alloc_queue_gws_args *args = data; in kfd_ioctl_alloc_queue_gws() local
1468 q = pqm_get_user_queue(&p->pqm, args->queue_id); in kfd_ioctl_alloc_queue_gws()
1493 retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL); in kfd_ioctl_alloc_queue_gws()
1496 args->first_gws = 0; in kfd_ioctl_alloc_queue_gws()
1507 struct kfd_ioctl_get_dmabuf_info_args *args = data; in kfd_ioctl_get_dmabuf_info() local
1523 if (args->metadata_ptr) { in kfd_ioctl_get_dmabuf_info()
1524 metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL); in kfd_ioctl_get_dmabuf_info()
1530 r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd, in kfd_ioctl_get_dmabuf_info()
1531 &dmabuf_adev, &args->size, in kfd_ioctl_get_dmabuf_info()
1532 metadata_buffer, args->metadata_size, in kfd_ioctl_get_dmabuf_info()
1533 &args->metadata_size, &flags, &xcp_id); in kfd_ioctl_get_dmabuf_info()
1538 args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id; in kfd_ioctl_get_dmabuf_info()
1540 args->gpu_id = dev->id; in kfd_ioctl_get_dmabuf_info()
1541 args->flags = flags; in kfd_ioctl_get_dmabuf_info()
1545 r = copy_to_user((void __user *)args->metadata_ptr, in kfd_ioctl_get_dmabuf_info()
1546 metadata_buffer, args->metadata_size); in kfd_ioctl_get_dmabuf_info()
1560 struct kfd_ioctl_import_dmabuf_args *args = data; in kfd_ioctl_import_dmabuf() local
1568 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_import_dmabuf()
1580 r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd, in kfd_ioctl_import_dmabuf()
1581 args->va_addr, pdd->drm_priv, in kfd_ioctl_import_dmabuf()
1595 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); in kfd_ioctl_import_dmabuf()
1610 struct kfd_ioctl_export_dmabuf_args *args = data; in kfd_ioctl_export_dmabuf() local
1617 dev = kfd_device_by_id(GET_GPU_ID(args->handle)); in kfd_ioctl_export_dmabuf()
1630 GET_IDR_HANDLE(args->handle)); in kfd_ioctl_export_dmabuf()
1641 ret = dma_buf_fd(dmabuf, args->flags); in kfd_ioctl_export_dmabuf()
1649 args->dmabuf_fd = ret; in kfd_ioctl_export_dmabuf()
1663 struct kfd_ioctl_smi_events_args *args = data; in kfd_ioctl_smi_events() local
1668 pdd = kfd_process_device_data_by_id(p, args->gpuid); in kfd_ioctl_smi_events()
1673 return kfd_smi_event_open(pdd->dev, &args->anon_fd); in kfd_ioctl_smi_events()
1681 struct kfd_ioctl_set_xnack_mode_args *args = data; in kfd_ioctl_set_xnack_mode() local
1685 if (args->xnack_enabled >= 0) { in kfd_ioctl_set_xnack_mode()
1692 if (p->xnack_enabled == args->xnack_enabled) in kfd_ioctl_set_xnack_mode()
1695 if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) { in kfd_ioctl_set_xnack_mode()
1700 r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled); in kfd_ioctl_set_xnack_mode()
1702 args->xnack_enabled = p->xnack_enabled; in kfd_ioctl_set_xnack_mode()
1713 struct kfd_ioctl_svm_args *args = data; in kfd_ioctl_svm() local
1717 args->start_addr, args->size, args->op, args->nattr); in kfd_ioctl_svm()
1719 if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK)) in kfd_ioctl_svm()
1721 if (!args->start_addr || !args->size) in kfd_ioctl_svm()
1724 r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr, in kfd_ioctl_svm()
1725 args->attrs); in kfd_ioctl_svm()
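The entry checks at lines 1719-1721 reject unaligned or empty ranges before svm_ioctl() ever runs. A tiny helper mirroring that validation on the userspace side:

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/* Mirrors the kernel's PAGE_MASK test: start and size must be page
 * aligned, and both must be non-zero. */
static bool svm_range_ok(uint64_t start, uint64_t size)
{
        const uint64_t page_mask = (uint64_t)sysconf(_SC_PAGESIZE) - 1;

        return start && size && !(start & page_mask) && !(size & page_mask);
}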
2058 struct kfd_ioctl_criu_args *args) in criu_checkpoint() argument
2064 if (!args->devices || !args->bos || !args->priv_data) in criu_checkpoint()
2087 if (num_devices != args->num_devices || in criu_checkpoint()
2088 num_bos != args->num_bos || in criu_checkpoint()
2089 num_objects != args->num_objects || in criu_checkpoint()
2090 priv_size != args->priv_data_size) { in criu_checkpoint()
2097 ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset); in criu_checkpoint()
2101 ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices, in criu_checkpoint()
2102 (uint8_t __user *)args->priv_data, &priv_offset); in criu_checkpoint()
2114 ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data, in criu_checkpoint()
2119 ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data, in criu_checkpoint()
2124 ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset); in criu_checkpoint()
2132 ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos, in criu_checkpoint()
2133 (uint8_t __user *)args->priv_data, &bo_priv_offset); in criu_checkpoint()
2146 struct kfd_ioctl_criu_args *args, in criu_restore_process() argument
2157 (void __user *)(args->priv_data + *priv_offset), in criu_restore_process()
2187 struct kfd_ioctl_criu_args *args, in criu_restore_devices() argument
2196 if (args->num_devices != p->n_pdds) in criu_restore_devices()
2199 if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size) in criu_restore_devices()
2202 device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL); in criu_restore_devices()
2206 ret = copy_from_user(device_buckets, (void __user *)args->devices, in criu_restore_devices()
2207 args->num_devices * sizeof(*device_buckets)); in criu_restore_devices()
2214 for (i = 0; i < args->num_devices; i++) { in criu_restore_devices()
2287 *priv_offset += args->num_devices * sizeof(*device_privs); in criu_restore_devices()
2429 struct kfd_ioctl_criu_args *args, in criu_restore_bos() argument
2439 if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size) in criu_restore_bos()
2445 bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL); in criu_restore_bos()
2449 files = kvzalloc(args->num_bos * sizeof(struct file *), GFP_KERNEL); in criu_restore_bos()
2455 ret = copy_from_user(bo_buckets, (void __user *)args->bos, in criu_restore_bos()
2456 args->num_bos * sizeof(*bo_buckets)); in criu_restore_bos()
2463 bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL); in criu_restore_bos()
2469 ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset, in criu_restore_bos()
2470 args->num_bos * sizeof(*bo_privs)); in criu_restore_bos()
2476 *priv_offset += args->num_bos * sizeof(*bo_privs); in criu_restore_bos()
2479 for (; i < args->num_bos; i++) { in criu_restore_bos()
2488 ret = copy_to_user((void __user *)args->bos, in criu_restore_bos()
2490 (args->num_bos * sizeof(*bo_buckets))); in criu_restore_bos()
2504 struct kfd_ioctl_criu_args *args, in criu_restore_objects() argument
2515 for (i = 0; i < args->num_objects; i++) { in criu_restore_objects()
2523 ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset)); in criu_restore_objects()
2531 ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data, in criu_restore_objects()
2537 ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data, in criu_restore_objects()
2543 ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data, in criu_restore_objects()
2560 struct kfd_ioctl_criu_args *args) in criu_restore() argument
2566 args->num_devices, args->num_bos, args->num_objects, args->priv_data_size); in criu_restore()
2568 if ((args->num_bos > 0 && !args->bos) || !args->devices || !args->priv_data || in criu_restore()
2569 !args->priv_data_size || !args->num_devices) in criu_restore()
2583 ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size); in criu_restore()
2587 ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size); in criu_restore()
2591 ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size); in criu_restore()
2595 ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size); in criu_restore()
2599 if (priv_offset != args->priv_data_size) { in criu_restore()
2616 struct kfd_ioctl_criu_args *args) in criu_unpause() argument
2640 struct kfd_ioctl_criu_args *args) in criu_resume() argument
2647 args->pid); in criu_resume()
2649 pid = find_get_pid(args->pid); in criu_resume()
2651 pr_err("Cannot find pid info for %i\n", args->pid); in criu_resume()
2661 pr_debug("Cannot find process info for %i\n", args->pid); in criu_resume()
2668 pr_err("kfd_criu_resume_svm failed for %i\n", args->pid); in criu_resume()
2674 pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid); in criu_resume()
2685 struct kfd_ioctl_criu_args *args) in criu_process_info() argument
2703 args->pid = task_pid_nr_ns(p->lead_thread, in criu_process_info()
2706 ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos, in criu_process_info()
2707 &args->num_objects, &args->priv_data_size); in criu_process_info()
2712 args->num_devices, args->num_bos, args->num_objects, in criu_process_info()
2713 args->priv_data_size); in criu_process_info()
2726 struct kfd_ioctl_criu_args *args = data; in kfd_ioctl_criu() local
2729 dev_dbg(kfd_device, "CRIU operation: %d\n", args->op); in kfd_ioctl_criu()
2730 switch (args->op) { in kfd_ioctl_criu()
2732 ret = criu_process_info(filep, p, args); in kfd_ioctl_criu()
2735 ret = criu_checkpoint(filep, p, args); in kfd_ioctl_criu()
2738 ret = criu_unpause(filep, p, args); in kfd_ioctl_criu()
2741 ret = criu_restore(filep, p, args); in kfd_ioctl_criu()
2744 ret = criu_resume(filep, p, args); in kfd_ioctl_criu()
2747 dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op); in kfd_ioctl_criu()
2753 dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret); in kfd_ioctl_criu()
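The op switch above expects a fixed sequence on the checkpoint side: PROCESS_INFO to size the buckets, CHECKPOINT to fill them (which, per lines 2087-2090, rejects counts or sizes that drifted since PROCESS_INFO), then UNPAUSE. A hedged sketch, with the bucket struct names taken from the kernel UAPI header and error handling trimmed:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int criu_dump(int kfd_fd, struct kfd_ioctl_criu_args *args)
{
        args->op = KFD_CRIU_OP_PROCESS_INFO;
        if (ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, args))
                return -1;

        /* Allocate exactly what PROCESS_INFO reported; CHECKPOINT rejects
         * mismatched counts and sizes (lines 2087-2090). */
        args->devices = (uintptr_t)calloc(args->num_devices,
                                          sizeof(struct kfd_criu_device_bucket));
        args->bos = (uintptr_t)calloc(args->num_bos,
                                      sizeof(struct kfd_criu_bo_bucket));
        args->priv_data = (uintptr_t)malloc(args->priv_data_size);
        if (!args->devices || !args->bos || !args->priv_data)
                return -1;

        args->op = KFD_CRIU_OP_CHECKPOINT;
        if (ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, args))
                return -1;

        args->op = KFD_CRIU_OP_UNPAUSE;
        return ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, args);
}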
2887 struct kfd_ioctl_runtime_enable_args *args = data; in kfd_ioctl_runtime_enable() local
2892 if (args->mode_mask & KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK) in kfd_ioctl_runtime_enable()
2893 r = runtime_enable(p, args->r_debug, in kfd_ioctl_runtime_enable()
2894 !!(args->mode_mask & KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK)); in kfd_ioctl_runtime_enable()
2905 struct kfd_ioctl_dbg_trap_args *args = data; in kfd_ioctl_set_debug_trap() local
2918 pid = find_get_pid(args->pid); in kfd_ioctl_set_debug_trap()
2920 pr_debug("Cannot find pid info for %i\n", args->pid); in kfd_ioctl_set_debug_trap()
2937 if (args->op == KFD_IOC_DBG_TRAP_ENABLE) { in kfd_ioctl_set_debug_trap()
2951 pr_debug("Cannot find process PID %i to debug\n", args->pid); in kfd_ioctl_set_debug_trap()
2959 if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE in kfd_ioctl_set_debug_trap()
2961 pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid); in kfd_ioctl_set_debug_trap()
2971 if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) { in kfd_ioctl_set_debug_trap()
2972 pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op); in kfd_ioctl_set_debug_trap()
2978 (args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE || in kfd_ioctl_set_debug_trap()
2979 args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE || in kfd_ioctl_set_debug_trap()
2980 args->op == KFD_IOC_DBG_TRAP_SUSPEND_QUEUES || in kfd_ioctl_set_debug_trap()
2981 args->op == KFD_IOC_DBG_TRAP_RESUME_QUEUES || in kfd_ioctl_set_debug_trap()
2982 args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH || in kfd_ioctl_set_debug_trap()
2983 args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH || in kfd_ioctl_set_debug_trap()
2984 args->op == KFD_IOC_DBG_TRAP_SET_FLAGS)) { in kfd_ioctl_set_debug_trap()
2989 if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH || in kfd_ioctl_set_debug_trap()
2990 args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) { in kfd_ioctl_set_debug_trap()
2992 args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ? in kfd_ioctl_set_debug_trap()
2993 args->set_node_address_watch.gpu_id : in kfd_ioctl_set_debug_trap()
2994 args->clear_node_address_watch.gpu_id); in kfd_ioctl_set_debug_trap()
3003 switch (args->op) { in kfd_ioctl_set_debug_trap()
3009 args->enable.dbg_fd, in kfd_ioctl_set_debug_trap()
3010 (void __user *)args->enable.rinfo_ptr, in kfd_ioctl_set_debug_trap()
3011 &args->enable.rinfo_size); in kfd_ioctl_set_debug_trap()
3013 target->exception_enable_mask = args->enable.exception_mask; in kfd_ioctl_set_debug_trap()
3021 args->send_runtime_event.gpu_id, in kfd_ioctl_set_debug_trap()
3022 args->send_runtime_event.queue_id, in kfd_ioctl_set_debug_trap()
3023 args->send_runtime_event.exception_mask); in kfd_ioctl_set_debug_trap()
3027 args->set_exceptions_enabled.exception_mask); in kfd_ioctl_set_debug_trap()
3031 args->launch_override.override_mode, in kfd_ioctl_set_debug_trap()
3032 args->launch_override.enable_mask, in kfd_ioctl_set_debug_trap()
3033 args->launch_override.support_request_mask, in kfd_ioctl_set_debug_trap()
3034 &args->launch_override.enable_mask, in kfd_ioctl_set_debug_trap()
3035 &args->launch_override.support_request_mask); in kfd_ioctl_set_debug_trap()
3039 args->launch_mode.launch_mode); in kfd_ioctl_set_debug_trap()
3043 args->suspend_queues.num_queues, in kfd_ioctl_set_debug_trap()
3044 args->suspend_queues.grace_period, in kfd_ioctl_set_debug_trap()
3045 args->suspend_queues.exception_mask, in kfd_ioctl_set_debug_trap()
3046 (uint32_t *)args->suspend_queues.queue_array_ptr); in kfd_ioctl_set_debug_trap()
3050 r = resume_queues(target, args->resume_queues.num_queues, in kfd_ioctl_set_debug_trap()
3051 (uint32_t *)args->resume_queues.queue_array_ptr); in kfd_ioctl_set_debug_trap()
3055 args->set_node_address_watch.address, in kfd_ioctl_set_debug_trap()
3056 args->set_node_address_watch.mask, in kfd_ioctl_set_debug_trap()
3057 &args->set_node_address_watch.id, in kfd_ioctl_set_debug_trap()
3058 args->set_node_address_watch.mode); in kfd_ioctl_set_debug_trap()
3062 args->clear_node_address_watch.id); in kfd_ioctl_set_debug_trap()
3065 r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags); in kfd_ioctl_set_debug_trap()
3069 &args->query_debug_event.queue_id, in kfd_ioctl_set_debug_trap()
3070 &args->query_debug_event.gpu_id, in kfd_ioctl_set_debug_trap()
3071 args->query_debug_event.exception_mask, in kfd_ioctl_set_debug_trap()
3072 &args->query_debug_event.exception_mask); in kfd_ioctl_set_debug_trap()
3076 args->query_exception_info.source_id, in kfd_ioctl_set_debug_trap()
3077 args->query_exception_info.exception_code, in kfd_ioctl_set_debug_trap()
3078 args->query_exception_info.clear_exception, in kfd_ioctl_set_debug_trap()
3079 (void __user *)args->query_exception_info.info_ptr, in kfd_ioctl_set_debug_trap()
3080 &args->query_exception_info.info_size); in kfd_ioctl_set_debug_trap()
3084 args->queue_snapshot.exception_mask, in kfd_ioctl_set_debug_trap()
3085 (void __user *)args->queue_snapshot.snapshot_buf_ptr, in kfd_ioctl_set_debug_trap()
3086 &args->queue_snapshot.num_queues, in kfd_ioctl_set_debug_trap()
3087 &args->queue_snapshot.entry_size); in kfd_ioctl_set_debug_trap()
3091 args->device_snapshot.exception_mask, in kfd_ioctl_set_debug_trap()
3092 (void __user *)args->device_snapshot.snapshot_buf_ptr, in kfd_ioctl_set_debug_trap()
3093 &args->device_snapshot.num_devices, in kfd_ioctl_set_debug_trap()
3094 &args->device_snapshot.entry_size); in kfd_ioctl_set_debug_trap()
3097 pr_err("Invalid option: %i\n", args->op); in kfd_ioctl_set_debug_trap()
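Every debug-trap op funnels through one struct with a per-op union member, as the dispatch above shows. A sketch for one of the simpler ops, KFD_IOC_DBG_TRAP_SUSPEND_QUEUES; note the target must already be debug-enabled and, for most ops, ptrace-attached and runtime-enabled, per the checks at lines 2959-2984:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int dbg_suspend_queues(int kfd_fd, uint32_t target_pid,
                       uint32_t *queue_ids, uint32_t n_queues)
{
        struct kfd_ioctl_dbg_trap_args args = {
                .pid = target_pid,
                .op = KFD_IOC_DBG_TRAP_SUSPEND_QUEUES,
                .suspend_queues = {
                        .queue_array_ptr = (uintptr_t)queue_ids,
                        .num_queues = n_queues,
                        .grace_period = 0,
                },
        };

        return ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
}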