Lines Matching full:iommu
28 #include <linux/iommu-helper.h>
29 #include <linux/iommu.h>
31 #include <linux/amd-iommu.h>
36 #include <asm/iommu.h>
49 * to the IOMMU core, which will then use this information to split
53 * Traditionally the IOMMU core just handed us the mappings directly,
60 * If at some point we'd like to utilize the IOMMU core's new behavior,
77 * if iommu=pt is passed on the kernel cmd line.
87 * general struct to manage commands sent to an IOMMU
172 return dev->archdata.iommu; in get_dev_data()
263 if (dev->archdata.iommu) in iommu_init_device()
285 struct amd_iommu *iommu; in iommu_init_device() local
287 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
288 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
291 dev->archdata.iommu = dev_data; in iommu_init_device()
400 stats_dir = debugfs_create_dir("amd-iommu", NULL); in amd_iommu_stats_init()
451 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
513 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
518 spin_lock_irqsave(&iommu->lock, flags); in iommu_poll_events()
520 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
521 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
524 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
525 head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; in iommu_poll_events()
528 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
530 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_poll_events()
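
The iommu_poll_events() hits above show the usual MMIO ring-buffer consumer: read the head and tail registers, decode entries until head catches up with tail, then write the new head back so the hardware can reuse the space. A minimal user-space model of that arithmetic, assuming a hypothetical buffer size and 16-byte entries (the EVENT_ENTRY_SIZE used at line 525), with plain variables standing in for readl()/writel():

    #include <stdio.h>
    #include <stdint.h>

    #define EVT_BUF_SIZE     512   /* hypothetical event buffer size  */
    #define EVENT_ENTRY_SIZE 16    /* one event log entry is 16 bytes */

    static uint8_t  evt_buf[EVT_BUF_SIZE];
    static uint32_t mmio_evt_head;     /* stands in for MMIO_EVT_HEAD_OFFSET */
    static uint32_t mmio_evt_tail;     /* stands in for MMIO_EVT_TAIL_OFFSET */

    static void print_event(const uint8_t *evt)
    {
        /* The real iommu_print_event() decodes type, devid, address, flags. */
        printf("event at offset %ld\n", (long)(evt - evt_buf));
    }

    static void poll_events(void)
    {
        uint32_t head = mmio_evt_head;   /* readl(... EVT_HEAD ...) */
        uint32_t tail = mmio_evt_tail;   /* readl(... EVT_TAIL ...) */

        while (head != tail) {
            print_event(evt_buf + head);
            head = (head + EVENT_ENTRY_SIZE) % EVT_BUF_SIZE;
        }

        mmio_evt_head = head;            /* writel(head, ... EVT_HEAD ...) */
    }

    int main(void)
    {
        mmio_evt_tail = 3 * EVENT_ENTRY_SIZE;  /* pretend the IOMMU logged 3 events */
        poll_events();
        return 0;
    }
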
533 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) in iommu_handle_ppr_entry() argument
541 raw = (u64 *)(iommu->ppr_log + head); in iommu_handle_ppr_entry()
573 static void iommu_poll_ppr_log(struct amd_iommu *iommu) in iommu_poll_ppr_log() argument
578 if (iommu->ppr_log == NULL) in iommu_poll_ppr_log()
581 spin_lock_irqsave(&iommu->lock, flags); in iommu_poll_ppr_log()
583 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
584 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
589 iommu_handle_ppr_entry(iommu, head); in iommu_poll_ppr_log()
593 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
594 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
598 writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); in iommu_poll_ppr_log()
600 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_poll_ppr_log()
605 struct amd_iommu *iommu; in amd_iommu_int_thread() local
607 for_each_iommu(iommu) { in amd_iommu_int_thread()
608 iommu_poll_events(iommu); in amd_iommu_int_thread()
609 iommu_poll_ppr_log(iommu); in amd_iommu_int_thread()
622 * IOMMU command queuing functions
643 static void copy_cmd_to_buffer(struct amd_iommu *iommu, in copy_cmd_to_buffer() argument
649 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
650 tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; in copy_cmd_to_buffer()
655 /* Tell the IOMMU about it */ in copy_cmd_to_buffer()
656 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
803 static int iommu_queue_command_sync(struct amd_iommu *iommu, in iommu_queue_command_sync() argument
810 WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); in iommu_queue_command_sync()
813 spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
815 head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); in iommu_queue_command_sync()
816 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in iommu_queue_command_sync()
817 next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; in iommu_queue_command_sync()
818 left = (head - next_tail) % iommu->cmd_buf_size; in iommu_queue_command_sync()
826 copy_cmd_to_buffer(iommu, &sync_cmd, tail); in iommu_queue_command_sync()
828 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
836 copy_cmd_to_buffer(iommu, cmd, tail); in iommu_queue_command_sync()
839 iommu->need_sync = sync; in iommu_queue_command_sync()
841 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
846 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) in iommu_queue_command() argument
848 return iommu_queue_command_sync(iommu, cmd, true); in iommu_queue_command()
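
iommu_queue_command_sync() above manages the command buffer as the same kind of ring: next_tail is where the tail would land after appending the command, and (head - next_tail) % cmd_buf_size yields the free space because the unsigned subtraction wraps around. When that space gets too small, the driver first queues a completion-wait command and lets the hardware drain the queue before retrying. A stand-alone check of the modular free-space arithmetic, with hypothetical sizes in place of iommu->cmd_buf_size:

    #include <stdio.h>
    #include <stdint.h>

    #define CMD_BUF_SIZE 512u   /* hypothetical command buffer size */
    #define CMD_SIZE      16u   /* one IOMMU command is 16 bytes    */

    /* Free bytes left in the ring once one more command is appended. */
    static uint32_t bytes_left(uint32_t head, uint32_t tail)
    {
        uint32_t next_tail = (tail + CMD_SIZE) % CMD_BUF_SIZE;

        /* Unsigned subtraction wraps, so this also works when head < next_tail. */
        return (head - next_tail) % CMD_BUF_SIZE;
    }

    int main(void)
    {
        /* Empty ring: head == tail, almost the whole buffer is free. */
        printf("empty:       %u bytes left\n", (unsigned)bytes_left(0, 0));

        /* Nearly full ring: tail has almost caught up with head. */
        printf("nearly full: %u bytes left\n", (unsigned)bytes_left(32, 0));

        return 0;
    }

With a 512-byte buffer the empty case reports 496 free bytes: one slot is effectively kept in reserve so that head == tail always means "empty" rather than "full".
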
853 * buffer of an IOMMU
855 static int iommu_completion_wait(struct amd_iommu *iommu) in iommu_completion_wait() argument
861 if (!iommu->need_sync) in iommu_completion_wait()
866 ret = iommu_queue_command_sync(iommu, &cmd, false); in iommu_completion_wait()
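
iommu_completion_wait() above only queues anything when iommu->need_sync is set (line 839 sets it whenever a command was queued), and the completion-wait command it sends makes the IOMMU write a semaphore in memory that the CPU then polls. The sketch below models only that polling idea with a volatile flag and a bounded spin; the names and the timeout are illustrative, not the driver's API:

    #include <stdio.h>

    /* Written by "the hardware" when the completion-wait command retires. */
    static volatile unsigned long long completion_sem;

    /* Illustrative stand-in for queueing a completion-wait command. */
    static void queue_completion_wait(void)
    {
        completion_sem = 1;   /* in this model the command completes immediately */
    }

    static int wait_on_sem(volatile unsigned long long *sem)
    {
        unsigned long i = 0;

        while (*sem == 0 && i < 1000000)  /* bounded spin instead of a real timeout */
            ++i;

        return (*sem == 0) ? -1 : 0;      /* -1 models a timeout error */
    }

    int main(void)
    {
        int need_sync = 1;                /* a command was queued earlier */

        if (need_sync) {
            queue_completion_wait();
            if (wait_on_sem(&completion_sem))
                fprintf(stderr, "completion-wait loop timed out\n");
            else
                printf("command queue drained\n");
        }
        return 0;
    }
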
873 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
879 return iommu_queue_command(iommu, &cmd); in iommu_flush_dte()
882 static void iommu_flush_dte_all(struct amd_iommu *iommu) in iommu_flush_dte_all() argument
887 iommu_flush_dte(iommu, devid); in iommu_flush_dte_all()
889 iommu_completion_wait(iommu); in iommu_flush_dte_all()
896 static void iommu_flush_tlb_all(struct amd_iommu *iommu) in iommu_flush_tlb_all() argument
904 iommu_queue_command(iommu, &cmd); in iommu_flush_tlb_all()
907 iommu_completion_wait(iommu); in iommu_flush_tlb_all()
910 static void iommu_flush_all(struct amd_iommu *iommu) in iommu_flush_all() argument
916 iommu_queue_command(iommu, &cmd); in iommu_flush_all()
917 iommu_completion_wait(iommu); in iommu_flush_all()
920 void iommu_flush_all_caches(struct amd_iommu *iommu) in iommu_flush_all_caches() argument
922 if (iommu_feature(iommu, FEATURE_IA)) { in iommu_flush_all_caches()
923 iommu_flush_all(iommu); in iommu_flush_all_caches()
925 iommu_flush_dte_all(iommu); in iommu_flush_all_caches()
926 iommu_flush_tlb_all(iommu); in iommu_flush_all_caches()
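
iommu_flush_all_caches() above chooses between two strategies: hardware that advertises FEATURE_IA accepts a single invalidate-all command (iommu_flush_all()), while older hardware needs every device table entry and every domain TLB flushed separately. A condensed sketch of that branch, with stub functions instead of the real command builders:

    #include <stdio.h>
    #include <stdbool.h>

    /* Stub flush operations; the real ones queue IOMMU commands. */
    static void flush_all(void)       { printf("invalidate-all command\n"); }
    static void flush_dte_all(void)   { printf("flush every device table entry\n"); }
    static void flush_tlb_all(void)   { printf("flush every domain TLB\n"); }
    static void completion_wait(void) { printf("wait for completion\n"); }

    static void flush_all_caches(bool has_feature_ia)
    {
        if (has_feature_ia) {
            /* One command invalidates everything at once. */
            flush_all();
            completion_wait();
        } else {
            /* Fall back to flushing DTEs and TLBs separately. */
            flush_dte_all();
            flush_tlb_all();
        }
    }

    int main(void)
    {
        flush_all_caches(true);
        flush_all_caches(false);
        return 0;
    }
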
936 struct amd_iommu *iommu; in device_flush_iotlb() local
941 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
945 return iommu_queue_command(iommu, &cmd); in device_flush_iotlb()
953 struct amd_iommu *iommu; in device_flush_dte() local
956 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
958 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
971 * page. Otherwise it flushes the whole TLB of the IOMMU.
987 * Devices of this domain are behind this IOMMU in __domain_flush_pages()
1031 * Devices of this domain are behind this IOMMU in domain_flush_complete()
1182 * supporting all features of AMD IOMMU page tables like level skipping
1271 * this specific IOMMU.
1273 static int iommu_for_unity_map(struct amd_iommu *iommu, in iommu_for_unity_map() argument
1280 if (amd_iommu_rlookup_table[bdf] == iommu) in iommu_for_unity_map()
1316 * Init the unity mappings for a specific IOMMU in the system
1319 * the default domain DMA of that IOMMU if necessary.
1321 static int iommu_init_unity_mappings(struct amd_iommu *iommu) in iommu_init_unity_mappings() argument
1327 if (!iommu_for_unity_map(iommu, entry)) in iommu_init_unity_mappings()
1329 ret = dma_ops_unity_map(iommu->default_dom, entry); in iommu_init_unity_mappings()
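
iommu_init_unity_mappings() above walks the global list of unity-map entries but only applies those whose device range is actually handled by this IOMMU; iommu_for_unity_map() decides that by checking the rlookup table for each BDF in the entry's range, and matching entries are then mapped into the IOMMU's default DMA domain. A toy model of that filtering step; the entry layout and array sizes here are illustrative only:

    #include <stdio.h>

    #define MAX_BDF 8   /* tiny rlookup table for the example */

    struct unity_entry {
        int devid_start;
        int devid_end;
    };

    /* Which IOMMU owns which device (index = BDF), as in amd_iommu_rlookup_table. */
    static int rlookup[MAX_BDF] = { 0, 0, 0, 1, 1, 1, 1, 0 };

    /* True if any device in the entry's range is behind the given IOMMU. */
    static int entry_for_iommu(const struct unity_entry *e, int iommu)
    {
        for (int bdf = e->devid_start; bdf <= e->devid_end; bdf++)
            if (rlookup[bdf] == iommu)
                return 1;
        return 0;
    }

    int main(void)
    {
        struct unity_entry entries[] = { { 0, 2 }, { 3, 5 } };
        int iommu = 1;

        for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
            if (!entry_for_iommu(&entries[i], iommu))
                continue;   /* not behind this IOMMU, skip */
            printf("map unity entry %u into IOMMU %d's default domain\n", i, iommu);
        }
        return 0;
    }
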
1360 * interface functions. They work like the allocators in the other IOMMU
1402 struct amd_iommu *iommu; in alloc_new_range() local
1455 for_each_iommu(iommu) { in alloc_new_range()
1456 if (iommu->exclusion_start && in alloc_new_range()
1457 iommu->exclusion_start >= dma_dom->aperture[index]->offset in alloc_new_range()
1458 && iommu->exclusion_start < dma_dom->aperture_size) { in alloc_new_range()
1460 int pages = iommu_num_pages(iommu->exclusion_start, in alloc_new_range()
1461 iommu->exclusion_length, in alloc_new_range()
1463 startpage = iommu->exclusion_start >> PAGE_SHIFT; in alloc_new_range()
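
The alloc_new_range() hits above reserve the IOMMU's exclusion range inside a freshly added aperture so the address allocator never hands those addresses to a device. The page count comes from iommu_num_pages() (linux/iommu-helper.h, included at line 28), which rounds a byte range that may start mid-page up to whole IO pages. A stand-alone check of that rounding, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_SHIFT 12

    /* Same arithmetic as iommu_num_pages() in linux/iommu-helper.h. */
    static unsigned long num_pages(unsigned long addr, unsigned long len,
                                   unsigned long io_page_size)
    {
        unsigned long size = (addr & (io_page_size - 1)) + len;

        return (size + io_page_size - 1) / io_page_size;  /* DIV_ROUND_UP */
    }

    int main(void)
    {
        unsigned long exclusion_start  = 0x1100;  /* starts mid-page          */
        unsigned long exclusion_length = 0x2000;  /* two pages worth of bytes */

        unsigned long pages     = num_pages(exclusion_start, exclusion_length,
                                            PAGE_SIZE);
        unsigned long startpage = exclusion_start >> PAGE_SHIFT;

        /* 0x100 + 0x2000 = 0x2100 bytes -> 3 pages, starting at page 1 */
        printf("reserve %lu page(s) starting at page %lu\n", pages, startpage);
        return 0;
    }
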
1605 * allocated for every IOMMU as the default domain. If device isolation
1882 struct amd_iommu *iommu; in do_attach() local
1885 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1894 domain->dev_iommu[iommu->index] += 1; in do_attach()
1903 struct amd_iommu *iommu; in do_detach() local
1905 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
1908 dev_data->domain->dev_iommu[iommu->index] -= 1; in do_detach()
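
do_attach()/do_detach() above keep a per-IOMMU device count in domain->dev_iommu[]; the flush paths (the "Devices of this domain are behind this IOMMU" checks at lines 987 and 1031) then only queue commands on IOMMUs whose count is non-zero. A toy model of that bookkeeping, with the structures stripped down to what the example needs:

    #include <stdio.h>

    #define MAX_IOMMUS 4

    struct domain {
        unsigned dev_iommu[MAX_IOMMUS];  /* devices per IOMMU in this domain */
    };

    static void do_attach(struct domain *dom, int iommu_index)
    {
        dom->dev_iommu[iommu_index] += 1;
    }

    static void do_detach(struct domain *dom, int iommu_index)
    {
        dom->dev_iommu[iommu_index] -= 1;
    }

    /* Only flush on IOMMUs that actually have devices of this domain behind them. */
    static void domain_flush(const struct domain *dom)
    {
        for (int i = 0; i < MAX_IOMMUS; i++) {
            if (!dom->dev_iommu[i])
                continue;
            printf("queue flush command on IOMMU %d\n", i);
        }
    }

    int main(void)
    {
        struct domain dom = { { 0 } };

        do_attach(&dom, 1);   /* device behind IOMMU 1 joins the domain      */
        do_attach(&dom, 1);
        do_attach(&dom, 3);
        do_detach(&dom, 3);   /* last device behind IOMMU 3 leaves again     */

        domain_flush(&dom);   /* only IOMMU 1 gets the command now           */
        return 0;
    }
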
2090 * left the caches in the IOMMU dirty. So we have to flush in attach_device()
2195 struct amd_iommu *iommu; in device_change_notifier() local
2203 iommu = amd_iommu_rlookup_table[devid]; in device_change_notifier()
2245 iommu_completion_wait(iommu); in device_change_notifier()
2268 * finds the corresponding IOMMU, the protection domain and the
2412 * mapping functions provided with this IOMMU driver.
2597 * device which is not handled by an AMD IOMMU in the system.
2885 * The function which clues the AMD IOMMU driver into dma_ops.
2895 struct amd_iommu *iommu; in amd_iommu_init_dma_ops() local
2899 * first allocate a default protection domain for every IOMMU we in amd_iommu_init_dma_ops()
2903 for_each_iommu(iommu) { in amd_iommu_init_dma_ops()
2904 iommu->default_dom = dma_ops_domain_alloc(); in amd_iommu_init_dma_ops()
2905 if (iommu->default_dom == NULL) in amd_iommu_init_dma_ops()
2907 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; in amd_iommu_init_dma_ops()
2908 ret = iommu_init_unity_mappings(iommu); in amd_iommu_init_dma_ops()
2934 for_each_iommu(iommu) { in amd_iommu_init_dma_ops()
2935 if (iommu->default_dom) in amd_iommu_init_dma_ops()
2936 dma_ops_domain_free(iommu->default_dom); in amd_iommu_init_dma_ops()
2944 * The following functions belong to the exported interface of AMD IOMMU
2946 * This interface allows access to lower level functions of the IOMMU
3070 struct iommu_dev_data *dev_data = dev->archdata.iommu; in amd_iommu_detach_device()
3071 struct amd_iommu *iommu; in amd_iommu_detach_device() local
3082 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_detach_device()
3083 if (!iommu) in amd_iommu_detach_device()
3086 iommu_completion_wait(iommu); in amd_iommu_detach_device()
3094 struct amd_iommu *iommu; in amd_iommu_attach_device() local
3100 dev_data = dev->archdata.iommu; in amd_iommu_attach_device()
3102 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
3103 if (!iommu) in amd_iommu_attach_device()
3111 iommu_completion_wait(iommu); in amd_iommu_attach_device()
3196 struct iommu_dev_data *dev_data = dev->archdata.iommu; in amd_iommu_device_group()
3229 * The next functions do a basic initialization of the IOMMU for pass through
3232 * In passthrough mode the IOMMU is initialized and enabled but not used for
3241 struct amd_iommu *iommu; in amd_iommu_init_passthrough() local
3258 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_init_passthrough()
3259 if (!iommu) in amd_iommu_init_passthrough()
3299 /* Page-table is not visible to IOMMU anymore, so free it */ in amd_iommu_domain_direct_map()
3366 * IOMMU TLB needs to be flushed before Device TLB to in __flush_pasid()
3367 * prevent device TLB refill from IOMMU TLB in __flush_pasid()
3378 /* Wait until IOMMU TLB flushes are complete */ in __flush_pasid()
3383 struct amd_iommu *iommu; in __flush_pasid() local
3389 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
3394 ret = iommu_queue_command(iommu, &cmd); in __flush_pasid()
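
__flush_pasid() above enforces an ordering: the IOMMU TLBs are invalidated first and the driver waits for those flushes to complete before it sends IOTLB invalidations to ATS-capable devices, so a device TLB miss cannot be refilled from a stale IOMMU TLB entry in between (see the comments at lines 3366-3378). A compressed sketch of just that ordering; every helper here is a placeholder, not the driver's function:

    #include <stdio.h>

    /* Placeholders for queueing the respective invalidate commands. */
    static void flush_iommu_tlb(int pasid)
    {
        printf("invalidate IOMMU TLB pages, pasid %d\n", pasid);
    }

    static void flush_device_iotlb(int devid, int pasid)
    {
        printf("invalidate IOTLB pages for device %d, pasid %d\n", devid, pasid);
    }

    static void wait_for_completion(void)
    {
        printf("completion wait\n");
    }

    static void flush_pasid(int pasid, const int *ats_devs, int n)
    {
        /* 1. Invalidate the IOMMU TLB for this PASID. */
        flush_iommu_tlb(pasid);

        /* 2. Wait until the IOMMU TLB flush is complete; otherwise a device
         *    TLB miss could be refilled from the stale IOMMU TLB entry.     */
        wait_for_completion();

        /* 3. Only now invalidate the IOTLBs of ATS-enabled devices. */
        for (int i = 0; i < n; i++)
            flush_device_iotlb(ats_devs[i], pasid);

        wait_for_completion();
    }

    int main(void)
    {
        int ats_devs[] = { 0x08, 0x10 };   /* hypothetical device IDs */

        flush_pasid(1, ats_devs, 2);
        return 0;
    }
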
3552 struct amd_iommu *iommu; in amd_iommu_complete_ppr() local
3558 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3563 return iommu_queue_command(iommu, &cmd); in amd_iommu_complete_ppr()