Lines matching "full:iommu" in arch/powerpc/platforms/cell/iommu.c

2  * IOMMU implementation for Cell Broadband Processor Architecture
35 #include <asm/iommu.h>
105 /* IOMMU sizing */
114 struct cbe_iommu *iommu;
141 static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
148 reg = iommu->xlate_regs + IOC_IOPT_CacheInvd; in invalidate_tce_cache()
205 invalidate_tce_cache(window->iommu, io_pte, npages); in tce_build_cell()
228 __pa(window->iommu->pad_page) | in tce_free_cell()
239 invalidate_tce_cache(window->iommu, io_pte, npages); in tce_free_cell()
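Lines 205-239 show the pattern shared by tce_build_cell() and tce_free_cell(): write (or reset) a run of I/O PTEs, then invalidate the IOC's TCE cache so stale translations are dropped. A minimal userspace sketch of the build side; the IOPTE_* bit values below are illustrative placeholders, not the real CBE_IOPTE_* encodings:

    #include <stdint.h>

    /* Placeholder bits; the real CBE_IOPTE_* layout differs. */
    #define IOPTE_VALID    (1ull << 63)
    #define IOPTE_WRITABLE (1ull << 62)
    #define IOPTE_RPN_MASK (~0xfffull)

    /* Write PTEs for npages starting at paddr; in the driver a memory
     * barrier and invalidate_tce_cache() follow before the IOC may
     * re-fetch the entries. */
    static void tce_build_sketch(uint64_t *io_pte, long npages,
                                 uint64_t paddr, uint64_t ioid)
    {
        uint64_t base_pte = IOPTE_VALID | IOPTE_WRITABLE | (ioid & 0x7ff);

        for (long i = 0; i < npages; i++, paddr += 4096)
            io_pte[i] = base_pte | (paddr & IOPTE_RPN_MASK);
    }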
245 struct cbe_iommu *iommu = data; in ioc_interrupt()
247 stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); in ioc_interrupt()
251 printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat); in ioc_interrupt()
263 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat); in ioc_interrupt()
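The handler at lines 245-263 reads IOC_IO_ExcpStat, reports the fault, and writes the same value back to acknowledge it. A sketch of that write-to-clear pattern, with reg standing in for iommu->xlate_regs + IOC_IO_ExcpStat:

    #include <stdint.h>

    static uint64_t ack_dma_exception_sketch(volatile uint64_t *reg)
    {
        uint64_t stat = *reg;  /* in_be64() in the driver */
        /* ... report the fault (printk in the driver) ... */
        *reg = stat;           /* out_be64() write-back clears the latch */
        return stat;
    }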
280 printk(KERN_ERR "iommu: can't get address for %s\n", in cell_iommu_find_ioc()
308 static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
317 pr_debug("%s: iommu[%d]: segments: %lu\n", in cell_iommu_setup_stab()
318 __func__, iommu->nid, segments); in cell_iommu_setup_stab()
322 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size)); in cell_iommu_setup_stab()
324 iommu->stab = page_address(page); in cell_iommu_setup_stab()
325 memset(iommu->stab, 0, stab_size); in cell_iommu_setup_stab()
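Lines 317-325 size and allocate the segment table: one entry per I/O segment up to the top of the highest window, allocated node-local and zeroed. A sketch of the sizing; the IO_SEGMENT_SHIFT value here is our assumption:

    #include <stdint.h>
    #include <stddef.h>

    #define IO_SEGMENT_SHIFT 28  /* assumed segment size: 256MB */

    static size_t stab_size_sketch(uint64_t dbase, uint64_t dsize,
                                   uint64_t fbase, uint64_t fsize)
    {
        /* cover whichever window (dynamic or fixed) reaches higher */
        uint64_t top = (dbase + dsize > fbase + fsize) ? dbase + dsize
                                                       : fbase + fsize;
        return (size_t)((top >> IO_SEGMENT_SHIFT) * sizeof(uint64_t));
    }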
328 static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
345 pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__, in cell_iommu_alloc_ptab()
346 iommu->nid, ptab_size, get_order(ptab_size)); in cell_iommu_alloc_ptab()
347 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size)); in cell_iommu_alloc_ptab()
356 pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n", in cell_iommu_alloc_ptab()
357 __func__, iommu->nid, iommu->stab, ptab, in cell_iommu_alloc_ptab()
374 pr_debug("Setting up IOMMU stab:\n"); in cell_iommu_alloc_ptab()
380 iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) * in cell_iommu_alloc_ptab()
382 pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]); in cell_iommu_alloc_ptab()
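Line 380 is the heart of cell_iommu_alloc_ptab(): each valid segment-table entry combines the flag bits in reg with the physical address of that segment's slice of the linear page table, stepping by n_pte_pages 4K pages per segment. The same computation in isolation; ste_flags stands in for the valid bit, page-size field and page-count encoding:

    #include <stdint.h>

    static void fill_stab_sketch(uint64_t *stab, uint64_t ptab_pa,
                                 unsigned start_seg, unsigned segments,
                                 uint64_t n_pte_pages, uint64_t ste_flags)
    {
        for (unsigned i = start_seg; i < start_seg + segments; i++)
            stab[i] = ste_flags |
                      (ptab_pa + (n_pte_pages << 12) * (i - start_seg));
    }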
388 static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
394 if (cell_iommu_find_ioc(iommu->nid, &xlate_base)) in cell_iommu_enable_hardware()
396 __func__, iommu->nid); in cell_iommu_enable_hardware()
398 iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size); in cell_iommu_enable_hardware()
399 iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset; in cell_iommu_enable_hardware()
404 /* setup interrupts for the iommu. */ in cell_iommu_enable_hardware()
405 reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); in cell_iommu_enable_hardware()
406 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, in cell_iommu_enable_hardware()
408 out_be64(iommu->xlate_regs + IOC_IO_ExcpMask, in cell_iommu_enable_hardware()
412 IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT)); in cell_iommu_enable_hardware()
415 ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu); in cell_iommu_enable_hardware()
418 /* set the IOC segment table origin register (and turn on the iommu) */ in cell_iommu_enable_hardware()
419 reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW; in cell_iommu_enable_hardware()
420 out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg); in cell_iommu_enable_hardware()
421 in_be64(iommu->xlate_regs + IOC_IOST_Origin); in cell_iommu_enable_hardware()
424 reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE; in cell_iommu_enable_hardware()
425 out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg); in cell_iommu_enable_hardware()
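Condensing lines 405-425, the enable sequence is: acknowledge any pending exception, install the IRQ handler, write the segment-table origin with its enable bits, read the register back to flush the MMIO write (line 421), then set translate-enable in the command config. A sketch with stand-in register pointers:

    #include <stdint.h>

    static void enable_iommu_sketch(volatile uint64_t *excp_stat,
                                    volatile uint64_t *iost_origin,
                                    volatile uint64_t *iocmd_cfg,
                                    uint64_t stab_pa,
                                    uint64_t origin_enable_bits,
                                    uint64_t te_bit)
    {
        *excp_stat = *excp_stat;      /* write-back clears pending faults */
        *iost_origin = origin_enable_bits | stab_pa;
        (void)*iost_origin;           /* read-back flushes the write */
        *iocmd_cfg |= te_bit;         /* translation on */
    }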
428 static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
431 cell_iommu_setup_stab(iommu, base, size, 0, 0); in cell_iommu_setup_hardware()
432 iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0, in cell_iommu_setup_hardware()
434 cell_iommu_enable_hardware(iommu); in cell_iommu_setup_hardware()
438 static struct iommu_window *find_window(struct cbe_iommu *iommu,
445 list_for_each_entry(window, &(iommu->windows), list) {
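find_window() walks the per-iommu window list (line 445). A plain-C rendering; matching on exact offset and size is our assumption about the loop body, which the listing elides:

    #include <stddef.h>

    struct window_sketch {
        unsigned long offset, size;
        struct window_sketch *next;
    };

    static struct window_sketch *
    find_window_sketch(struct window_sketch *head,
                       unsigned long offset, unsigned long size)
    {
        for (struct window_sketch *w = head; w; w = w->next)
            if (w->offset == offset && w->size == size)
                return w;
        return NULL;
    }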
460 printk(KERN_WARNING "iommu: missing ioid for %s using 0\n", in cell_iommu_get_ioid()
469 cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
479 window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid); in cell_iommu_setup_window()
485 window->iommu = iommu; in cell_iommu_setup_window()
488 window->table.it_base = (unsigned long)iommu->ptab; in cell_iommu_setup_window()
489 window->table.it_index = iommu->nid; in cell_iommu_setup_window()
493 iommu_init_table(&window->table, iommu->nid); in cell_iommu_setup_window()
501 list_add(&window->list, &iommu->windows); in cell_iommu_setup_window()
506 /* We need to map and reserve the first IOMMU page since it's used in cell_iommu_setup_window()
513 page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0); in cell_iommu_setup_window()
515 iommu->pad_page = page_address(page); in cell_iommu_setup_window()
516 clear_page(iommu->pad_page); in cell_iommu_setup_window()
520 (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL); in cell_iommu_setup_window()
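Lines 506-520 reserve the window's first I/O page: a node-local page is allocated and zeroed (lines 513-516), and TCE slot 0 is mapped at it so DMA to I/O address 0 lands in harmless memory rather than faulting. A sketch of the reservation; the names here are ours, not the driver's:

    #include <stdint.h>

    static void reserve_first_page_sketch(unsigned long *alloc_bitmap,
                                          uint64_t *io_pte,
                                          uint64_t pad_page_pte)
    {
        alloc_bitmap[0] |= 1ul;     /* TCE slot 0 is never handed out */
        io_pte[0] = pad_page_pte;   /* map it at the zeroed pad page */
    }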
546 struct cbe_iommu *iommu; in cell_get_iommu_table()
549 * node's iommu. We -might- do something smarter later though it may in cell_get_iommu_table()
552 iommu = cell_iommu_for_node(dev_to_node(dev)); in cell_get_iommu_table()
553 if (iommu == NULL || list_empty(&iommu->windows)) { in cell_get_iommu_table()
554 printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n", in cell_get_iommu_table()
559 window = list_entry(iommu->windows.next, struct iommu_window, list); in cell_get_iommu_table()
717 struct cbe_iommu *iommu; in cell_iommu_alloc()
723 printk(KERN_ERR "iommu: failed to get node for %s\n", in cell_iommu_alloc()
727 pr_debug("iommu: setting up iommu for node %d (%s)\n", in cell_iommu_alloc()
730 /* XXX todo: If we can have multiple windows on the same IOMMU, which in cell_iommu_alloc()
732 * iommu for that node is already setup. in cell_iommu_alloc()
735 * multiple window support since the cell iommu supports per-page ioids in cell_iommu_alloc()
739 printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n", in cell_iommu_alloc()
746 iommu = &iommus[i]; in cell_iommu_alloc()
747 iommu->stab = NULL; in cell_iommu_alloc()
748 iommu->nid = nid; in cell_iommu_alloc()
749 snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i); in cell_iommu_alloc()
750 INIT_LIST_HEAD(&iommu->windows); in cell_iommu_alloc()
752 return iommu; in cell_iommu_alloc()
758 struct cbe_iommu *iommu; in cell_iommu_init_one()
761 iommu = cell_iommu_alloc(np); in cell_iommu_init_one()
762 if (!iommu) in cell_iommu_init_one()
772 cell_iommu_setup_hardware(iommu, base, size); in cell_iommu_init_one()
775 cell_iommu_setup_window(iommu, np, base, size, in cell_iommu_init_one()
794 pr_debug("iommu: cleaning up iommu on node %d\n", node); in cell_disable_iommus()
812 /* When no iommu is present, we use direct DMA ops */ in cell_iommu_init_disabled()
845 * all of physical memory. If not, we force enable IOMMU in cell_iommu_init_disabled()
848 printk(KERN_WARNING "iommu: force-enabled, dma window" in cell_iommu_init_disabled()
859 printk("iommu: disabled, direct DMA offset is 0x%lx\n", in cell_iommu_init_disabled()
866 * Fixed IOMMU mapping support
868 * This code adds support for setting up a fixed IOMMU mapping on certain
875 * we setup the fixed mapping immediately above the normal IOMMU window.
878 * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
884 * mapping above the normal IOMMU window as we would run out of address space.
885 * Instead we move the normal IOMMU window to coincide with the hash page
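The comment's 4GB example, as arithmetic: with the dynamic window at 0-2GB the fixed window starts at 2GB, so a 64-bit device targets cpu_paddr + fixed_base (1GB becomes 3GB), plus whatever offset the firmware encodes in "dma-ranges":

    #include <stdint.h>

    static uint64_t fixed_dma_addr(uint64_t cpu_paddr, uint64_t fixed_base,
                                   uint64_t fw_offset)
    {
        return cpu_paddr + fixed_base + fw_offset;
    }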
914 dev_dbg(dev, "iommu: no dma-ranges found\n"); in cell_iommu_get_fixed_address()
941 dev_dbg(dev, "iommu: no suitable range found!\n"); in cell_iommu_get_fixed_address()
957 dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n"); in dma_set_mask_and_switch()
960 dev_dbg(dev, "iommu: not 64-bit, using default ops\n"); in dma_set_mask_and_switch()
978 dev_dbg(dev, "iommu: fixed addr = %llx\n", addr); in cell_dma_dev_setup_fixed()
990 pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n", in insert_16M_pte()
996 static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
1002 ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24); in cell_iommu_setup_fixed_ptab()
1006 pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase); in cell_iommu_setup_fixed_ptab()
1012 pr_info("IOMMU: Using weak ordering for fixed mapping\n"); in cell_iommu_setup_fixed_ptab()
1014 pr_info("IOMMU: Using strong ordering for fixed mapping\n"); in cell_iommu_setup_fixed_ptab()
1022 pr_debug("iommu: fixed/dynamic overlap, skipping\n"); in cell_iommu_setup_fixed_ptab()
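The debug line at 990 names the quantities insert_16M_pte() derives: which segment a 16MB page falls in, and its page slot within that segment. A sketch of that split; the segment shift and the one-4K-page-of-PTEs-per-segment layout are our assumptions:

    #include <stdint.h>

    #define IO_SEGMENT_SHIFT 28  /* assumed: 256MB segments */
    #define SHIFT_16M        24

    static void insert_16m_pte_sketch(uint64_t addr, uint64_t *ptab,
                                      uint64_t base_pte)
    {
        uint64_t segment = addr >> IO_SEGMENT_SHIFT;
        /* 16 possible 16MB pages per 256MB segment */
        uint64_t offset = (addr >> SHIFT_16M) &
                          ((1ull << (IO_SEGMENT_SHIFT - SHIFT_16M)) - 1);
        uint64_t *seg_ptes = ptab + segment * (4096 / sizeof(uint64_t));

        seg_ptes[offset] = base_pte | (addr & ~((1ull << SHIFT_16M) - 1));
    }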
1035 struct cbe_iommu *iommu; in cell_iommu_fixed_mapping_init()
1043 pr_debug("iommu: fixed mapping disabled, no axons found\n"); in cell_iommu_fixed_mapping_init()
1052 pr_debug("iommu: no dma-ranges found, no fixed mapping\n"); in cell_iommu_fixed_mapping_init()
1057 * dynamic region, so find the top of the largest IOMMU window in cell_iommu_fixed_mapping_init()
1080 pr_debug("iommu: htab is NULL, on LPAR? Huh?\n"); in cell_iommu_fixed_mapping_init()
1089 pr_debug("iommu: hash window not segment aligned\n"); in cell_iommu_fixed_mapping_init()
1098 pr_debug("iommu: hash window doesn't fit in" in cell_iommu_fixed_mapping_init()
1109 iommu = cell_iommu_alloc(np); in cell_iommu_fixed_mapping_init()
1110 BUG_ON(!iommu); in cell_iommu_fixed_mapping_init()
1119 printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx " in cell_iommu_fixed_mapping_init()
1120 "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase, in cell_iommu_fixed_mapping_init()
1123 cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize); in cell_iommu_fixed_mapping_init()
1124 iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0, in cell_iommu_fixed_mapping_init()
1126 cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, in cell_iommu_fixed_mapping_init()
1128 cell_iommu_enable_hardware(iommu); in cell_iommu_fixed_mapping_init()
1129 cell_iommu_setup_window(iommu, np, dbase, dsize, 0); in cell_iommu_fixed_mapping_init()
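cell_iommu_fixed_mapping_init() implements the placement policy described in the block comment at lines 866-885. A sketch of the two cases; the 2GB dynamic size is the comment's example, while the driver actually takes the window geometry from "dma-ranges":

    #include <stdint.h>

    struct windows_sketch { uint64_t dbase, dsize, fbase, fsize; };

    static struct windows_sketch place_windows_sketch(uint64_t ram_size,
                                                      uint64_t htab_base,
                                                      uint64_t htab_size)
    {
        struct windows_sketch w;

        if (ram_size < (30ull << 30)) {
            /* fixed window sits immediately above the dynamic one */
            w.dbase = 0;         w.dsize = 2ull << 30;
            w.fbase = w.dsize;   w.fsize = ram_size;
        } else {
            /* dynamic window overlays the hash page table;
             * the fixed mapping covers 0-32GB */
            w.dbase = htab_base; w.dsize = htab_size;
            w.fbase = 0;         w.fsize = 32ull << 30;
        }
        return w;
    }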
1187 /* If IOMMU is disabled or we have little enough RAM to not need in cell_iommu_init()
1190 * Note: should we make sure we have the IOMMU actually disabled ? in cell_iommu_init()
1206 /* Create an iommu for each /axon node. */ in cell_iommu_init()
1213 /* Create an iommu for each toplevel /pci-internal node for in cell_iommu_init()
1222 /* Setup default PCI iommu ops */ in cell_iommu_init()