Lines Matching +full:broken +full:- +full:turn +full:- +full:around

1 // SPDX-License-Identifier: GPL-2.0
29 #include <asm/io-unit.h>
75 #define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
98 #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
109 /* XXX should we hyper_flush_whole_icache here - Anton */
158 if (size & (minsz - 1)) { in __srmmu_get_nocache()
161 size += minsz - 1; in __srmmu_get_nocache()
168 if (offset == -1) { in __srmmu_get_nocache()
215 if (vaddr & (size - 1)) { in srmmu_free_nocache()
220 offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT; in srmmu_free_nocache()
243 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
335 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); in get_pgd_fast()
357 spin_lock(&mm->page_table_lock); in pte_alloc_one()
362 spin_unlock(&mm->page_table_lock); in pte_alloc_one()
372 spin_lock(&mm->page_table_lock); in pte_free()
375 spin_unlock(&mm->page_table_lock); in pte_free()
380 /* context handling - a dynamically sized pool is used */
381 #define NO_CONTEXT -1
399 entry->next->prev = entry->prev; in remove_from_ctx_list()
400 entry->prev->next = entry->next; in remove_from_ctx_list()
405 entry->next = head; in add_to_ctx_list()
406 (entry->prev = head->prev)->next = entry; in add_to_ctx_list()
407 head->prev = entry; in add_to_ctx_list()
421 mm->context = ctxp->ctx_number; in alloc_context()
422 ctxp->ctx_mm = mm; in alloc_context()
426 if (ctxp->ctx_mm == old_mm) in alloc_context()
427 ctxp = ctxp->next; in alloc_context()
430 flush_cache_mm(ctxp->ctx_mm); in alloc_context()
431 flush_tlb_mm(ctxp->ctx_mm); in alloc_context()
434 ctxp->ctx_mm->context = NO_CONTEXT; in alloc_context()
435 ctxp->ctx_mm = mm; in alloc_context()
436 mm->context = ctxp->ctx_number; in alloc_context()
462 clist->ctx_number = ctx; in sparc_context_init()
463 clist->ctx_mm = NULL; in sparc_context_init()
476 if (mm->context == NO_CONTEXT) { in switch_mm()
480 srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); in switch_mm()
489 srmmu_set_context(mm->context); in switch_mm()
513 * 36-bit physical address on the I/O space lines... in srmmu_mapioaddr()
525 len -= PAGE_SIZE; in srmmu_mapiorange()
555 len -= PAGE_SIZE; in srmmu_unmapiorange()
597 if ((ctx1 = vma->vm_mm->context) != -1) {
670 * around 8mb mapped for us.
708 if (start > (0xffffffffUL - PMD_SIZE)) in srmmu_early_allocate_ptable_skeleton()
743 if (start > (0xffffffffUL - PMD_SIZE)) in srmmu_allocate_ptable_skeleton()
767 * This is much cleaner than poking around physical address space
781 int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */ in srmmu_inherit_prom_mappings()
785 break; /* probably wrap around */ in srmmu_inherit_prom_mappings()
797 addr = start - PAGE_SIZE; in srmmu_inherit_prom_mappings()
846 /* Create a third-level SRMMU 16MB page mapping. */
918 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8); in srmmu_paging_init()
935 srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE)); in srmmu_paging_init()
949 local_ops->tlb_all(); in srmmu_paging_init()
959 __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP); in srmmu_paging_init()
1002 mm->context = NO_CONTEXT; in init_new_context()
1010 if (mm->context != NO_CONTEXT) { in destroy_context()
1012 srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir); in destroy_context()
1015 free_context(mm->context); in destroy_context()
1017 mm->context = NO_CONTEXT; in destroy_context()
1043 vac_line_size = prom_getint(nd, "cache-line-size"); in init_vac_layout()
1044 if (vac_line_size == -1) { in init_vac_layout()
1045 prom_printf("can't determine cache-line-size, halting.\n"); in init_vac_layout()
1048 cache_lines = prom_getint(nd, "cache-nlines"); in init_vac_layout()
1049 if (cache_lines == -1) { in init_vac_layout()
1050 prom_printf("can't determine cache-nlines, halting.\n"); in init_vac_layout()
1094 #if 0 /* XXX I think this is bad news... -DaveM */ in poke_hypersparc()
1144 * The Swift branch folding logic is completely broken. At in poke_swift()
1147 * it is coming from user mode (it mis-executes the branch in in poke_swift()
1149 * hosing your machine which is completely unacceptable. Turn in poke_swift()
1201 * broken hardware, send it back and we'll give you in init_swift()
1249 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_cache_range()
1257 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_cache_page()
1259 if (vma->vm_flags & VM_EXEC) in turbosparc_flush_cache_page()
1265 /* TurboSparc is copy-back, if we turn it on, but this does not work. */
1300 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_tlb_range()
1307 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_tlb_page()
1330 /* Write-back D-cache, emulate VLSI in poke_turbosparc()
1335 /* Do DVMA snooping in Dcache, Write-thru D-cache */ in poke_turbosparc()
1442 /* Must disable mixed-cmd mode here for other cpu's. */ in poke_viking()
1484 * Our workaround is to take a global spinlock around the TLB flushes,
1518 * "load from non-cacheable memory" interrupt bug. in init_viking()
1559 /* First, check for sparc-leon. */ in get_srmmu_type()
1581 prom_printf("Sparc-Linux Cypress support does not longer exit.\n"); in get_srmmu_type()
1606 if (!prom_getintdefault(cpunode, "psr-implementation", 1) && in get_srmmu_type()
1607 prom_getintdefault(cpunode, "psr-version", 1) == 5) { in get_srmmu_type()
1638 /* Local cross-calls. */
1641 xc1((smpfunc_t) local_ops->page_for_dma, page); in smp_flush_page_for_dma()
1642 local_ops->page_for_dma(page); in smp_flush_page_for_dma()
1647 xc0((smpfunc_t) local_ops->cache_all); in smp_flush_cache_all()
1648 local_ops->cache_all(); in smp_flush_cache_all()
1653 xc0((smpfunc_t) local_ops->tlb_all); in smp_flush_tlb_all()
1654 local_ops->tlb_all(); in smp_flush_tlb_all()
1659 if (mm->context != NO_CONTEXT) { in smp_flush_cache_mm()
1664 xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm); in smp_flush_cache_mm()
1665 local_ops->cache_mm(mm); in smp_flush_cache_mm()
1671 if (mm->context != NO_CONTEXT) { in smp_flush_tlb_mm()
1676 xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm); in smp_flush_tlb_mm()
1677 if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm) in smp_flush_tlb_mm()
1681 local_ops->tlb_mm(mm); in smp_flush_tlb_mm()
1689 struct mm_struct *mm = vma->vm_mm; in smp_flush_cache_range()
1691 if (mm->context != NO_CONTEXT) { in smp_flush_cache_range()
1696 xc3((smpfunc_t) local_ops->cache_range, in smp_flush_cache_range()
1698 local_ops->cache_range(vma, start, end); in smp_flush_cache_range()
1706 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_range()
1708 if (mm->context != NO_CONTEXT) { in smp_flush_tlb_range()
1713 xc3((smpfunc_t) local_ops->tlb_range, in smp_flush_tlb_range()
1715 local_ops->tlb_range(vma, start, end); in smp_flush_tlb_range()
1721 struct mm_struct *mm = vma->vm_mm; in smp_flush_cache_page()
1723 if (mm->context != NO_CONTEXT) { in smp_flush_cache_page()
1728 xc2((smpfunc_t) local_ops->cache_page, in smp_flush_cache_page()
1730 local_ops->cache_page(vma, page); in smp_flush_cache_page()
1736 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_page()
1738 if (mm->context != NO_CONTEXT) { in smp_flush_tlb_page()
1743 xc2((smpfunc_t) local_ops->tlb_page, in smp_flush_tlb_page()
1745 local_ops->tlb_page(vma, page); in smp_flush_tlb_page()
1755 * XXX This experiment failed, research further... -DaveM in smp_flush_page_to_ram()
1758 xc1((smpfunc_t) local_ops->page_to_ram, page); in smp_flush_page_to_ram()
1760 local_ops->page_to_ram(page); in smp_flush_page_to_ram()
1769 xc2((smpfunc_t) local_ops->sig_insns, in smp_flush_sig_insns()
1771 local_ops->sig_insns(mm, insn_addr); in smp_flush_sig_insns()
1800 smp_cachetlb_ops.tlb_all = local_ops->tlb_all; in load_mmu()
1801 smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm; in load_mmu()
1802 smp_cachetlb_ops.tlb_range = local_ops->tlb_range; in load_mmu()
1803 smp_cachetlb_ops.tlb_page = local_ops->tlb_page; in load_mmu()
1808 smp_cachetlb_ops.cache_all = local_ops->cache_all; in load_mmu()
1809 smp_cachetlb_ops.cache_mm = local_ops->cache_mm; in load_mmu()
1810 smp_cachetlb_ops.cache_range = local_ops->cache_range; in load_mmu()
1811 smp_cachetlb_ops.cache_page = local_ops->cache_page; in load_mmu()
1813 smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram; in load_mmu()
1814 smp_cachetlb_ops.sig_insns = local_ops->sig_insns; in load_mmu()
1815 smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma; in load_mmu()
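
The smp_flush_*() entries above (smp_flush_tlb_mm, smp_flush_cache_page, and the rest) all share one shape: do nothing if the mm never received a hardware context, cross-call the per-CPU local operation to the other processors via xc1/xc2/xc3, then run the same local_ops hook on the current CPU. The sketch below is a minimal, self-contained model of that pattern only, not code from the file: struct mm, cross_call() and local_tlb_flush_mm() are hypothetical stand-ins for struct mm_struct, the xc1() cross-call and local_ops->tlb_mm, and the extra mm_users/active_mm handling visible in smp_flush_tlb_mm() is deliberately omitted.

/* Illustrative sketch only -- hypothetical names, not kernel APIs. */
#include <stdio.h>

#define NO_CONTEXT -1

struct mm {                      /* stand-in for struct mm_struct */
	int context;             /* hardware context number or NO_CONTEXT */
};

/* Stand-in for the per-CPU local flush operation (local_ops->tlb_mm). */
static void local_tlb_flush_mm(struct mm *mm)
{
	printf("flush TLB for context %d on this CPU\n", mm->context);
}

/* Stand-in for xc1(): ask the other CPUs to run fn(arg). */
static void cross_call(void (*fn)(struct mm *), struct mm *arg)
{
	printf("broadcasting flush to other CPUs\n");
	fn(arg);                  /* pretend the remote CPUs ran it */
}

/* Mirrors the shape of smp_flush_tlb_mm() in the listing above. */
static void smp_tlb_flush_mm(struct mm *mm)
{
	if (mm->context == NO_CONTEXT)
		return;           /* no hardware context, nothing to flush */
	cross_call(local_tlb_flush_mm, mm);   /* remote CPUs first ... */
	local_tlb_flush_mm(mm);               /* ... then this CPU */
}

int main(void)
{
	struct mm mm = { .context = 3 };
	smp_tlb_flush_mm(&mm);
	return 0;
}

Because the SMP wrappers and the local operations take the same arguments, the load_mmu() lines at the end of the listing can simply point the smp_cachetlb_ops entries straight at the local_ops hooks when cross-calls are not needed.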