/linux-5.10/arch/riscv/boot/dts/sifive/

fu540-c000.dtsi
      1  // SPDX-License-Identifier: (GPL-2.0 OR MIT)
      2  /* Copyright (c) 2018-2019 SiFive, Inc */
      4  /dts-v1/;
      6  #include <dt-bindings/clock/sifive-fu540-prci.h>
      9  #address-cells = <2>;
     10  #size-cells = <2>;
     11  compatible = "sifive,fu540-c000", "sifive,fu540";
     23  #address-cells = <1>;
     24  #size-cells = <0>;
     28  i-cache-block-size = <64>;
         [all …]
/linux-5.10/mm/

huge_memory.c
      1  // SPDX-License-Identifier: GPL-2.0-only
     37  #include <asm/tlb.h>
     68  unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;  in transparent_hugepage_enabled()
    118  if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))  in mm_get_huge_zero_page()
    124  if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))  in mm_get_huge_zero_page()
    132  if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))  in mm_put_huge_zero_page()
    190  ret = -EINVAL;  in enabled_store()
    222  return -EINVAL;  in single_hugepage_flag_store()
    276  return -EINVAL;  in defrag_store()
    328  return -ENOMEM;  in hugepage_init_sysfs()
         [all …]

madvise.c
      1  // SPDX-License-Identifier: GPL-2.0
     13  #include <linux/page-isolation.h>
     26  #include <linux/backing-dev.h>
     33  #include <asm/tlb.h>
     38  struct mmu_gather *tlb;  member
     43  * Any behaviour which results in changes to the vma->vm_flags needs to
     64  * We can potentially split a vm area into separate
     71  struct mm_struct *mm = vma->vm_mm;  in madvise_behavior()
     74  unsigned long new_flags = vma->vm_flags;  in madvise_behavior()
     90  if (vma->vm_flags & VM_IO) {  in madvise_behavior()
         [all …]

mapping_dirty_helpers.c
      1  // SPDX-License-Identifier: GPL-2.0
     10  * struct wp_walk - Private struct for pagetable walk callbacks
     24  * wp_pte - Write-protect a pte
     29  * The function write-protects a pte and records the range in
     30  * virtual address space of touched ptes for efficient range TLB flushes.
     35  struct wp_walk *wpwalk = walk->private;  in wp_pte()
     39  pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);  in wp_pte()
     42  ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);  in wp_pte()
     43  wpwalk->total++;  in wp_pte()
     44  wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);  in wp_pte()
         [all …]
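The wp_pte() lines quoted from mapping_dirty_helpers.c show the ranged-flush
bookkeeping: every touched PTE widens a [start, end) window so a single TLB
flush can cover the whole walk afterwards. A minimal userspace sketch of that
min/max accumulation (range_acc and its helpers are illustrative names, not
kernel API):

    #include <stdio.h>

    /* Analogue of the wpwalk->tlbflush_start/end accounting: each visited
     * address widens a [start, end) window so one ranged flush can cover
     * the whole walk at the end. */
    struct range_acc {
            unsigned long start;   /* lowest address touched */
            unsigned long end;     /* one past the highest address touched */
            unsigned long total;   /* entries touched, as in wpwalk->total */
    };

    static void range_touch(struct range_acc *acc, unsigned long addr,
                            unsigned long size)
    {
            if (addr < acc->start)
                    acc->start = addr;
            if (addr + size > acc->end)
                    acc->end = addr + size;
            acc->total++;
    }

    int main(void)
    {
            struct range_acc acc = { .start = ~0UL, .end = 0, .total = 0 };

            range_touch(&acc, 0x7f0000201000UL, 0x1000);
            range_touch(&acc, 0x7f00001ff000UL, 0x1000);
            printf("flush [%#lx, %#lx), %lu entries\n",
                   acc.start, acc.end, acc.total);
            return 0;
    }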
/linux-5.10/Documentation/admin-guide/mm/

transhuge.rst
     28  requiring larger clear-page copy-page in page faults which is a
     38  1) the TLB miss will run faster (especially with virtualization using
     42  2) a single TLB entry will be mapping a much larger amount of virtual
     43     memory in turn reducing the number of TLB misses. With
     44     virtualization and nested pagetables the TLB can be mapped of
     47  the two is using hugepages just because of the fact the TLB miss is
     78  possible to disable hugepages system-wide and to only have them inside
     95  -------------------
    149  should be self-explanatory.
    168  -------------------
         [all …]
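The per-region control that transhuge.rst describes (and that the madvise.c
hits above implement) comes down to one syscall. A hedged userspace sketch,
assuming a THP-enabled kernel with the system-wide policy set to at least
"madvise"; whether huge pages actually land also depends on alignment and on
khugepaged catching up:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #define LEN (4UL << 20)        /* 4 MiB: room for two 2 MiB huge pages */

    int main(void)
    {
            void *buf = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            /* Opt this range in to THP even under the "madvise" policy. */
            if (madvise(buf, LEN, MADV_HUGEPAGE))
                    perror("madvise(MADV_HUGEPAGE)");

            memset(buf, 0, LEN);   /* fault the pages in */
            /* AnonHugePages in /proc/self/smaps shows whether THP landed. */
            munmap(buf, LEN);
            return 0;
    }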
/linux-5.10/arch/arm/mm/

tlb-v4wb.S
      1  /* SPDX-License-Identifier: GPL-2.0-only */
      5  * Copyright (C) 1997-2002 Russell King
      7  * ARM architecture version 4 TLB handling functions.
      8  * These assume a split I/D TLBs w/o I TLB entry, with a write buffer.
     15  #include <asm/asm-offsets.h>
     17  #include "proc-macros.S"
     23  * Invalidate a range of TLB entries in the specified address space.
     25  * - start - range start address
     26  * - end - range end address
     27  * - mm - mm_struct describing address space
         [all …]

tlb-v4wbi.S
      1  /* SPDX-License-Identifier: GPL-2.0-only */
      5  * Copyright (C) 1997-2002 Russell King
      7  * ARM architecture version 4 and version 5 TLB handling functions.
      8  * These assume a split I/D TLBs, with a write buffer.
     15  #include <asm/asm-offsets.h>
     17  #include "proc-macros.S"
     22  * Invalidate a range of TLB entries in the specified address space.
     24  * - start - range start address
     25  * - end - range end address
     26  * - mm - mm_struct describing address space
         [all …]

tlb-v7.S
      1  /* SPDX-License-Identifier: GPL-2.0-only */
      3  * linux/arch/arm/mm/tlb-v7.S
      5  * Copyright (C) 1997-2002 Russell King
      8  * ARM architecture version 6 TLB handling functions.
      9  * These assume a split I/D TLB.
     14  #include <asm/asm-offsets.h>
     17  #include "proc-macros.S"
     22  * Invalidate a range of TLB entries in the specified address space.
     24  * - start - start address (may not be aligned)
     25  * - end - end address (exclusive, may not be aligned)
         [all …]

tlb-v6.S
      1  /* SPDX-License-Identifier: GPL-2.0-only */
      3  * linux/arch/arm/mm/tlb-v6.S
      5  * Copyright (C) 1997-2002 Russell King
      7  * ARM architecture version 6 TLB handling functions.
      8  * These assume a split I/D TLB.
     12  #include <asm/asm-offsets.h>
     16  #include "proc-macros.S"
     23  * Invalidate a range of TLB entries in the specified address space.
     25  * - start - start address (may not be aligned)
     26  * - end - end address (exclusive, may not be aligned)
         [all …]

tlb-v4.S
      1  /* SPDX-License-Identifier: GPL-2.0-only */
      5  * Copyright (C) 1997-2002 Russell King
      7  * ARM architecture version 4 TLB handling functions.
      8  * These assume a split I/D TLBs, and no write buffer.
     15  #include <asm/asm-offsets.h>
     17  #include "proc-macros.S"
     23  * Invalidate a range of TLB entries in the specified user address space.
     25  * - start - range start address
     26  * - end - range end address
     27  * - mm - mm_struct describing address space
         [all …]
/linux-5.10/arch/arc/include/asm/

pgalloc.h
      1  /* SPDX-License-Identifier: GPL-2.0-only */
      3  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
      6  * -"/proc/meminfo | grep PageTables" kept on increasing
     10  * -Variable pg-sz means that Page Tables could be variable sized themselves
     11  *   So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
     12  * -Page Table size capped to max 1 to save memory - hence verified.
     13  * -Since these deal with constants, gcc compile-time optimizes them.
     16  * -Added pgtable ctor/dtor used for pgtable mem accounting
     19  * -Switched pgtable_t from being struct page * to unsigned long
     24  *   pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate)
         [all …]

pgtable.h
      1  /* SPDX-License-Identifier: GPL-2.0-only */
      3  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
      6  * -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
      8  *   VALID marks a TLB entry exists and it will only happen if PRESENT
      9  * - Utilise some unused free bits to confine PTE flags to 12 bits
     10  *   This is a must for 4k pg-sz
     12  * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
     13  * -TLB Locking never really existed, except for initial specs
     14  * -SILENT_xxx not needed for our port
     15  * -Per my request, MMU V3 changes the layout of some of the bits
         [all …]
/linux-5.10/arch/mips/mm/

tlbex.c
      6  * Synthesize TLB refill handlers at runtime.
     10  * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
     34  #include <asm/cpu-type.h>
     53  * TLB load/store/modify handlers.
    132  * CVMSEG starts at address -32768 and extends for  in scratchpad_offset()
    136  return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;  in scratchpad_offset()
    231  * TLB exception handlers.
    263  unsigned int count = (end - start) / sizeof(u32);  in dump_handler()
    280  /* The only general purpose registers allowed in TLB handlers. */
    308  * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
         [all …]
/linux-5.10/arch/sh/mm/

tlb-pteaex.c
      2  * arch/sh/mm/tlb-pteaex.c
      4  * TLB operations for SH-X3 CPUs featuring PTE ASID Extensions.
     25  if (vma && current->active_mm != vma->vm_mm)  in __update_tlb()
     42  * For the extended mode TLB this is trivial, only the ESZ and  in __update_tlb()
     44  * the protection bits (with the exception of the compat-mode SZ  in __update_tlb()
     58  /* Load the TLB */  in __update_tlb()
     64  * While SH-X2 extended TLB mode splits out the memory-mapped I/UTLB
     65  * data arrays, SH-X3 cores with PTEAEX split out the memory-mapped
     67  * in extended mode, the legacy 8-bit ASID field in address array 1 has
     86  * Flush all the TLB.  in local_flush_tlb_all()
/linux-5.10/Documentation/devicetree/bindings/riscv/

cpus.yaml
      1  # SPDX-License-Identifier: (GPL-2.0 OR MIT)
      3  ---
      5  $schema: http://devicetree.org/meta-schemas/core.yaml#
      7  title: RISC-V bindings for 'cpus' DT nodes
     10  - Paul Walmsley <paul.walmsley@sifive.com>
     11  - Palmer Dabbelt <palmer@sifive.com>
     14  This document uses some terminology common to the RISC-V community
     18  mandated by the RISC-V ISA: a PC and some registers. This
     28  - items:
     29  - enum:
         [all …]
/linux-5.10/include/linux/

io-pgtable.h
      1  /* SPDX-License-Identifier: GPL-2.0 */
     22  * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
     24  * @tlb_flush_all:  Synchronously invalidate the entire TLB context.
     25  * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
     28  * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
     30  * @tlb_add_page:   Optional callback to queue up leaf TLB invalidation for a
     31  *                  single page. IOMMUs that cannot batch TLB invalidation
     50  * struct io_pgtable_cfg - Configuration data for a set of page tables.
     53  *                 action by the low-level page table allocator.
     60  * @tlb:           TLB management callbacks for this set of tables.
         [all …]
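The @tlb_* kerneldoc above documents the iommu_flush_ops callback table that
an IOMMU driver hands to the io-pgtable code. A sketch of how a driver might
fill it in, assuming the 5.10 signatures shown in this header; the mydrv_*
names and the empty bodies are hypothetical, not a real driver:

    #include <linux/io-pgtable.h>

    /* Hypothetical driver hooks; a real driver would poke its own
     * hardware invalidation registers here. */
    static void mydrv_tlb_flush_all(void *cookie)
    {
            /* invalidate the whole TLB context for this domain */
    }

    static void mydrv_tlb_flush_walk(unsigned long iova, size_t size,
                                     size_t granule, void *cookie)
    {
            /* synchronously invalidate [iova, iova + size) */
    }

    static void mydrv_tlb_add_page(struct iommu_iotlb_gather *gather,
                                   unsigned long iova, size_t granule,
                                   void *cookie)
    {
            /* queue one leaf-page invalidation; synced later */
    }

    static const struct iommu_flush_ops mydrv_flush_ops = {
            .tlb_flush_all  = mydrv_tlb_flush_all,
            .tlb_flush_walk = mydrv_tlb_flush_walk,
            .tlb_flush_leaf = mydrv_tlb_flush_walk, /* reuse for leaf-only */
            .tlb_add_page   = mydrv_tlb_add_page,
    };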
/linux-5.10/arch/powerpc/mm/

pgtable.c
      1  // SPDX-License-Identifier: GPL-2.0-or-later
      7  * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
     27  #include <asm/tlb.h>
     32  return current->thread.regs && TRAP(current->thread.regs) == 0x400;  in is_exec_fault()
     67  /* Server-style MMU handles coherency when hashing if HW exec permission
     68  * is supposed per page (currently 64-bit only). If not, then, we always
     84  if (!test_bit(PG_arch_1, &pg->flags)) {  in set_pte_filter_hash()
     86  set_bit(PG_arch_1, &pg->flags);  in set_pte_filter_hash()
    119  if (test_bit(PG_arch_1, &pg->flags))  in set_pte_filter()
    125  set_bit(PG_arch_1, &pg->flags);  in set_pte_filter()
         [all …]
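The PG_arch_1 lines here (like the MMF_HUGE_ZERO_PAGE lines in huge_memory.c
earlier) are the classic do-it-at-most-once flag pattern: an expensive step,
here an icache flush, runs only if the per-page bit is still clear. A runnable
userspace analogue using C11 atomics (page_mock and the flush message are
illustrative, not kernel code):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Analogue of the PG_arch_1 trick in set_pte_filter(): a per-page
     * flag remembers that the expensive flush already ran. */
    struct page_mock {
            atomic_flag arch_clean;
    };

    static void flush_icache_once(struct page_mock *pg)
    {
            if (atomic_flag_test_and_set(&pg->arch_clean))
                    return;                 /* already flushed */
            puts("flushing icache for page");
    }

    int main(void)
    {
            struct page_mock pg = { ATOMIC_FLAG_INIT };

            flush_icache_once(&pg);         /* flushes */
            flush_icache_once(&pg);         /* no-op */
            return 0;
    }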
/linux-5.10/arch/x86/kvm/

mmu.h
      1  /* SPDX-License-Identifier: GPL-2.0 */
     40  (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
     52  return ((1ULL << (e - s + 1)) - 1) << s;  in rsvd_bits()
     71  if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))  in kvm_mmu_reload()
     93  u64 root_hpa = vcpu->arch.mmu->root_hpa;  in kvm_mmu_load_pgd()
     99  vcpu->arch.mmu->shadow_root_level);  in kvm_mmu_load_pgd()
    109  if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))  in kvm_mmu_do_page_fault()
    112  return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);  in kvm_mmu_do_page_fault()
    116  * Currently, we have two sorts of write-protection, a) the first one
    117  * write-protects guest page to sync the guest modification, b) another one is
         [all …]
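The rsvd_bits() hit at line 52 is a compact mask builder: set bits s through e
inclusive. A standalone restatement with a worked value (the main() driver is
illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Same expression as rsvd_bits() in arch/x86/kvm/mmu.h:
     * a mask with bits s..e (inclusive) set. */
    static uint64_t rsvd_bits(int s, int e)
    {
            return ((1ULL << (e - s + 1)) - 1) << s;
    }

    int main(void)
    {
            /* bits 51..62 -> 0x7ff8000000000000 */
            printf("%#llx\n", (unsigned long long)rsvd_bits(51, 62));
            return 0;
    }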
/linux-5.10/Documentation/admin-guide/hw-vuln/

multihit.rst
      6  instruction fetch hits multiple entries in the instruction TLB. This can
     13  -------------------
     18  - non-Intel processors
     20  - Some Atoms (Airmont, Bonnell, Goldmont, GoldmontPlus, Saltwell, Silvermont)
     22  - Intel processors that have the PSCHANGE_MC_NO bit set in the
     27  ------------
     32  CVE-2018-12207   Machine Check Error Avoidance on Page Size Change
     37  -------
     42  the illusion of a very large memory for processors. This virtual space is split
     47  processors include a structure, called TLB, that caches recent translations.
         [all …]
/linux-5.10/Documentation/vm/

highmem.rst
     25  VM space so that we don't have to pay the full TLB invalidation costs for
     29  The traditional split for architectures using this approach is 3:1, 3GiB for
     32  +--------+ 0xffffffff
     34  +--------+ 0xc0000000
     38  +--------+ 0x00000000
     41  time, but because we need virtual address space for other things - including
     42  temporary maps to access the rest of the physical memory - the actual direct
    114  manipulate the kernel's page tables, the data TLB and/or the MMU's registers.
    129  of RAM into your 32-bit machine. This has a number of consequences:
    131  * Linux needs a page-frame structure for each page in the system and the
         [all …]
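The consequence flagged at line 131 (one page-frame structure per physical
page) is easy to quantify. A back-of-envelope sketch; the 64-byte struct page
size is an assumption typical of 64-bit builds, and 32-bit layouts are
smaller:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long ram = 16ULL << 30;  /* 16 GiB of RAM */
            unsigned long page = 4096;             /* 4 KiB pages */
            unsigned long spage = 64;              /* approx sizeof(struct page) */

            unsigned long long npages = ram / page;
            /* 4194304 pages -> 256 MiB of mem_map */
            printf("%llu pages -> %llu MiB of mem_map\n",
                   npages, npages * spage >> 20);
            return 0;
    }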
/linux-5.10/arch/powerpc/kvm/

book3s_hv_ras.c
      1  // SPDX-License-Identifier: GPL-2.0-only
     20  #define SRR1_MC_LDSTERR            (1ul << (63-42))
     21  #define SRR1_MC_IFETCH_SH          (63-45)
     24  #define SRR1_MC_IFETCH_SLBMULTI    3      /* SLB multi-hit */
     25  #define SRR1_MC_IFETCH_SLBPARMULTI 4      /* SLB parity + multi-hit */
     26  #define SRR1_MC_IFETCH_TLBMULTI    5      /* I-TLB multi-hit */
     29  #define DSISR_MC_DERAT_MULTI       0x800  /* D-ERAT multi-hit */
     30  #define DSISR_MC_TLB_MULTI         0x400  /* D-TLB multi-hit */
     32  #define DSISR_MC_SLB_MULTI         0x080  /* SLB multi-hit */
     33  #define DSISR_MC_SLB_PARMULTI      0x040  /* SLB parity + multi-hit */
         [all …]
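The (63-42)-style shifts above follow Power ISA bit numbering, where bit 0 is
the most-significant bit of a 64-bit register; subtracting from 63 converts to
the conventional LSB-relative position. A small demonstration (macro values
copied from the excerpt; the main() driver is illustrative):

    #include <stdio.h>

    /* "SRR1 bit 42" in the architecture books becomes LSB-relative
     * bit 63 - 42 = 21, i.e. mask 0x200000. */
    #define SRR1_MC_LDSTERR    (1ul << (63 - 42))
    #define SRR1_MC_IFETCH_SH  (63 - 45)

    int main(void)
    {
            printf("SRR1_MC_LDSTERR = %#lx\n", SRR1_MC_LDSTERR);
            printf("IFETCH field starts at LSB-relative bit %d\n",
                   SRR1_MC_IFETCH_SH);
            return 0;
    }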
/linux-5.10/arch/mips/kvm/

tlb.c
      6  * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
      7  * TLB handlers run from KSEG0
     26  #include <asm/tlb.h>
     46  struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;  in kvm_mips_get_root_asid()
     57  struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;  in kvm_mips_get_kernel_asid()
     65  struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;  in kvm_mips_get_user_asid()
     71  /* Structure defining an tlb entry data set. */
     90  struct mips_coproc *cop0 = vcpu->arch.cop0;  in kvm_mips_dump_guest_tlbs()
     91  struct kvm_mips_tlb tlb;  in kvm_mips_dump_guest_tlbs()  local
     98  tlb = vcpu->arch.guest_tlb[i];  in kvm_mips_dump_guest_tlbs()
         [all …]
/linux-5.10/arch/alpha/kernel/

err_marvel.c
      1  // SPDX-License-Identifier: GPL-2.0
     49  env = lf_subpackets->env[ev7_lf_env_index(ev_packets[i].type)];  in marvel_print_680_frame()
     56  env->cabinet,  in marvel_print_680_frame()
     57  env->drawer);  in marvel_print_680_frame()
     58  printk("%s Module Type: 0x%x - Unit ID 0x%x - "  in marvel_print_680_frame()
     61  env->module_type,  in marvel_print_680_frame()
     62  env->unit_id,  in marvel_print_680_frame()
     63  env->condition);  in marvel_print_680_frame()
     77  if (lf_subpackets->env[i])  in marvel_process_680_frame()
    317  "%s Source Port: %lld - Dest PID: %lld - OpCode: %s\n",  in marvel_print_po7_ugbge_sym()
         [all …]
/linux-5.10/arch/powerpc/perf/

e500-pmu.c
      1  // SPDX-License-Identifier: GPL-2.0-or-later
      5  * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
     30  * Table of generalized cache-related events.
     31  * 0 means not supported, -1 means nonsensical, other values
     36  * D-cache misses are not split into read/write/prefetch;
     46  [C(OP_WRITE)] = { -1, -1 },
     62  * the chip's internal level-one TLB which is probably not
     63  * what the user wants. Instead, unified level-two TLB misses
     68  [C(OP_WRITE)] = { -1, -1 },
     69  [C(OP_PREFETCH)] = { -1, -1 },
         [all …]
/linux-5.10/arch/x86/mm/pat/

set_memory.c
      1  // SPDX-License-Identifier: GPL-2.0-only
     34  * The current flushing context - we pass it instead of 5 arguments:
     61  * using cpa_lock. So that we don't allow any other cpu, with stale large tlb
     93  direct_pages_count[level]--;  in split_page_count()
     94  direct_pages_count[level - 1] += PTRS_PER_PTE;  in split_page_count()
    165  seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);  in cpastats_show()
    219  return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;  in highmap_end_pfn()
    235  /* There is no highmap on 32-bit */  in __cpa_pfn_in_highmap()
    249  * process we need to flush the TLB and cache and the non-canonical address
    253  * will fix the top bit if needed and is a no-op otherwise.
         [all …]
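Lines 93-94 are the whole accounting story for a direct-map page split: one
mapping disappears at the large level and PTRS_PER_PTE mappings appear one
level down. A toy restatement with x86-64 numbers (the two-element array and
its initial counts are made up for illustration):

    #include <stdio.h>

    #define PTRS_PER_PTE 512   /* x86-64: 512 PTEs per page table */

    int main(void)
    {
            /* stands in for direct_pages_count[]: [0] = 4K, [1] = 2M */
            unsigned long direct_pages_count[2] = { 1000, 4 };
            int level = 1;     /* split one 2 MiB mapping */

            direct_pages_count[level]--;
            direct_pages_count[level - 1] += PTRS_PER_PTE;

            /* one 2 MiB page became 512 x 4 KiB pages: 4K: 1512, 2M: 3 */
            printf("4K: %lu, 2M: %lu\n",
                   direct_pages_count[0], direct_pages_count[1]);
            return 0;
    }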