/linux-6.8/drivers/gpu/drm/i915/ |
D | i915_vma_resource.h |
    1   /* SPDX-License-Identifier: MIT */
    9   #include <linux/dma-fence.h>
   21    * The sg mask of the pages sg_table. i.e. the mask of
   27    * The gtt page sizes we are allowed to use given the
   29    * express the smallest unit we can use for the whole
   31    * to use opportunistically.
   37    * struct i915_vma_bindinfo - Information needed for async bind
   42    * @pages: The pages sg-table.
   43    * @page_sizes: Page sizes of the pages.
   44    * @pages_rsgt: Refcounted sg-table when delayed object destruction
   [all …]
|
D | i915_vma.c |
    7    * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   20    * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   26   #include <linux/dma-fence-array.h>
   54   if (kref_read(&vma->vm->ref))  in assert_vma_held_evict()
   55   assert_object_held_shared(vma->obj);  in assert_vma_held_evict()
   78   if (!vma->node.stack) {  in vma_print_allocator()
   79   drm_dbg(vma->obj->base.dev,  in vma_print_allocator()
   81   vma->node.start, vma->node.size, reason);  in vma_print_allocator()
   85   stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);  in vma_print_allocator()
   86   drm_dbg(vma->obj->base.dev,  in vma_print_allocator()
   [all …]
|
D | i915_gem_gtt.c |
    1   // SPDX-License-Identifier: MIT
    7   #include <linux/slab.h> /* fault-inject.h is not standalone! */
    9   #include <linux/fault-inject.h>
   29   struct sg_table *pages)  in i915_gem_gtt_prepare_pages() argument
   32   if (dma_map_sg_attrs(obj->base.dev->dev,  in i915_gem_gtt_prepare_pages()
   33   pages->sgl, pages->nents,  in i915_gem_gtt_prepare_pages()
   44   * try again - if there are no more pages to remove from  in i915_gem_gtt_prepare_pages()
   47   GEM_BUG_ON(obj->mm.pages == pages);  in i915_gem_gtt_prepare_pages()
   48   } while (i915_gem_shrink(NULL, to_i915(obj->base.dev),  in i915_gem_gtt_prepare_pages()
   49   obj->base.size >> PAGE_SHIFT, NULL,  in i915_gem_gtt_prepare_pages()
   [all …]
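
The loop excerpted here DMA-maps the object's scatterlist for the GTT and, on failure, shrinks GEM memory and retries. A minimal sketch of that map-or-shrink pattern, assuming a stand-in demo_shrink() callback rather than i915_gem_shrink()'s real signature:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Hedged sketch of the map-or-shrink loop; demo_shrink() stands in
     * for the driver's real reclaim helper. */
    static int demo_gtt_prepare_pages(struct device *dev, struct sg_table *pages,
                                      bool (*demo_shrink)(void))
    {
            do {
                    /* dma_map_sg_attrs() returns the number of mapped
                     * entries, or 0 on failure */
                    if (dma_map_sg_attrs(dev, pages->sgl, pages->nents,
                                         DMA_BIDIRECTIONAL,
                                         DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_WARN))
                            return 0;
                    /* mapping failed: reclaim some memory and try again */
            } while (demo_shrink());

            return -ENOSPC;
    }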
|
D | i915_vma_types.h |
    1   /* SPDX-License-Identifier: MIT */
    8    * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   21    * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   41    * singular instances with a view representing all of the object's backing pages
   45    * pages is not equal to the backing store, or where the layout of the pages
   50    * (2x2 pages):
   62    * In this example both the size and layout of pages in the alternative view is
   80    * Code wanting to add or use a new GGTT view needs to:
   86    * New views are required to build a scatter-gather table from within the
   99   /* in gtt pages */
   [all …]
|
D | i915_vma.h |
    7    * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   20    * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   28   #include <linux/io-mapping.h>
   53   return !i915_active_is_idle(&vma->active);  in i915_vma_is_active()
   68   return _i915_vma_move_to_active(vma, rq, &rq->fence, flags);  in i915_vma_move_to_active()
   71   #define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)
   80   return i915_is_dpt(vma->vm);  in i915_vma_is_dpt()
  125   return !list_empty(&vma->closed_link);  in i915_vma_is_closed()
  128   /* Internal use only. */
  131   return vma->node.size - 2 * vma->guard;  in __i915_vma_size()
   [all …]
|
D | i915_gem_evict.c |
    2    * Copyright © 2008-2010 Intel Corporation
    7    * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   20    * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   25    * Chris Wilson <chris@chris-wilson.co.uk>
   43   return !kref_read(&vma->obj->base.refcount);
   52   list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {  in ggtt_flush()
   73   if (i915_gem_object_get_rcu(vma->obj)) {  in grab_vma()
   74   if (!i915_gem_object_trylock(vma->obj, ww)) {  in grab_vma()
   75   i915_gem_object_put(vma->obj);  in grab_vma()
   80   atomic_and(~I915_VMA_PIN_MASK, &vma->flags);  in grab_vma()
   [all …]
|
/linux-6.8/Documentation/mm/ |
D | vmalloced-kernel-stacks.rst |
    1   .. SPDX-License-Identifier: GPL-2.0
   12   --------
   19   ------------
   23   it difficult to isolate and root-cause.
   25   Virtually-mapped kernel stacks with guard pages cause kernel stack
   30   support for virtually mapped stacks with guard pages. This feature
   40   --------------------
   45   - vmalloc space must be large enough to hold many kernel stacks. This
   46     may rule out many 32-bit architectures.
   47   - Stacks in vmalloc space need to work reliably. For example, if
   [all …]
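
The mechanism behind this is an ordinary vmalloc-range allocation: vmalloc inserts an unmapped guard page after each area, so a stack overrun faults instead of silently corrupting the neighbouring allocation. A simplified sketch of such an allocation (the real code in kernel/fork.c also handles caching, memcg accounting and KASAN):

    #include <linux/vmalloc.h>
    #include <linux/thread_info.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Simplified, assumption-labelled sketch of a vmalloc-backed thread
     * stack; the trailing guard page comes for free from vmalloc. */
    static void *demo_alloc_vmap_stack(int node)
    {
            return __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
                                        VMALLOC_START, VMALLOC_END,
                                        GFP_KERNEL, PAGE_KERNEL,
                                        0, node, __builtin_return_address(0));
    }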
|
/linux-6.8/Documentation/dev-tools/ |
D | kfence.rst |
    1   .. SPDX-License-Identifier: GPL-2.0
    4   Kernel Electric-Fence (KFENCE)
    7   Kernel Electric-Fence (KFENCE) is a low-overhead sampling-based memory safety
    8   error detector. KFENCE detects heap out-of-bounds access, use-after-free, and
    9   invalid-free errors.
   15   non-production test workloads. One way to quickly achieve a large enough total
   19   -----
   26   ``kfence.sample_interval`` to a non-zero value), configure the kernel with::
   46   causes CPU wake-ups when the system is completely idle. This may be undesirable
   47   on power-constrained systems. The boot parameter ``kfence.deferrable=1``
   [all …]
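
As a concrete illustration (the interval value is arbitrary, not mandated by the document), KFENCE is typically built in with a default sample interval and can be overridden on the kernel command line:

    CONFIG_KFENCE=y
    CONFIG_KFENCE_SAMPLE_INTERVAL=100

    # boot-time override; 0 disables sampling
    kfence.sample_interval=100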
|
/linux-6.8/Documentation/arch/arm64/ |
D | memory.rst |
   12   with the 4KB page configuration, allowing 39-bit (512GB) or 48-bit
   14   64KB pages, only 2 levels of translation tables, allowing 42-bit (4TB)
   24   mappings while the user pgd contains only user (non-global) mappings.
   29   AArch64 Linux memory layout with 4KB pages + 4 levels (48-bit)::
   31     Start                 End                   Size      Use
   32     -----------------------------------------------------------------------
   39     fffffbfffe000000      fffffbfffe7fffff      8MB       [guard region]
   41     fffffbffff800000      fffffbffffffffff      8MB       [guard region]
   43     fffffe0000000000      ffffffffffffffff      2TB       [guard region]
   46   AArch64 Linux memory layout with 64KB pages + 3 levels (52-bit with HW support)::
   [all …]
|
/linux-6.8/arch/x86/kernel/ |
D | irq_64.c |
    1   // SPDX-License-Identifier: GPL-2.0
    5    * This file contains the lowest level x86_64-specific interrupt
    8    * x86_64-specific irq controller code. (e.g. i8259.c and
   33    * VMAP the backing store with guard pages
   38   struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];  in map_irq_stack() local
   45   pages[i] = pfn_to_page(pa >> PAGE_SHIFT);  in map_irq_stack()
   48   va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);  in map_irq_stack()
   50   return -ENOMEM;  in map_irq_stack()
   53   per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;  in map_irq_stack()
   58    * If VMAP stacks are disabled due to KASAN, just use the per cpu
   [all …]
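
The excerpt is the VMAP-stack variant of the IRQ stack setup: the per-CPU backing pages are gathered and mapped through vmap(), which leaves an unmapped guard page after the area, so an overflow faults immediately instead of trampling adjacent data. A condensed sketch with illustrative names (DEMO_IRQ_STACK_SIZE, demo_map_irq_stack) and error handling trimmed:

    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    #define DEMO_IRQ_STACK_SIZE     (4 * PAGE_SIZE) /* assumption for the sketch */

    /* Map an already-allocated, physically scattered backing store through
     * vmap(); the area is followed by a guard page unless VM_NO_GUARD is set. */
    static void *demo_map_irq_stack(struct page **backing_pages)
    {
            void *va = vmap(backing_pages, DEMO_IRQ_STACK_SIZE / PAGE_SIZE,
                            VM_MAP, PAGE_KERNEL);
            if (!va)
                    return NULL;

            /* the stack grows down, so hand out the top of the mapping */
            return va + DEMO_IRQ_STACK_SIZE - 8;
    }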
|
D | dumpstack_64.c |
    1   // SPDX-License-Identifier: GPL-2.0
   46    * On 64-bit, we have a generic entry stack that we  in stack_type_name()
   47    * use for all the kernel entry points, including  in stack_type_name()
   54   return exception_stack_names[type - STACK_TYPE_EXCEPTION];  in stack_type_name()
   60    * struct estack_pages - Page descriptor for exception stacks
   73   PFN_DOWN(CEA_ESTACK_OFFS(st) + CEA_ESTACK_SIZE(st) - 1)] = { \
   80    * PAGE_SIZE, all pages covering a particular stack will have the same
   81    * info. The guard pages including the not mapped DB2 stack are zeroed
  117   k = (stk - begin) >> PAGE_SHIFT;  in in_exception_stack()
  120   /* Guard page? */  in in_exception_stack()
   [all …]
|
/linux-6.8/include/linux/ |
D | vmalloc.h |
    1   /* SPDX-License-Identifier: GPL-2.0 */
   22   #define VM_MAP                  0x00000004      /* vmap()ed pages */
   26   #define VM_NO_GUARD             0x00000040      /* ***DANGEROUS*** don't add guard page */
   29   #define VM_MAP_PUT_PAGES        0x00000200      /* put pages and free array in vfree */
   30   #define VM_ALLOW_HUGE_VMAP      0x00000400      /* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMA…
   43    * Can be overridden by arch-specific value.
   46   #define IOREMAP_MAX_ORDER       (7 + PAGE_SHIFT)        /* 128 pages */
   54   struct page **pages;  member
  128    * Highlevel APIs for driver use
  131   extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
   [all …]
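
These flags govern guard-page behaviour for vmalloc/vmap areas: by default each area is followed by one extra, never-mapped guard page, and VM_NO_GUARD suppresses it. A small sketch of observing that extra page (demo_guard_page is an illustrative name; the nine-page figure assumes the default guard is present):

    #include <linux/vmalloc.h>
    #include <linux/printk.h>

    static void demo_guard_page(void)
    {
            void *p = vmalloc(8 * PAGE_SIZE);
            struct vm_struct *area;

            if (!p)
                    return;

            /* area->size covers the mapped pages plus the trailing guard
             * page, so it is typically 9 * PAGE_SIZE here. */
            area = find_vm_area(p);
            pr_info("vmalloc area size: %lu\n", area ? area->size : 0UL);

            vfree(p);
    }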
|
D | page-flags.h |
    1   /* SPDX-License-Identifier: GPL-2.0 */
    3    * Macros for manipulating and testing page->flags
   18    * Various page->flags bits:
   20    * PG_reserved is set for special pages. The "struct page" of such a page
   22    * Pages marked as PG_reserved include:
   23    * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
   25    * - Pages reserved or allocated early during boot (before the page allocator
   30    * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
   31    *   to read/write these pages might end badly. Don't touch!
   32    * - The zero page(s)
   [all …]
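
For illustration only, a helper that applies the rule described above: generic code should leave PG_reserved pages alone (demo_page_is_ordinary_ram is a hypothetical name, not a kernel API):

    #include <linux/page-flags.h>
    #include <linux/mm.h>

    /* PG_reserved covers the kernel image, firmware tables, the zero page,
     * memory-map gaps and similar; most generic MM paths skip such pages. */
    static bool demo_page_is_ordinary_ram(struct page *page)
    {
            return !PageReserved(page);
    }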
|
/linux-6.8/lib/ |
D | Kconfig.kfence |
    1   # SPDX-License-Identifier: GPL-2.0-only
    7   bool "KFENCE: low-overhead sampling-based memory safety error detector"
   12   KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
   13   access, use-after-free, and invalid-free errors. KFENCE is designed
   17   See <file:Documentation/dev-tools/kfence.rst> for more details.
   22   afford to use KASAN, continue using KASAN, for example in test
   23   environments. If your kernel targets production use, and cannot
   37   setting "kfence.sample_interval" to a non-zero value enables KFENCE.
   45   pages are required; with one containing the object and two adjacent
   46   ones used as guard pages.
   [all …]
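
The "two adjacent guard pages" in the last fragments are shared between neighbouring objects, so the dedicated pool works out to two pages per object plus one leading guard pair. A sketch of that arithmetic (the kernel keeps its own constant for this in include/linux/kfence.h; PAGE_SIZE is the system page size):

    /* Assumption-labelled sketch of the pool footprint, not the kernel's
     * real macro: one data page and one guard page per object, plus one
     * extra guard pair at the start of the pool. */
    #define DEMO_KFENCE_POOL_SIZE(objects)  (((objects) + 1) * 2 * PAGE_SIZE)

    /* with the default 255 objects and 4 KiB pages:
     * (255 + 1) * 2 * 4 KiB = 2 MiB */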
|
/linux-6.8/drivers/gpu/drm/i915/gem/ |
D | i915_gem_domain.c |
    2    * SPDX-License-Identifier: MIT
    4    * Copyright © 2014-2016 Intel Corporation
   21   #define VTD_GUARD       (168u * I915_GTT_PAGE_SIZE)     /* 168 or tile-row PTE padding */
   25   struct drm_i915_private *i915 = to_i915(obj->base.dev);  in gpu_write_needs_clflush()
   35    * whether the object is un-cached or write-through.  in gpu_write_needs_clflush()
   43   struct drm_i915_private *i915 = to_i915(obj->base.dev);  in i915_gem_cpu_write_needs_clflush()
   45   if (obj->cache_dirty)  in i915_gem_cpu_write_needs_clflush()
   51   if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))  in i915_gem_cpu_write_needs_clflush()
   54   /* Currently in use by HW (display engine)? Keep flushed. */  in i915_gem_cpu_write_needs_clflush()
   65   if (!(obj->write_domain & flush_domains))  in flush_write_domain()
   [all …]
|
/linux-6.8/Documentation/arch/x86/ |
D | sgx.rst |
    1   .. SPDX-License-Identifier: GPL-2.0
    4   Software Guard eXtensions (SGX)
   10   Software Guard eXtensions (SGX) hardware enables for user space applications
   13   * Privileged (ring-0) ENCLS functions orchestrate the construction of the
   15   * Unprivileged (ring-3) ENCLU functions allow an application to enter and
   37   SGX utilizes an *Enclave Page Cache (EPC)* to store pages that are associated
   38   with an enclave. It is contained in a BIOS-reserved region of physical memory.
   39   Unlike pages used for regular memory, pages can only be accessed from outside of
   49   ------------------
   56   Regular EPC pages contain the code and data of an enclave.
   [all …]
|
/linux-6.8/Documentation/devicetree/bindings/reserved-memory/ |
D | qcom,rmtfs-mem.yaml |
    1   # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
    3   ---
    4   $id: http://devicetree.org/schemas/reserved-memory/qcom,rmtfs-mem.yaml#
    5   $schema: http://devicetree.org/meta-schemas/core.yaml#
   15     - Bjorn Andersson <bjorn.andersson@linaro.org>
   18     - $ref: reserved-memory.yaml
   22       const: qcom,rmtfs-mem
   24     qcom,client-id:
   27         identifier of the client to use this region for buffers
   29     qcom,use-guard-pages:
   [all …]
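
A reserved-memory node using this binding might look like the following sketch; the address, size and client id are made up for illustration, and qcom,use-guard-pages asks the driver to keep the first and last page of the carveout unused as guard pages:

    reserved-memory {
            #address-cells = <2>;
            #size-cells = <2>;
            ranges;

            rmtfs_mem: memory@88f00000 {
                    compatible = "qcom,rmtfs-mem";
                    reg = <0x0 0x88f00000 0x0 0x200000>;
                    no-map;

                    qcom,client-id = <1>;
                    qcom,use-guard-pages;
            };
    };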
|
/linux-6.8/drivers/gpu/drm/i915/gt/ |
D | intel_ggtt.c |
    1   // SPDX-License-Identifier: MIT
   13   #include <drm/intel-gtt.h>
   46    * GTT and any objects within the GTT, i.e. we use the color adjustment  in i915_ggtt_color_adjust()
   47    * to insert a guard page to prevent prefetches crossing over the  in i915_ggtt_color_adjust()
   51   if (node->color != color)  in i915_ggtt_color_adjust()
   52   *end -= I915_GTT_PAGE_SIZE;  in i915_ggtt_color_adjust()
   57   struct drm_i915_private *i915 = ggtt->vm.i915;  in ggtt_init_hw()
   59   i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);  in ggtt_init_hw()
   61   ggtt->vm.is_ggtt = true;  in ggtt_init_hw()
   63   /* Only VLV supports read-only GGTT mappings */  in ggtt_init_hw()
   [all …]
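
The excerpted function is a drm_mm color_adjust hook: whenever a neighbouring node has a different cache "color", the usable range is shrunk by one GTT page so that an unmapped guard page separates the two and GPU prefetch cannot cross the boundary. A sketch modeled on the fragments above (demo_color_adjust and the SZ_4K page size are assumptions, not the driver's exact code):

    #include <drm/drm_mm.h>
    #include <linux/sizes.h>

    static void demo_color_adjust(const struct drm_mm_node *node,
                                  unsigned long color, u64 *start, u64 *end)
    {
            /* if the node before the hole has a different color, skip one
             * page at the start of the usable range */
            if (drm_mm_node_allocated(node) && node->color != color)
                    *start += SZ_4K;

            /* likewise leave one unmapped page before a differently
             * colored successor */
            node = list_next_entry(node, node_list);
            if (drm_mm_node_allocated(node) && node->color != color)
                    *end -= SZ_4K;
    }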
|
/linux-6.8/arch/hexagon/include/asm/ |
D | mem-layout.h |
    1   /* SPDX-License-Identifier: GPL-2.0-only */
    5    * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
   50   FIX_KMAP_END,   /* check for per-cpuism */
   64   /* Gap between physical ram and vmalloc space for guard purposes. */
   77   #define FIXADDR_START   (FIXADDR_TOP - FIXADDR_SIZE)
   80    * "permanent kernel mappings", defined as long-lasting mappings of
   81    * high-memory page frames into the kernel address space.
   85   #define LAST_PKMAP_MASK (LAST_PKMAP - 1)
   86   #define PKMAP_NR(virt)  ((virt - PKMAP_BASE) >> PAGE_SHIFT)
   94    * Need to check the alignment/shift usage; some archs use
   [all …]
|
/linux-6.8/mm/ |
D | highmem.c |
    1   // SPDX-License-Identifier: GPL-2.0
    9    * Redesigned the x86 32-bit VM architecture to deal with
   10    * 64-bit physical space. With current x86 CPUs this
   47    * since a TLB flush - it is usable.
   49    * since the last TLB flush - so we can't use it.
   50    * n means that there are (n-1) current users of it.
   57    * addresses where physical memory pages are mapped by kmap.
  120   unsigned int pages = 0;  in __nr_free_highpages() local
  124   pages += zone_page_state(zone, NR_FREE_PAGES);  in __nr_free_highpages()
  127   return pages;  in __nr_free_highpages()
   [all …]
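
The counters described above belong to the legacy kmap() pool; a caller that just needs temporary access to a (possibly highmem) page normally uses the local mapping API instead, as in this minimal sketch:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Temporarily map a page, touch it, and drop the mapping again. */
    static void demo_zero_page(struct page *page)
    {
            void *vaddr = kmap_local_page(page);

            memset(vaddr, 0, PAGE_SIZE);
            kunmap_local(vaddr);
    }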
|
/linux-6.8/arch/powerpc/ |
D | Kconfig |
    1   # SPDX-License-Identifier: GPL-2.0
    5   def_bool PPC64 && $(cc-option, -mabi=elfv2)
    8   def_bool PPC64 && $(cc-option, -mcpu=power10 -mprefixed)
   11   # Clang has a bug (https://github.com/llvm/llvm-project/issues/62372)
   12   # where pcrel code is not generated if -msoft-float, -mno-altivec, or
   13   # -mno-vsx options are also given. Without these options, fp/vec
   16   def_bool PPC64 && CC_IS_GCC && $(cc-option, -mcpu=power10 -mpcrel)
   35   # On Book3S 64, the default virtual address space for 64-bit processes
   38   # between bottom-up and top-down allocations for applications that
   41   default 29 if PPC_BOOK3S_64 && PPC_64K_PAGES   # 29 = 45 (32T) - 16 (64K)
   [all …]
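
The trailing comment on the last fragment is the whole derivation: the maximum number of mmap randomisation bits is the log2 of the span available for randomisation minus the page shift. Spelled out, with the same numbers:

    log2(32 TiB) = 45     bits spanned by the 32T randomisation range
    log2(64 KiB) = 16     page shift with 64K pages
    45 - 16      = 29     the default chosen above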
|
/linux-6.8/arch/s390/ |
D | Kconfig |
    1   # SPDX-License-Identifier: GPL-2.0
  306   depends on $(cc-option,-march=z10)
  314   depends on $(cc-option,-march=z196)
  323   depends on $(cc-option,-march=zEC12)
  332   depends on $(cc-option,-march=z13)
  341   depends on $(cc-option,-march=z14)
  350   depends on $(cc-option,-march=z15)
  359   depends on $(cc-option,-march=z16)
  391   Cause the compiler to tune (-mtune) the generated code for a machine.
  409   depends on $(cc-option,-mtune=z196)
   [all …]
|
/linux-6.8/arch/powerpc/include/asm/nohash/32/ |
D | pte-8xx.h |
    1   /* SPDX-License-Identifier: GPL-2.0 */
    8    * We also use the two level tables, but we can put the real bits in them
   14    * accessed, and overload the changed bit for write protect. We use
   19    * register when the TLB entry is loaded. We will use bit 27 for guard, since
   25    * load the PMD into MD_TWC. The 8M pages are only used for kernel
   85   #include <asm/pgtable-masks.h>
  139   pte_update(vma->vm_mm, address, ptep, clr, set, huge);  in __ptep_set_access_flags()
  168    * On the 8xx, the page tables are a bit special. For 16k pages, we have
  169    * 4 identical entries. For 512k pages, we have 128 entries as if it was
  170    * 4k pages, but they are flagged as 512k pages for the hardware.
   [all …]
|
/linux-6.8/arch/arm64/kvm/hyp/nvhe/ |
D | mm.c |
    1   // SPDX-License-Identifier: GPL-2.0-only
   54   return -EINVAL;  in __pkvm_alloc_private_va_range()
   61   return -ENOMEM;  in __pkvm_alloc_private_va_range()
   69    * pkvm_alloc_private_va_range - Allocates a private VA range.
  169   size = end - start;  in hyp_back_vmemmap()
  201   return -EINVAL;  in pkvm_cpu_set_vector()
  235   kvm_pte_t pte, *ptep = slot->ptep;  in hyp_fixmap_map()
  243   return (void *)slot->addr;  in hyp_fixmap_map()
  248   kvm_pte_t *ptep = slot->ptep;  in fixmap_clear_slot()
  249   u64 addr = slot->addr;  in fixmap_clear_slot()
   [all …]
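
The first fragments are from the hypervisor's private-VA allocator: align a running base, refuse ranges that would wrap the address space, and only then publish the new base. A loose sketch under those assumptions (names are illustrative; the real __pkvm_alloc_private_va_range also takes a lock and updates the hypervisor's own base variable):

    #include <linux/mm.h>
    #include <linux/align.h>
    #include <linux/errno.h>

    static unsigned long demo_io_map_base;

    static int demo_alloc_private_va_range(size_t size, unsigned long *haddr)
    {
            unsigned long addr = ALIGN(demo_io_map_base, PAGE_SIZE);

            /* reject empty ranges and ranges that would wrap around */
            if (!size || addr + size < addr)
                    return -EINVAL;

            demo_io_map_base = addr + size;
            *haddr = addr;
            return 0;
    }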
|
/linux-6.8/drivers/hwtracing/coresight/ |
D | ultrasoc-smb.c |
    1   // SPDX-License-Identifier: (GPL-2.0 OR MIT)
   16   #include "coresight-etm-perf.h"
   17   #include "coresight-priv.h"
   18   #include "ultrasoc-smb.h"
   22   #define ULTRASOC_SMB_DSM_UUID   "82ae1283-7f6a-4cbe-aa06-53e8fb24db18"
   26   u32 buf_status = readl(drvdata->base + SMB_LB_INT_STS_REG);  in smb_buffer_not_empty()
   33   struct smb_data_buffer *sdb = &drvdata->sdb;  in smb_update_data_size()
   36   buf_wrptr = readl(drvdata->base + SMB_LB_WR_ADDR_REG) -  in smb_update_data_size()
   37   sdb->buf_hw_base;  in smb_update_data_size()
   40   if (buf_wrptr == sdb->buf_rdptr && smb_buffer_not_empty(drvdata)) {  in smb_update_data_size()
   [all …]
|