
Searched refs:regions (Results 1 – 25 of 407) sorted by relevance


/linux/arch/powerpc/mm/nohash/
kaslr_booke.c
23 struct regions { struct
38 struct regions __initdata regions; variable
113 if (regions.reserved_mem < 0) in overlaps_reserved_region()
117 for (subnode = fdt_first_subnode(fdt, regions.reserved_mem); in overlaps_reserved_region()
125 while (len >= (regions.reserved_mem_addr_cells + in overlaps_reserved_region()
126 regions.reserved_mem_size_cells)) { in overlaps_reserved_region()
128 if (regions.reserved_mem_addr_cells == 2) in overlaps_reserved_region()
131 reg += regions.reserved_mem_addr_cells; in overlaps_reserved_region()
132 len -= 4 * regions in overlaps_reserved_region()
[all...]
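The kaslr_booke.c hits above walk the flattened device tree's reserved-memory description so KASLR can reject candidate offsets that overlap a reserved range. Below is a minimal userspace sketch of that walk, assuming libfdt and the standard /reserved-memory binding; it is not the kernel's overlaps_reserved_region(), and the address/size cell counts are passed in rather than read from the tree as the kernel code does.

    #include <libfdt.h>
    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t read_cells(const fdt32_t **reg, int cells)
    {
        uint64_t val = 0;

        /* Cells are big-endian 32-bit words; two cells form a 64-bit value. */
        while (cells--)
            val = (val << 32) | fdt32_to_cpu(*(*reg)++);
        return val;
    }

    static bool overlaps_reserved_mem(const void *fdt, uint64_t start,
                                      uint64_t end, int addr_cells,
                                      int size_cells)
    {
        int node = fdt_path_offset(fdt, "/reserved-memory");
        int subnode;

        if (node < 0)
            return false;

        fdt_for_each_subnode(subnode, fdt, node) {
            int len;
            const fdt32_t *reg = fdt_getprop(fdt, subnode, "reg", &len);

            if (!reg)
                continue;
            len /= (int)sizeof(fdt32_t);
            while (len >= addr_cells + size_cells) {
                uint64_t base = read_cells(&reg, addr_cells);
                uint64_t size = read_cells(&reg, size_cells);

                if (base < end && base + size > start)
                    return true;    /* [start, end) overlaps this entry */
                len -= addr_cells + size_cells;
            }
        }
        return false;
    }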
/linux/drivers/mtd/chips/
jedec_probe.c
275 const uint32_t regions[6]; member
307 .regions = {
319 .regions = {
334 .regions = {
349 .regions = {
364 .regions = {
379 .regions = {
395 .regions = {
412 .regions = {
429 .regions
[all...]
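Each jedec_probe.c entry above fills a fixed six-slot regions[] table describing a chip's erase geometry. A hedged illustration of how such a table can be populated follows; the ERASEINFO() packing (erase-block size shifted left by 8, OR'd with the block count minus one) is the encoding jedec_probe.c appears to use, and the part described here is invented.

    #include <stdint.h>

    /* Assumed encoding: erase-block size in the high bits, block count
     * minus one in the low byte. */
    #define ERASEINFO(size, blocks) (((size) << 8) | ((blocks) - 1))

    struct flash_info_sketch {
        const char *name;
        uint32_t regions[6];    /* up to six erase regions per chip */
    };

    /* Hypothetical 512 KiB part: eight 8 KiB boot blocks, then seven
     * 64 KiB main blocks. */
    static const struct flash_info_sketch demo_chip = {
        .name = "demo-flash",
        .regions = {
            ERASEINFO(0x02000, 8),
            ERASEINFO(0x10000, 7),
        },
    };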
/linux/mm/damon/tests/
vaddr-kunit.h
44 * discontiguous regions which cover every mapped areas. However, the three
45 * regions should not include the two biggest unmapped areas in the original
47 * heap and the mmap()-ed regions, and 2) the mmap()-ed regions and stack.
54 * three regions and returns. For more detail, refer to the comment of
60 * mapped. To cover every mappings, the three regions should start with 10,
63 * unmapped areas, and thus it should be converted to three regions of 10-25,
69 struct damon_addr_range regions[3] = {0}; in damon_test_three_regions_in_vmas() local
84 __damon_va_three_regions(&mm, regions); in damon_test_three_regions_in_vmas()
86 KUNIT_EXPECT_EQ(test, 10ul, regions[ in damon_test_three_regions_in_vmas()
130 damon_do_test_apply_three_regions(struct kunit * test,unsigned long * regions,int nr_regions,struct damon_addr_range * three_regions,unsigned long * expected,int nr_expected) damon_do_test_apply_three_regions() argument
164 unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, damon_test_apply_three_regions1() local
186 unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, damon_test_apply_three_regions2() local
210 unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, damon_test_apply_three_regions3() local
235 unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, damon_test_apply_three_regions4() local
[all...]
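The vaddr-kunit.h comment above describes the conversion being tested: keep every mapping covered while cutting out the two biggest unmapped gaps, which leaves exactly three regions. Here is a standalone sketch of that idea, not the kernel's __damon_va_three_regions(); it assumes at least three address-sorted mappings.

    #include <stdio.h>

    struct range { unsigned long start, end; };

    static void three_regions(const struct range *maps, int n,
                              struct range out[3])
    {
        int g1 = -1, g2 = -1;   /* indices of the two largest gaps */
        int i;

        for (i = 0; i < n - 1; i++) {
            unsigned long gap = maps[i + 1].start - maps[i].end;

            if (g1 < 0 || gap > maps[g1 + 1].start - maps[g1].end) {
                g2 = g1;    /* old largest becomes second largest */
                g1 = i;
            } else if (g2 < 0 || gap > maps[g2 + 1].start - maps[g2].end) {
                g2 = i;
            }
        }
        if (g1 > g2) { int t = g1; g1 = g2; g2 = t; }   /* order by address */

        out[0] = (struct range){ maps[0].start, maps[g1].end };
        out[1] = (struct range){ maps[g1 + 1].start, maps[g2].end };
        out[2] = (struct range){ maps[g2 + 1].start, maps[n - 1].end };
    }

    int main(void)
    {
        /* The mappings from the comment above: 10-25, 200-210, 220-230,
         * 300-330, 350-490. Expected regions: 10-25, 200-230, 300-490. */
        struct range maps[] = { {10, 25}, {200, 210}, {220, 230},
                                {300, 330}, {350, 490} };
        struct range out[3];
        int i;

        three_regions(maps, 5, out);
        for (i = 0; i < 3; i++)
            printf("%lu-%lu\n", out[i].start, out[i].end);
        return 0;
    }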
/linux/drivers/mtd/devices/
mtd_intel_dg.c
40 } regions[] __counted_by(nregions); member
163 * [3:0] regions 12-15 read state
164 * [7:4] regions 12-15 write state
165 * [19:8] regions 0-11 read state
166 * [31:20] regions 0-11 write state
205 if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from && in idg_nvm_get_region()
206 nvm->regions[i].offset <= from && in idg_nvm_get_region()
207 nvm->regions[i].size != 0) in idg_nvm_get_region()
438 u8 id = nvm->regions[ in intel_dg_nvm_init()
[all...]
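The idg_nvm_get_region() hit above selects the region whose span contains a given flash offset, skipping zero-sized slots. Restated as a self-contained sketch; the field names mirror the excerpt, but this is not the driver's code.

    #include <stddef.h>
    #include <stdint.h>

    struct nvm_region {
        uint64_t offset;
        uint64_t size;
        uint8_t id;
    };

    static const struct nvm_region *get_region(const struct nvm_region *regions,
                                               unsigned int nregions,
                                               uint64_t from)
    {
        unsigned int i;

        for (i = 0; i < nregions; i++) {
            const struct nvm_region *r = &regions[i];

            /* "from" must land inside [offset, offset + size - 1]. */
            if (r->size != 0 && r->offset <= from &&
                from <= r->offset + r->size - 1)
                return r;
        }
        return NULL;
    }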
/linux/drivers/gpu/drm/nouveau/nvkm/nvfw/
acr.c
130 hdr->regions.no_regions); in flcn_acr_desc_dump()
132 for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { in flcn_acr_desc_dump()
135 hdr->regions.region_props[i].start_addr); in flcn_acr_desc_dump()
137 hdr->regions.region_props[i].end_addr); in flcn_acr_desc_dump()
139 hdr->regions.region_props[i].region_id); in flcn_acr_desc_dump()
141 hdr->regions.region_props[i].read_mask); in flcn_acr_desc_dump()
143 hdr->regions.region_props[i].write_mask); in flcn_acr_desc_dump()
145 hdr->regions.region_props[i].client_mask); in flcn_acr_desc_dump()
173 hdr->regions.no_regions); in flcn_acr_desc_v1_dump()
175 for (i = 0; i < ARRAY_SIZE(hdr->regions in flcn_acr_desc_v1_dump()
[all...]
/linux/drivers/vfio/platform/
vfio_platform_common.c
144 vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region), in vfio_platform_regions_init()
146 if (!vdev->regions) in vfio_platform_regions_init()
153 vdev->regions[i].addr = res->start; in vfio_platform_regions_init()
154 vdev->regions[i].size = resource_size(res); in vfio_platform_regions_init()
155 vdev->regions[i].flags = 0; in vfio_platform_regions_init()
159 vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO; in vfio_platform_regions_init()
160 vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ; in vfio_platform_regions_init()
162 vdev->regions[i].flags |= in vfio_platform_regions_init()
166 * Only regions addressed with PAGE granularity may be in vfio_platform_regions_init()
169 if (!(vdev->regions[ in vfio_platform_regions_init()
[all...]
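The vfio_platform hits above gate mmap support on PAGE granularity: a region is only flagged mappable when both its base address and its size are page aligned. A minimal sketch of that test, with PAGE_SIZE and PAGE_MASK defined locally for illustration rather than taken from kernel headers:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static bool region_mmap_capable(uint64_t addr, uint64_t size)
    {
        /* Any bits below the page boundary, in either the base or the
         * size, make the region unmappable. */
        return !(addr & ~PAGE_MASK) && !(size & ~PAGE_MASK);
    }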
/linux/tools/testing/memblock/tests/
alloc_exact_nid_api.c
30 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_simple_check()
31 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_simple_check()
82 struct memblock_region *new_rgn = &memblock.reserved.regions[1]; in alloc_exact_nid_top_down_numa_part_reserved_check()
83 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_part_reserved_check()
143 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_split_range_low_check()
144 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_split_range_low_check()
200 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
201 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
202 struct memblock_region *node2 = &memblock.memory.regions[6]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
258 struct memblock_region *new_rgn = &memblock.reserved.regions[ in alloc_exact_nid_top_down_numa_no_overlap_low_check()
[all...]
alloc_nid_api.c
66 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_top_down_simple_check()
118 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_top_down_end_misaligned_check()
169 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_exact_address_generic_check()
221 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_top_down_narrow_range_check()
307 * Expect a merge of both regions. Only the region size gets updated.
311 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_min_reserved_generic_check()
359 * Expect a merge of regions. Only the region size gets updated.
363 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_max_reserved_generic_check()
399 * there are two reserved regions at the borders, with a gap big enough to fit
416 struct memblock_region *rgn1 = &memblock.reserved.regions[ in alloc_nid_top_down_reserved_with_space_check()
[all...]
basic_api.c
17 ASSERT_NE(memblock.memory.regions, NULL); in memblock_initialization_check()
22 ASSERT_NE(memblock.reserved.regions, NULL); in memblock_initialization_check()
37 * and size to the collection of available memory regions (memblock.memory).
45 rgn = &memblock.memory.regions[0]; in memblock_add_simple_check()
70 * NUMA node and memory flags to the collection of available memory regions.
78 rgn = &memblock.memory.regions[0]; in memblock_add_node_simple_check()
114 * available memory regions (memblock.memory). The total size and
121 rgn1 = &memblock.memory.regions[0]; in memblock_add_disjoint_check()
122 rgn2 = &memblock.memory.regions[1]; in memblock_add_disjoint_check()
167 * and has size of two regions minu
[all...]
alloc_api.c
26 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_top_down_simple_check()
73 struct memblock_region *rgn1 = &memblock.reserved.regions[1]; in alloc_top_down_disjoint_check()
74 struct memblock_region *rgn2 = &memblock.reserved.regions[0]; in alloc_top_down_disjoint_check()
121 * Expect a merge of both regions. Only the region size gets updated.
125 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_top_down_before_check()
163 * Expect a merge of both regions. Both the base address and size of the region
168 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_top_down_after_check()
204 * A test that tries to allocate memory when there are two reserved regions with
217 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_top_down_second_fit_check()
254 * A test that tries to allocate memory when there are two reserved regions wit
[all...]
alloc_helpers_api.c
20 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_simple_generic_check()
63 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_misaligned_generic_check()
110 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_top_down_high_addr_check()
149 * regions get merged into one.
153 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_top_down_no_space_above_check()
186 * Expect successful allocation and merge of both regions.
190 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_top_down_min_addr_cap_check()
236 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_bottom_up_high_addr_check()
278 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_bottom_up_no_space_above_check()
314 struct memblock_region *rgn = &memblock.reserved.regions[ in alloc_from_bottom_up_min_addr_cap_check()
[all...]
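The memblock test files above all share one shape: set up a test memblock, allocate under some constraint, then assert on memblock.reserved.regions[] and the reserved counters. A hedged sketch of that shape; setup_memblock(), the ASSERT_* macros, and the SZ_128/SMP_CACHE_BYTES constants are assumed to come from the tools/testing/memblock harness.

    static int alloc_simple_check_sketch(void)
    {
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        phys_addr_t size = SZ_128;
        void *ptr;

        setup_memblock();

        ptr = memblock_alloc(size, SMP_CACHE_BYTES);

        /* The allocation must land in the first reserved region and be
         * the only reservation. */
        ASSERT_NE(ptr, NULL);
        ASSERT_EQ(rgn->size, size);
        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        return 0;
    }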
/linux/drivers/vfio/cdx/
main.c
19 vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region), in vfio_cdx_open_device()
21 if (!vdev->regions) in vfio_cdx_open_device()
27 vdev->regions[i].addr = res->start; in vfio_cdx_open_device()
28 vdev->regions[i].size = resource_size(res); in vfio_cdx_open_device()
29 vdev->regions[i].type = res->flags; in vfio_cdx_open_device()
31 * Only regions addressed with PAGE granularity may be in vfio_cdx_open_device()
34 if (!(vdev->regions[i].addr & ~PAGE_MASK) && in vfio_cdx_open_device()
35 !(vdev->regions[i].size & ~PAGE_MASK)) in vfio_cdx_open_device()
36 vdev->regions[i].flags |= in vfio_cdx_open_device()
38 vdev->regions[ in vfio_cdx_open_device()
[all...]
/linux/tools/testing/selftests/damon/
access_memory_even.c
5 * Receives number of regions and size of each region from user. Allocate the
6 * regions and repeatedly access even numbered (starting from zero) regions.
16 char **regions; in main() local
29 regions = malloc(sizeof(*regions) * nr_regions); in main()
31 regions[i] = malloc(sz_region); in main()
36 memset(regions[i], i, sz_region); in main()
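The access_memory_even.c excerpt above is nearly complete on its own. A runnable approximation with simplified argument handling; the usage message and error checks are assumptions, not the selftest's exact code.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(int argc, char *argv[])
    {
        char **regions;
        int nr_regions, i;
        size_t sz_region;

        if (argc != 3) {
            fprintf(stderr, "Usage: %s <nr_regions> <sz_region>\n", argv[0]);
            return 1;
        }
        nr_regions = atoi(argv[1]);
        sz_region = strtoul(argv[2], NULL, 0);

        regions = malloc(sizeof(*regions) * nr_regions);
        if (!regions)
            return 1;
        for (i = 0; i < nr_regions; i++)
            regions[i] = malloc(sz_region);

        /* Touch only the even-numbered regions, repeatedly, until the
         * process is killed. */
        while (1)
            for (i = 0; i < nr_regions; i += 2)
                memset(regions[i], i, sz_region);
        return 0;
    }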
/linux/drivers/vfio/fsl-mc/
vfio_fsl_mc.c
30 vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region), in vfio_fsl_mc_open_device()
32 if (!vdev->regions) in vfio_fsl_mc_open_device()
36 struct resource *res = &mc_dev->regions[i]; in vfio_fsl_mc_open_device()
39 vdev->regions[i].addr = res->start; in vfio_fsl_mc_open_device()
40 vdev->regions[i].size = resource_size(res); in vfio_fsl_mc_open_device()
41 vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS; in vfio_fsl_mc_open_device()
43 * Only regions addressed with PAGE granularity may be in vfio_fsl_mc_open_device()
46 if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) && in vfio_fsl_mc_open_device()
47 !(vdev->regions[ in vfio_fsl_mc_open_device()
[all...]
/linux/drivers/virt/nitro_enclaves/
ne_misc_dev_test.c
23 * regions = {}
34 * regions = {}
45 * regions = {
58 * regions = {
72 * regions = {
87 * regions = {
102 * regions = {
117 phys_contig_mem_regions.regions = kunit_kcalloc(test, MAX_PHYS_REGIONS, in ne_misc_dev_test_merge_phys_contig_memory_regions()
118 sizeof(*phys_contig_mem_regions.regions), in ne_misc_dev_test_merge_phys_contig_memory_regions()
120 KUNIT_ASSERT_TRUE(test, phys_contig_mem_regions.regions); in ne_misc_dev_test_merge_phys_contig_memory_regions()
[all...]
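The ne_misc_dev_test.c cases above exercise merging of physically contiguous memory regions. The rule they check, as a simplified standalone sketch; the types and the fixed capacity are illustrative, not the Nitro Enclaves driver's.

    #include <stddef.h>
    #include <stdint.h>

    struct phys_region { uint64_t start, size; };

    struct phys_regions {
        size_t num;
        struct phys_region regions[16];    /* fixed cap for the sketch */
    };

    static void add_region(struct phys_regions *prs, uint64_t start,
                           uint64_t size)
    {
        if (prs->num > 0) {
            struct phys_region *last = &prs->regions[prs->num - 1];

            /* Physically contiguous with the previous region: merge. */
            if (last->start + last->size == start) {
                last->size += size;
                return;
            }
        }
        prs->regions[prs->num++] = (struct phys_region){ start, size };
    }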
/linux/drivers/md/
dm-bio-prison-v1.c
29 struct prison_region regions[] __counted_by(num_locks);
47 prison = kzalloc(struct_size(prison, regions, num_locks), GFP_KERNEL); in dm_bio_prison_create()
53 spin_lock_init(&prison->regions[i].lock); in dm_bio_prison_create()
54 prison->regions[i].cell = RB_ROOT; in dm_bio_prison_create()
184 spin_lock_irq(&prison->regions[l].lock); in bio_detain()
185 r = __bio_detain(&prison->regions[l].cell, key, inmate, cell_prealloc, cell_result); in bio_detain()
186 spin_unlock_irq(&prison->regions[l].lock); in bio_detain()
223 spin_lock_irq(&prison->regions[l].lock); in dm_cell_release()
224 __cell_release(&prison->regions[l].cell, cell, bios); in dm_cell_release()
225 spin_unlock_irq(&prison->regions[ in dm_cell_release()
[all...]
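dm-bio-prison-v1.c above pairs a __counted_by() flexible array with a single struct_size() allocation, then initializes a spinlock and an rbtree root per region. The same pattern in isolation, as a kernel-context sketch (names carry a _sketch suffix to make clear this is not the dm code; __counted_by() needs a recent kernel):

    struct prison_region_sketch {
        spinlock_t lock;
        struct rb_root cell;
    };

    struct prison_sketch {
        unsigned int num_locks;
        struct prison_region_sketch regions[] __counted_by(num_locks);
    };

    static struct prison_sketch *prison_create(unsigned int num_locks)
    {
        struct prison_sketch *p;
        unsigned int i;

        /* Header and flexible array allocated in one struct_size() call. */
        p = kzalloc(struct_size(p, regions, num_locks), GFP_KERNEL);
        if (!p)
            return NULL;
        p->num_locks = num_locks;
        for (i = 0; i < num_locks; i++) {
            spin_lock_init(&p->regions[i].lock);
            p->regions[i].cell = RB_ROOT;
        }
        return p;
    }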
/linux/drivers/net/dsa/sja1105/
sja1105_devlink.c
7 /* Since devlink regions have a fixed size and the static config has a variable
85 priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *), in sja1105_setup_devlink_regions()
87 if (!priv->regions) in sja1105_setup_devlink_regions()
97 dsa_devlink_region_destroy(priv->regions[i]); in sja1105_setup_devlink_regions()
99 kfree(priv->regions); in sja1105_setup_devlink_regions()
103 priv->regions[i] = region; in sja1105_setup_devlink_regions()
115 dsa_devlink_region_destroy(priv->regions[i]); in sja1105_teardown_devlink_regions()
117 kfree(priv->regions); in sja1105_teardown_devlink_regions()
/linux/mm/
memblock.c
45 * Memblock is a method of managing memory regions during the early
50 * regions. There are several types of these collections:
56 * * ``reserved`` - describes the regions that were allocated
64 * which contains an array of memory regions along with
72 * arrays during addition of new regions. This feature should be used
129 .memory.regions = memblock_memory_init_regions,
133 .reserved.regions = memblock_reserved_init_regions,
143 .regions = memblock_physmem_init_regions,
158 for (i = 0, rgn = &memblock_type->regions[0]; \
160 i++, rgn = &memblock_type->regions[
[all...]
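The memblock.c excerpt above defines its region iterator as a plain indexed walk over type->regions[]. A hedged usage sketch of that walk, expanded by hand; total_size_of() is illustrative, not a memblock API, and struct memblock_type/phys_addr_t are assumed from the kernel headers.

    static phys_addr_t total_size_of(struct memblock_type *type)
    {
        struct memblock_region *rgn;
        phys_addr_t total = 0;
        int i;

        /* Same shape as the for_each_memblock_type() macro above. */
        for (i = 0, rgn = &type->regions[0]; i < type->cnt;
             i++, rgn = &type->regions[i])
            total += rgn->size;

        return total;
    }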
/linux/Documentation/admin-guide/device-mapper/
dm-clone.rst
58 3. A small metadata device - it records which regions are already valid in the
59 destination device, i.e., which regions have already been hydrated, or have
68 dm-clone divides the source and destination devices in fixed sized regions.
77 Reads and writes from/to hydrated regions are serviced from the destination
93 as a hint to skip hydration of the regions covered by the request, i.e., it
111 A message `hydration_threshold <#regions>` can be used to set the maximum number
112 of regions being copied, the default being 1 region.
116 region size. A message `hydration_batch_size <#regions>` can be used to tune the
118 dm-clone trying to batch together contiguous regions, so we copy the data in
119 batches of this many regions
[all...]
/linux/drivers/soc/qcom/
smem.c
43 * the partition and holds properties for the two internal memory regions. The
44 * two regions are cached and non-cached memory respectively. Each region
273 * @num_regions: number of @regions
274 * @regions: list of the memory regions defining the shared memory
288 struct smem_region regions[] __counted_by(num_regions);
468 header = smem->regions[0].virt_base; in qcom_smem_alloc_global()
557 header = smem->regions[0].virt_base; in qcom_smem_get_global()
565 region = &smem->regions[i]; in qcom_smem_get_global()
746 header = __smem->regions[ in qcom_smem_get_free_space()
[all...]
/linux/Documentation/core-api/kho/
concepts.rst
9 regions, which could contain serialized system states, across kexec.
17 that describes preserved memory regions. These regions contain either
20 memory regions from KHO FDT.
38 We guarantee that we always have such regions through the scratch regions: On
39 first boot KHO allocates several physically contiguous memory regions. Since
40 after kexec these regions will be used by early memory allocations, there is a
45 used to explicitly define size of the scratch regions.
46 The scratch regions ar
[all...]
/linux/drivers/perf/
marvell_cn10k_tad_pmu.c
32 struct tad_region *regions; member
62 new += readq(tad_pmu->regions[i].base + in tad_pmu_event_counter_read()
80 writeq_relaxed(0, tad_pmu->regions[i].base + in tad_pmu_event_counter_stop()
101 writeq_relaxed(0, tad_pmu->regions[i].base + in tad_pmu_event_counter_start()
109 writeq_relaxed(reg_val, tad_pmu->regions[i].base + in tad_pmu_event_counter_start()
293 struct tad_region *regions; in tad_pmu_probe() local
342 regions = devm_kcalloc(&pdev->dev, tad_cnt, in tad_pmu_probe()
343 sizeof(*regions), GFP_KERNEL); in tad_pmu_probe()
344 if (!regions) in tad_pmu_probe()
347 /* ioremap the distributed TAD pmu regions */ in tad_pmu_probe()
[all...]
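In marvell_cn10k_tad_pmu.c above, one logical event is distributed across many TAD regions, so reading it means summing the same counter register from each region's mapped base. That read path as a sketch; the struct layout and register-offset parameter are illustrative, and kernel context with readq() is assumed.

    struct tad_region_sketch {
        void __iomem *base;    /* ioremap()ed per-region registers */
    };

    static u64 tad_counter_sum(struct tad_region_sketch *regions,
                               u32 region_cnt, u32 counter_off)
    {
        u64 sum = 0;
        u32 i;

        /* The event value is spread out: add up the same counter
         * register across every TAD region. */
        for (i = 0; i < region_cnt; i++)
            sum += readq(regions[i].base + counter_off);

        return sum;
    }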
/linux/Documentation/mm/damon/
monitoring_intervals_tuning_example.rst
35 Then, list the DAMON-found regions of different access patterns, sorted by the
58 The list shows not seemingly hot regions, and only minimum access pattern
61 nearly identical. We can suspect this is because “adaptive regions adjustment”
63 hotness of regions using ``age`` as the recency information. That would be
68 The temperature ranges to total size of regions of each range histogram
87 regions detection. According to the :ref:`guide
132 DAMON found two distinct 4 KiB regions that pretty hot. The regions are also
137 Especially, the finding of the 4 KiB regions among the 62 GiB total memory
138 shows DAMON’s adaptive regions adjustmen
[all...]
/linux/Documentation/admin-guide/mm/damon/
lru_sort.rst
31 DAMON_LRU_SORT finds hot pages (pages of memory regions that showing access
33 memory regions that showing no access for a time that longer than a
85 Access frequency threshold for hot memory regions identification in permil.
94 Time threshold for cold memory regions identification in microseconds.
179 Minimum number of monitoring regions.
181 The minimal number of monitoring regions of DAMON for the cold memory
190 Maximum number of monitoring regions.
192 The maximum number of monitoring regions of DAMON for the cold memory
225 Number of hot memory regions that tried to be LRU-sorted.
230 Total bytes of hot memory regions tha
[all...]
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/acr/
gp102.c
203 desc->regions.no_regions = 2; in gp102_acr_load_setup()
204 desc->regions.region_props[0].start_addr = acr->wpr_start >> 8; in gp102_acr_load_setup()
205 desc->regions.region_props[0].end_addr = acr->wpr_end >> 8; in gp102_acr_load_setup()
206 desc->regions.region_props[0].region_id = 1; in gp102_acr_load_setup()
207 desc->regions.region_props[0].read_mask = 0xf; in gp102_acr_load_setup()
208 desc->regions.region_props[0].write_mask = 0xc; in gp102_acr_load_setup()
209 desc->regions.region_props[0].client_mask = 0x2; in gp102_acr_load_setup()
210 desc->regions.region_props[0].shadow_mem_start_addr = acr->shadow_start >> 8; in gp102_acr_load_setup()
