/linux/kernel/
range.c

  in add_range():
     12  int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
     21      range[nr_range].start = start;
     22      range[nr_range].end = end;
  in add_range_with_merge():
     29  int add_range_with_merge(struct range *range, int az, int nr_range,
     41      if (!range[i].end)
     44      common_start = max(range[i].start, start);
     45      common_end = min(range[i].end, end);
     50      start = min(range[i].start, start);
     51      end = max(range[i].end, end);
     53      memmove(&range[i], &range[i + 1],
  [all …]
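Taken together, add_range() appends an inclusive [start, end] interval to a
fixed-size array, and add_range_with_merge() first folds the new interval into
any overlapping entry. Below is a minimal userspace sketch of the plain append
path only (the merge loop is elided above), assuming inclusive end bounds as
range_len() in include/linux/range.h implies; this is not the kernel
implementation:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };

/* Append [start, end] if a slot is free; return the new range count. */
static int add_range(struct range *range, int az, int nr_range,
                     uint64_t start, uint64_t end)
{
        if (start > end)        /* inverted range: ignore it */
                return nr_range;
        if (nr_range >= az)     /* array full: drop the range */
                return nr_range;
        range[nr_range].start = start;
        range[nr_range].end = end;
        return nr_range + 1;
}

int main(void)
{
        struct range r[4];
        int n = 0;

        n = add_range(r, 4, n, 0x1000, 0x1fff);
        n = add_range(r, 4, n, 0x3000, 0x3fff);
        printf("%d ranges, first ends at %#llx\n", n,
               (unsigned long long)r[0].end);
        return 0;
}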
/linux/drivers/soc/ti/
knav_qmss_acc.c

     20  #define knav_range_offset_to_inst(kdev, range, q) \
     21          (range->queue_base_inst + (q << kdev->inst_shift))
  in __knav_acc_notify():
     23  static void __knav_acc_notify(struct knav_range_info *range,
     26      struct knav_device *kdev = range->kdev;
     30      range_base = kdev->base_id + range->queue_base;
     32      if (range->flags & RANGE_MULTI_QUEUE) {
     33          for (queue = 0; queue < range->num_queues; queue++) {
     34              inst = knav_range_offset_to_inst(kdev, range,
     44      queue = acc->channel - range->acc_info.start_channel;
     45      inst = knav_range_offset_to_inst(kdev, range, queue);
  [all …]
knav_qmss_queue.c

  in knav_queue_setup_irq():
    114  static int knav_queue_setup_irq(struct knav_range_info *range,
    117      unsigned queue = inst->id - range->queue_base;
    120      if (range->flags & RANGE_HAS_IRQ) {
    121          irq = range->irqs[queue].irq;
    126          if (range->irqs[queue].cpu_mask) {
    127              ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
    129                  dev_warn(range->kdev->dev,
  in knav_queue_free_irq():
    140      struct knav_range_info *range = inst->range;
    141      unsigned queue = inst->id - inst->range->queue_base;
    144      if (range->flags & RANGE_HAS_IRQ) {
  [all …]
/linux/arch/s390/include/asm/
physmem_info.h

    138  #define for_each_physmem_reserved_type_range(t, range, p_start, p_end) \
    139      for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end; \
    140           range && range->end; range = range->chain ? __va(range->chain) : NULL, \
    141           *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
  in __physmem_reserved_next():
    144          struct reserved_range *range)
    146      if (!range) {
    147          range = &physmem_info.reserved[*t];
    148          if (range->end)
    149              return range;
    151      if (range->chain)
  [all …]
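The for_each_physmem_reserved_type_range() macro walks a singly linked chain
of reserved ranges, publishing each start/end through caller-supplied pointers
and stopping at an entry whose end is zero. A userspace sketch of the same
iterator shape follows; the real chain field holds a physical address
translated with __va(), here it is a plain pointer, and the struct layout is
invented for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reserved_range {
        uint64_t start, end;            /* end == 0 terminates the walk */
        struct reserved_range *chain;
};

#define for_each_reserved_range(range, head, p_start, p_end)           \
        for ((range) = (head), *(p_start) = (range)->start,            \
             *(p_end) = (range)->end;                                  \
             (range) && (range)->end;                                  \
             (range) = (range)->chain,                                 \
             *(p_start) = (range) ? (range)->start : 0,                \
             *(p_end) = (range) ? (range)->end : 0)

int main(void)
{
        struct reserved_range b = { 0x8000, 0x8fff, NULL };
        struct reserved_range a = { 0x1000, 0x1fff, &b };
        struct reserved_range *r;
        uint64_t s, e;

        for_each_reserved_range(r, &a, &s, &e)
                printf("[%#llx, %#llx]\n", (unsigned long long)s,
                       (unsigned long long)e);
        return 0;
}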
/linux/security/selinux/ss/
context.h

     33      struct mls_range range;
  in mls_context_init():
     39      memset(&c->range, 0, sizeof(c->range));
  in mls_context_cpy():
     47      dst->range.level[0].sens = src->range.level[0].sens;
     48      rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
     52      dst->range.level[1].sens = src->range.level[1].sens;
     53      rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
     55          ebitmap_destroy(&dst->range.level[0].cat);
  in mls_context_cpy_low():
     68      dst->range.level[0].sens = src->range.level[0].sens;
     69      rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
     73      dst->range.level[1].sens = src->range.level[0].sens;
  [all …]
mls.c

  in mls_compute_context_len():
     44      u32 index_sens = context->range.level[l].sens;
     50      e = &context->range.level[l].cat;
     70      if (mls_level_eq(&context->range.level[0],
     71                       &context->range.level[1]))
  in mls_sid_to_context():
    104          context->range.level[l].sens - 1));
    110      e = &context->range.level[l].cat;
    147      if (mls_level_eq(&context->range.level[0],
    148                       &context->range.level[1]))
  in mls_context_isvalid():
    196      if (!mls_range_isvalid(p, &c->range))
    208      if (!mls_range_contains(usrdatum->range, c->range))
  [all …]
/linux/lib/
logic_pio.c

  in logic_pio_register_range():
     35      struct logic_pio_hwaddr *range;
     50      list_for_each_entry(range, &io_range_list, list) {
     51          if (range->fwnode == new_range->fwnode) {
     56          if (range->flags == LOGIC_PIO_CPU_MMIO &&
     59              if (start >= range->hw_start + range->size ||
     60                  end < range->hw_start) {
     61                  mmio_end = range->io_start + range->size;
     66          } else if (range->flags == LOGIC_PIO_INDIRECT &&
     68              iio_sz += range->size;
  in logic_pio_unregister_range():
    109  void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
  [all …]
/linux/mm/
memremap.c

  in pgmap_array_delete():
     41  static void pgmap_array_delete(struct range *range)
     43      xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
  in pfn_first():
     50      struct range *range = &pgmap->ranges[range_id];
     51      unsigned long pfn = PHYS_PFN(range->start);
  in pgmap_pfn_valid():
     63          struct range *range = &pgmap->ranges[i];
     65          if (pfn >= PHYS_PFN(range->start) &&
     66              pfn <= PHYS_PFN(range->end))
  in pfn_end():
     75      const struct range *range = &pgmap->ranges[range_id];
     77      return (range->start + range_len(range)) >> PAGE_SHIFT;
  in pageunmap_range():
     88      struct range *range = &pgmap->ranges[range_id];
  [all …]
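memremap.c converts inclusive byte ranges to page-frame numbers with
PHYS_PFN(), a right shift by PAGE_SHIFT; pfn_end() gets a one-past-the-end
frame by adding range_len() to the start before shifting. A small userspace
illustration, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PHYS_PFN(x)     ((x) >> PAGE_SHIFT)

struct range { uint64_t start, end; };  /* end is inclusive */

static uint64_t range_len(const struct range *range)
{
        return range->end - range->start + 1;
}

int main(void)
{
        struct range r = { 0x100000, 0x1fffff };        /* 1 MiB at 1 MiB */
        uint64_t first = PHYS_PFN(r.start);
        uint64_t end = (r.start + range_len(&r)) >> PAGE_SHIFT;

        printf("pfns [%llu, %llu)\n", (unsigned long long)first,
               (unsigned long long)end);                /* [256, 512) */
        return 0;
}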
hmm.c

     34      struct hmm_range *range;
  in hmm_pfns_fill():
     51          struct hmm_range *range, unsigned long cpu_flags)
     53      unsigned long i = (addr - range->start) >> PAGE_SHIFT;
     56          range->hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
     57          range->hmm_pfns[i] |= cpu_flags;
  in hmm_pte_need_fault():
    100      struct hmm_range *range = hmm_vma_walk->range;
    112      pfn_req_flags &= range->pfn_flags_mask;
    113      pfn_req_flags |= range->default_flags;
  in hmm_range_need_fault():
    135      struct hmm_range *range = hmm_vma_walk->range;
    144      if (!((range->default_flags | range->pfn_flags_mask) &
  [all …]
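Two idioms recur in the hmm.c excerpt: the output slot for a page is found by
((addr - range->start) >> PAGE_SHIFT), and a page's requested flags are
clamped by pfn_flags_mask, then OR'ed with default_flags. A userspace sketch
with invented flag bits; the real HMM_PFN_* encoding differs:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define REQ_FAULT       (1u << 0)       /* illustrative bits only */
#define REQ_WRITE       (1u << 1)

struct hmm_range {
        uint64_t start;
        uint32_t *hmm_pfns;             /* one slot per page */
        uint32_t default_flags;
        uint32_t pfn_flags_mask;
};

static uint32_t effective_flags(const struct hmm_range *range, uint32_t req)
{
        req &= range->pfn_flags_mask;   /* clamp to what the caller allows */
        req |= range->default_flags;    /* always-on bits */
        return req;
}

static void fill_page(struct hmm_range *range, uint64_t addr, uint32_t flags)
{
        uint64_t i = (addr - range->start) >> PAGE_SHIFT;

        range->hmm_pfns[i] = flags;
}

int main(void)
{
        uint32_t pfns[4] = { 0 };
        struct hmm_range r = {
                .start = 0x10000, .hmm_pfns = pfns,
                .default_flags = REQ_FAULT, .pfn_flags_mask = REQ_WRITE,
        };

        fill_page(&r, 0x12000, effective_flags(&r, REQ_WRITE));
        printf("slot 2 = %#x\n", pfns[2]);      /* FAULT|WRITE = 0x3 */
        return 0;
}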
madvise.c

     77      struct madvise_behavior_range range;
  in madvise_update_vma():
    156      struct madvise_behavior_range *range = &madv_behavior->range;
    159      VMA_ITERATOR(vmi, madv_behavior->mm, range->start);
    167          range->start, range->end, anon_name);
    170          range->start, range->end, new_flags);
  in madvise_willneed():
    288      unsigned long start = madv_behavior->range.start;
    289      unsigned long end = madv_behavior->range.end;
  in madvise_cold_page_range():
    581      struct madvise_behavior_range *range = &madv_behavior->range;
    588      walk_page_range_vma(vma, range->start, range->end, &cold_walk_ops,
  in madvise_pageout_page_range():
    616          struct madvise_behavior_range *range)
  [all …]
/linux/include/linux/
range.h

      6  struct range {
  in range_len():
     11  static inline u64 range_len(const struct range *range)
     13      return range->end - range->start + 1;
  in range_contains():
     17  static inline bool range_contains(const struct range *r1,
     18                                    const struct range *r2)
  in range_overlaps():
     24  static inline bool range_overlaps(const struct range *r1,
     25                                    const struct range *r2)
     30  int add_range(struct range *range, int az, int nr_range,
     34  int add_range_with_merge(struct range *range, int az, int nr_range,
     37  void subtract_range(struct range *range, int az, u64 start, u64 end);
  [all …]
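The bodies of range_contains() and range_overlaps() are elided above, so the
sketch below fills them in with the natural definitions for inclusive
[start, end] bounds; treat it as an illustration, not a verbatim copy of
range.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };

static inline uint64_t range_len(const struct range *range)
{
        return range->end - range->start + 1;
}

/* r1 contains r2 iff r2 lies entirely inside r1 */
static inline bool range_contains(const struct range *r1,
                                  const struct range *r2)
{
        return r1->start <= r2->start && r1->end >= r2->end;
}

/* inclusive intervals overlap iff each one starts no later than the
 * other one ends */
static inline bool range_overlaps(const struct range *r1,
                                  const struct range *r2)
{
        return r1->start <= r2->end && r2->start <= r1->end;
}

int main(void)
{
        struct range a = { 0, 99 }, b = { 50, 149 };

        printf("len=%llu contains=%d overlaps=%d\n",
               (unsigned long long)range_len(&a),
               range_contains(&a, &b), range_overlaps(&a, &b));
        return 0;
}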
mmu_notifier.h

    176          const struct mmu_notifier_range *range);
    178          const struct mmu_notifier_range *range);
    244          const struct mmu_notifier_range *range,
    391  mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
  in mmu_notifier_range_blockable():
    394  mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
    396      return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
  in mmu_notifier_invalidate_range_start():
    432  mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
    437      if (mm_has_notifiers(range->mm)) {
    438          range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
    439          __mmu_notifier_invalidate_range_start(range);
  [all …]
/linux/drivers/dax/
kmem.c

  in dax_kmem_range():
     31  static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)
     34      struct range *range = &dax_range->range;
     37          r->start = ALIGN(range->start, memory_block_size_bytes());
     38          r->end = ALIGN_DOWN(range->end + 1, memory_block_size_bytes()) - 1;
     40          r->start = range->start;
     41          r->end = range->end;
  in dev_dax_kmem_probe():
     98      struct range range;
    100          orig_len += range_len(&dev_dax->ranges[i].range);
    101          rc = dax_kmem_range(dev_dax, i, &range);
    104              i, range.start, range.end);
  [all …]
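dax_kmem_range() trims an inclusive byte range so it covers only whole
hotpluggable memory blocks: the start is rounded up and the end rounded down
to the block size. A standalone sketch with a made-up 128 MiB block size
(memory_block_size_bytes() is platform-defined):

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)      (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

struct range { uint64_t start, end; };  /* end is inclusive */

int main(void)
{
        uint64_t block = 128ull << 20;  /* example block size: 128 MiB */
        struct range in = { 0x5000000, 0x1affffff };    /* unaligned */
        struct range out;

        out.start = ALIGN(in.start, block);             /* round start up */
        out.end = ALIGN_DOWN(in.end + 1, block) - 1;    /* round end down */

        printf("[%#llx, %#llx] -> [%#llx, %#llx]\n",
               (unsigned long long)in.start, (unsigned long long)in.end,
               (unsigned long long)out.start, (unsigned long long)out.end);
        return 0;
}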
/linux/drivers/android/binder/range_alloc/
array.rs

  in debug_print():
     65      for range in &self.ranges {
     70          range.offset,
     71          range.size,
     72          range.state.pid(),
     73          range.state.is_oneway(),
     75          if let DescriptorState::Reserved(_) = range.state {
  in find_empty_range():
    101      for (i, range) in self.ranges.iter().enumerate() {
    103          if size <= range.offset - end_of_prev {
    109          end_of_prev = range.endpoint();
  in reservation_abort():
    157          .position(|range| range.offset == offset)
  [all …]
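find_empty_range() is a first-fit search: the ranges stay sorted by offset,
and the first gap between the end of one range and the start of the next that
can hold the request wins. A C rendering of the same logic (the original is
Rust; types are simplified for illustration):

#include <stddef.h>
#include <stdio.h>

struct alloc_range { size_t offset, size; };

/* Return the offset of a free gap of at least size bytes, or (size_t)-1. */
static size_t find_empty_range(const struct alloc_range *r, size_t n,
                               size_t total, size_t size)
{
        size_t end_of_prev = 0;

        for (size_t i = 0; i < n; i++) {
                if (size <= r[i].offset - end_of_prev)
                        return end_of_prev;     /* hole before r[i] */
                end_of_prev = r[i].offset + r[i].size;
        }
        /* final hole between the last range and the end of the buffer */
        return size <= total - end_of_prev ? end_of_prev : (size_t)-1;
}

int main(void)
{
        struct alloc_range used[] = { { 0, 16 }, { 32, 8 } };

        /* hole [16, 32) is 16 bytes, so a 12-byte request fits at 16 */
        printf("%zu\n", find_empty_range(used, 2, 64, 12));
        return 0;
}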
/linux/arch/mips/loongson64/
init.c

  in add_legacy_isa_io():
    156      struct logic_pio_hwaddr *range;
    159      range = kzalloc(sizeof(*range), GFP_ATOMIC);
    160      if (!range)
    163      range->fwnode = fwnode;
    164      range->size = size = round_up(size, PAGE_SIZE);
    165      range->hw_start = hw_start;
    166      range->flags = LOGIC_PIO_CPU_MMIO;
    168      ret = logic_pio_register_range(range);
    170          kfree(range);
    175      if (range->io_start != 0) {
  [all …]
/linux/tools/arch/x86/kcpuid/
kcpuid.c

  in range_to_str():
     94  static char *range_to_str(struct cpuid_range *range)
     96      switch (range->index) {
    105  #define __for_each_cpuid_range(range, __condition) \
    107           i < ARRAY_SIZE(ranges) && ((range) = &ranges[i]) && (__condition); \
    110  #define for_each_valid_cpuid_range(range) __for_each_cpuid_range(range, (range)->nr != 0)
    111  #define for_each_cpuid_range(range) __for_each_cpuid_range(range, true)
  in index_to_cpuid_range():
    117      struct cpuid_range *range;
    119      for_each_valid_cpuid_range(range) {
    120          if (range->index == range_idx && (u32)range->nr > func_idx)
    121              return range;
  [all …]
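The kcpuid macros place the filter inside the for-loop condition, so the walk
stops at the first element that fails it; that works because valid entries sit
contiguously at the front of the fixed ranges[] array. A compilable sketch of
the pattern with a simplified element type:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cpuid_range { int index; int nr; };

static struct cpuid_range ranges[] = {
        { 0, 13 }, { 1, 9 }, { 2, 0 },  /* nr == 0 marks an unused slot */
};

#define __for_each_cpuid_range(range, __condition)                      \
        for (unsigned int i = 0;                                        \
             i < ARRAY_SIZE(ranges) && ((range) = &ranges[i]) &&        \
             (__condition);                                             \
             i++)

#define for_each_valid_cpuid_range(range) \
        __for_each_cpuid_range(range, (range)->nr != 0)

int main(void)
{
        struct cpuid_range *range;

        for_each_valid_cpuid_range(range)       /* prints two entries */
                printf("index=%d nr=%d\n", range->index, range->nr);
        return 0;
}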
/linux/drivers/gpu/drm/xe/
xe_svm.c

  in xe_svm_range_in_vram():
     22  static bool xe_svm_range_in_vram(struct xe_svm_range *range)
     31      .__flags = READ_ONCE(range->base.pages.flags.__flags),
  in xe_svm_range_has_vram_binding():
     37  static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
     40      return xe_svm_range_in_vram(range) && range->tile_present;
  in xe_svm_range_debug():
     65  void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
     67      range_debug(range, operation);
  in xe_svm_range_alloc():
     73      struct xe_svm_range *range;
     75      range = kzalloc(sizeof(*range), GFP_KERNEL);
     76      if (!range)
     79      INIT_LIST_HEAD(&range->garbage_collector_link);
  [all …]
xe_svm.h

  in xe_svm_range_pages_valid():
     64  static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
     66      return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
     85  void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
     87  int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
     93  int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
     96  bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
     99  void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);
    102          struct xe_svm_range *range,
  in xe_svm_range_has_dma_mapping():
    119  static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
    121      lockdep_assert_held(&range->base.gpusvm->notifier_lock);
  [all …]
/linux/drivers/gpu/drm/
drm_gpusvm.c

  in drm_gpusvm_fini():
    489      struct drm_gpusvm_range *range, *__next;
    497      drm_gpusvm_for_each_range_safe(range, __next, notifier, 0,
    499          drm_gpusvm_range_remove(gpusvm, range);
  in drm_gpusvm_range_insert():
    577          struct drm_gpusvm_range *range)
    583      interval_tree_insert(&range->itree, &notifier->root);
    585      node = rb_prev(&range->itree.rb);
    591      list_add(&range->entry, head);
  in __drm_gpusvm_range_remove():
    603          struct drm_gpusvm_range *range)
    605      interval_tree_remove(&range->itree, &notifier->root);
    606      list_del(&range->entry);
  [all …]
/linux/drivers/of/
address.c

     33      u64 (*map)(__be32 *addr, const __be32 *range,
  in of_bus_default_map():
     53  static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
     58      cp = of_read_number(range + fna, na - fna);
     59      s = of_read_number(range + na + pna, ns);
  in of_bus_default_flags_map():
     91  static u64 of_bus_default_flags_map(__be32 *addr, const __be32 *range, int na,
     95      if (*addr != *range)
     98      return of_bus_default_map(addr, range, na, ns, pna, fna);
  in of_bus_pci_map():
    171  static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
    177      rf = of_bus_pci_get_flags(range);
    183      return of_bus_default_map(addr, range, na, ns, pna, fna);
  [all …]
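Each of these map callbacks decodes device-tree ranges entries, which are
sequences of big-endian 32-bit cells; of_read_number() folds a run of
consecutive cells into a u64, most-significant cell first. A userspace sketch
of that decoding, with ntohl()/htonl() standing in for the kernel's be32
helpers:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t be32;

static uint64_t of_read_number(const be32 *cell, int size)
{
        uint64_t r = 0;

        while (size--)                  /* big-endian cells, MSW first */
                r = (r << 32) | ntohl(*cell++);
        return r;
}

int main(void)
{
        be32 cells[2] = { htonl(0x1), htonl(0x80000000) };

        printf("%#llx\n", (unsigned long long)of_read_number(cells, 2));
        return 0;                       /* prints 0x180000000 */
}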
/linux/drivers/pci/hotplug/
ibmphp_res.c

  in add_bus_range():
    368  static int add_bus_range(int type, struct range_node *range, struct bus_node *bus_cur)
    392      if (range->start < range_cur->start)
    402          bus_cur->rangeMem = range;
    405          bus_cur->rangePFMem = range;
    408          bus_cur->rangeIO = range;
    411      range->next = range_cur;
    412      range->rangeno = 1;
    416      range->next = NULL;
    417      range_prev->next = range;
    418      range->rangeno = range_prev->rangeno + 1;
  [all …]
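add_bus_range() keeps each bus's ranges in a singly linked list sorted by
start address and renumbers rangeno after splicing in the new node. A
simplified userspace sketch of that sorted insert (the ibmphp type dispatch
and error handling are omitted):

#include <stdio.h>

struct range_node {
        unsigned long start, end;
        int rangeno;
        struct range_node *next;
};

static struct range_node *add_bus_range(struct range_node *head,
                                        struct range_node *range)
{
        struct range_node *cur = head, *prev = NULL;

        /* find the first node that starts after the new range */
        while (cur && cur->start < range->start) {
                prev = cur;
                cur = cur->next;
        }
        range->next = cur;
        if (!prev) {                    /* new first node */
                range->rangeno = 1;
                head = range;
        } else {
                prev->next = range;
                range->rangeno = prev->rangeno + 1;
        }
        /* keep the numbering dense after the insertion point */
        for (cur = range->next; cur; cur = cur->next)
                cur->rangeno++;
        return head;
}

int main(void)
{
        struct range_node b = { 0x8000, 0x8fff, 2, NULL };
        struct range_node a = { 0x1000, 0x1fff, 1, &b };
        struct range_node n = { 0x4000, 0x4fff, 0, NULL };

        for (struct range_node *r = add_bus_range(&a, &n); r; r = r->next)
                printf("#%d [%#lx, %#lx]\n", r->rangeno, r->start, r->end);
        return 0;
}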
/linux/mm/damon/
sysfs-common.c

  in damon_sysfs_ul_range_alloc():
     22      struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),
     25      if (!range)
     27      range->kobj = (struct kobject){};
     28      range->min = min;
     29      range->max = max;
     31      return range;
  in min_show():
     37      struct damon_sysfs_ul_range *range = container_of(kobj,
     40      return sysfs_emit(buf, "%lu\n", range->min);
  in min_store():
     46      struct damon_sysfs_ul_range *range = container_of(kobj,
     55      range->min = min;
  [all …]
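The min_show()/min_store() callbacks recover the damon_sysfs_ul_range from the
embedded kobject with container_of(), which subtracts the member's offset from
the member pointer. A userspace sketch of the idiom:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct kobject { int dummy; };

struct damon_sysfs_ul_range {
        struct kobject kobj;
        unsigned long min, max;
};

int main(void)
{
        struct damon_sysfs_ul_range r = { { 0 }, 5, 100 };
        struct kobject *kobj = &r.kobj;         /* what a callback receives */
        struct damon_sysfs_ul_range *range =
                container_of(kobj, struct damon_sysfs_ul_range, kobj);

        printf("min=%lu max=%lu\n", range->min, range->max);
        return 0;
}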
/linux/drivers/gpu/drm/sprd/
megacores_pll.c

  in dphy_timing_config():
    221      u32 range[2], constant;
    236      range[L] = 50 * scale;
    237      range[H] = INFINITY;
    238      val[CLK] = DIV_ROUND_UP(range[L] * (factor << 1), t_byteck) - 2;
    243      range[L] = 38 * scale;
    244      range[H] = 95 * scale;
    245      tmp = AVERAGE(range[L], range[H]);
    246      val[CLK] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1;
    247      range[L] = 40 * scale + 4 * t_ui;
    248      range[H] = 85 * scale + 6 * t_ui;
  [all …]
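The pattern in dphy_timing_config() is: each MIPI D-PHY timing parameter has a
[low, high] specification window (range[L], range[H]); the driver aims at the
window midpoint with AVERAGE() and converts to clock ticks with round-up
division. A sketch with illustrative constants only; the real windows and
scaling come from the D-PHY spec and the PLL configuration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define AVERAGE(a, b)           (((a) + (b)) / 2)

int main(void)
{
        unsigned int scale = 100;               /* fixed point: 1 ns = 100 */
        unsigned int t_half_byteck = 208;       /* half byte-clock period */
        unsigned int lo = 38 * scale, hi = 95 * scale;

        /* target the middle of the window, then count clock ticks */
        unsigned int val = DIV_ROUND_UP(AVERAGE(lo, hi), t_half_byteck) - 1;

        printf("%u\n", val);
        return 0;
}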
/linux/net/netfilter/
nf_nat_core.c

  in nf_nat_inet_in_range():
    404          const struct nf_nat_range2 *range)
    407      return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
    408             ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
    410      return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
    411             ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
  in nf_in_range():
    448          const struct nf_nat_range2 *range)
    453      if (range->flags & NF_NAT_RANGE_MAP_IPS &&
    454          !nf_nat_inet_in_range(tuple, range))
    457      if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
    461          &range->min_proto, &range->max_proto);
  [all …]
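The IPv4 branch of nf_nat_inet_in_range() byte-swaps before comparing:
addresses sit in network byte order, and an inclusive min/max test is only
meaningful on host-order values. A userspace sketch:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ipv4_in_range(uint32_t addr_be, uint32_t min_be, uint32_t max_be)
{
        return ntohl(addr_be) >= ntohl(min_be) &&
               ntohl(addr_be) <= ntohl(max_be);
}

int main(void)
{
        uint32_t addr, lo, hi;

        inet_pton(AF_INET, "10.0.0.57", &addr);
        inet_pton(AF_INET, "10.0.0.1", &lo);
        inet_pton(AF_INET, "10.0.0.100", &hi);

        printf("%d\n", ipv4_in_range(addr, lo, hi));    /* prints 1 */
        return 0;
}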
/linux/tools/testing/selftests/net/
ip_local_port_range.c

  in unpack_port_range():
     28  static void unpack_port_range(__u32 range, __u16 *lo, __u16 *hi)
     30      *lo = range & 0xffff;
     31      *hi = range >> 16;
  in get_ip_local_port_range():
    104  static int get_ip_local_port_range(int fd, __u32 *range)
    115      *range = val;
  in TEST_F():
    238      __u32 range;
    243      range = pack_port_range(t->range_lo, t->range_hi);
    244      err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
  in TEST_F():
    281      __u32 range;
    289      range = pack_port_range(t->range_lo, t->range_hi);
  [all …]
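IP_LOCAL_PORT_RANGE packs both ports into one __u32: the low port in the
bottom 16 bits, the high port in the top 16. pack_port_range() is elided in
the excerpt, so this sketch infers its body from unpack_port_range():

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_port_range(uint16_t lo, uint16_t hi)
{
        return (uint32_t)hi << 16 | lo;
}

static void unpack_port_range(uint32_t range, uint16_t *lo, uint16_t *hi)
{
        *lo = range & 0xffff;
        *hi = range >> 16;
}

int main(void)
{
        uint16_t lo, hi;

        unpack_port_range(pack_port_range(40000, 40100), &lo, &hi);
        printf("%u-%u\n", lo, hi);      /* 40000-40100 */
        return 0;
}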