Lines matching full:entry in kernel/dma/debug.c
168 static inline void dump_entry_trace(struct dma_debug_entry *entry) in dump_entry_trace() argument
171 if (entry) { in dump_entry_trace()
173 stack_trace_print(entry->stack_entries, entry->stack_len, 0); in dump_entry_trace()
219 #define err_printk(dev, entry, format, arg...) do { \ argument
226 dump_entry_trace(entry); \
238 static int hash_fn(struct dma_debug_entry *entry) in hash_fn() argument
244 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; in hash_fn()
250 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, in get_hash_bucket() argument
254 int idx = hash_fn(entry); in get_hash_bucket()
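
The two fragments above are the whole lookup scheme: hash_fn() folds the DMA address into a bucket index, and get_hash_bucket() locks that bucket before any search. A minimal userspace model of the keying, with illustrative constants (the real HASH_SIZE and HASH_FN_SHIFT are defined near the top of kernel/dma/debug.c):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative constants; the real values live in kernel/dma/debug.c. */
    #define HASH_SIZE     1024ULL
    #define HASH_FN_SHIFT 13
    #define HASH_FN_MASK  (HASH_SIZE - 1)

    /* The bucket index is derived from the DMA address alone, so unmap
     * and sync checks can find the entry from nothing but dev_addr. */
    static unsigned int hash_fn(uint64_t dev_addr)
    {
            return (dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
    }

    int main(void)
    {
            /* Addresses within the same 8 KiB window share a bucket. */
            printf("%u %u\n", hash_fn(0x10000000), hash_fn(0x10001000));
            return 0;
    }

Keying on the address alone is deliberate: it is the only field guaranteed to be available at unmap time, which is why the best-fit search below scores the remaining fields instead of requiring them.
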
292  * Search for a given entry in the hash bucket list in __hash_bucket_find()
298 struct dma_debug_entry *entry, *ret = NULL; in __hash_bucket_find() local
301 list_for_each_entry(entry, &bucket->list, list) { in __hash_bucket_find()
302 if (!match(ref, entry)) in __hash_bucket_find()
311 * best-fit algorithm here which returns the entry from in __hash_bucket_find()
317 entry->size == ref->size ? ++match_lvl : 0; in __hash_bucket_find()
318 entry->type == ref->type ? ++match_lvl : 0; in __hash_bucket_find()
319 entry->direction == ref->direction ? ++match_lvl : 0; in __hash_bucket_find()
320 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; in __hash_bucket_find()
324 return entry; in __hash_bucket_find()
327  * We found an entry that fits better than the in __hash_bucket_find()
331 ret = entry; in __hash_bucket_find()
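
The matches from __hash_bucket_find() show its best-fit scoring: size, type, direction and sg_call_ents each add one to match_lvl when they agree with the reference, a full score of 4 returns immediately, and otherwise the highest-scoring entry seen so far is remembered. A compact userspace sketch of that loop, with the kernel's match() callback reduced to a dev_addr equality test and a singly linked list standing in for list_head:

    #include <stddef.h>

    struct dma_debug_entry {
            unsigned long long dev_addr;
            size_t size;
            int type, direction, sg_call_ents;
            struct dma_debug_entry *next;   /* stand-in for the kernel list */
    };

    /* Best-fit search: the address must match; the remaining fields
     * only raise the score of a candidate. */
    static struct dma_debug_entry *
    best_fit_find(struct dma_debug_entry *head, const struct dma_debug_entry *ref)
    {
            struct dma_debug_entry *e, *best = NULL;
            int best_lvl = 0;

            for (e = head; e; e = e->next) {
                    int lvl = 0;

                    if (e->dev_addr != ref->dev_addr)
                            continue;
                    lvl += (e->size == ref->size);
                    lvl += (e->type == ref->type);
                    lvl += (e->direction == ref->direction);
                    lvl += (e->sg_call_ents == ref->sg_call_ents);
                    if (lvl == 4)           /* perfect fit, stop searching */
                            return e;
                    if (lvl > best_lvl) {   /* remember the closest candidate */
                            best_lvl = lvl;
                            best = e;
                    }
            }
            return best;
    }

The partial score exists for diagnostics: even when a driver passes slightly wrong parameters to unmap, the closest entry is still found, so the error report can say exactly which field differed.
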
355 struct dma_debug_entry *entry, index = *ref; in bucket_find_contain() local
359 entry = __hash_bucket_find(*bucket, ref, containing_match); in bucket_find_contain()
361 if (entry) in bucket_find_contain()
362 return entry; in bucket_find_contain()
376 * Add an entry to a hash bucket
379 struct dma_debug_entry *entry) in hash_bucket_add() argument
381 list_add_tail(&entry->list, &bucket->list); in hash_bucket_add()
385 * Remove entry from a hash bucket list
387 static void hash_bucket_del(struct dma_debug_entry *entry) in hash_bucket_del() argument
389 list_del(&entry->list); in hash_bucket_del()
392 static unsigned long long phys_addr(struct dma_debug_entry *entry) in phys_addr() argument
394 if (entry->type == dma_debug_resource) in phys_addr()
395 return __pfn_to_phys(entry->pfn) + entry->offset; in phys_addr()
397 return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; in phys_addr()
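
phys_addr() recovers the CPU-physical address of the mapped region from the stored pfn plus in-page offset; dma_debug_resource entries translate the pfn directly with __pfn_to_phys(), while page-backed entries go through pfn_to_page()/page_to_phys(). Both branches reduce to the same arithmetic, sketched here assuming 4 KiB pages:

    #include <stdint.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages, an illustrative assumption */

    /* Both branches of the kernel's phys_addr() amount to
     * "pfn scaled to bytes, plus the offset within the page". */
    static uint64_t entry_phys_addr(uint64_t pfn, uint64_t offset)
    {
            return (pfn << PAGE_SHIFT) + offset;
    }
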
405 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
406 * the entry already exists at insertion time add a tag as a reference
416 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
426 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) in to_cacheline_number() argument
428 return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + in to_cacheline_number()
429 (entry->offset >> L1_CACHE_SHIFT); in to_cacheline_number()
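
to_cacheline_number() flattens (pfn, offset) into a global cacheline index: each page contributes 2^CACHELINE_PER_PAGE_SHIFT cachelines, and the in-page offset selects one of them. A worked example with common x86-64 values (64-byte lines and 4 KiB pages, so CACHELINE_PER_PAGE_SHIFT = 6; both are assumptions here, the kernel derives them per architecture):

    #include <stdint.h>
    #include <stdio.h>

    #define L1_CACHE_SHIFT           6   /* 64-byte cachelines (assumed) */
    #define PAGE_SHIFT               12  /* 4 KiB pages (assumed) */
    #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)

    static uint64_t to_cacheline_number(uint64_t pfn, uint64_t offset)
    {
            return (pfn << CACHELINE_PER_PAGE_SHIFT) + (offset >> L1_CACHE_SHIFT);
    }

    int main(void)
    {
            /* pfn 2, offset 0x80: cacheline 2 * 64 + 2 = 130 */
            printf("%llu\n", (unsigned long long)to_cacheline_number(2, 0x80));
            return 0;
    }
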
479 static int active_cacheline_insert(struct dma_debug_entry *entry) in active_cacheline_insert() argument
481 phys_addr_t cln = to_cacheline_number(entry); in active_cacheline_insert()
489 if (entry->direction == DMA_TO_DEVICE) in active_cacheline_insert()
493 rc = radix_tree_insert(&dma_active_cacheline, cln, entry); in active_cacheline_insert()
501 static void active_cacheline_remove(struct dma_debug_entry *entry) in active_cacheline_remove() argument
503 phys_addr_t cln = to_cacheline_number(entry); in active_cacheline_remove()
507 if (entry->direction == DMA_TO_DEVICE) in active_cacheline_remove()
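
active_cacheline_insert()/remove() keep a radix tree keyed by that cacheline number so that two simultaneously active writable mappings of the same line can be detected; DMA_TO_DEVICE entries are skipped because the CPU cannot consume stale data from a line the device only reads. A toy model with a flat table standing in for the radix tree; note that on -EEXIST the kernel keeps the mapping, bumps an overlap counter, and reports the condition via the err_printk() in add_dma_entry() shown further down:

    #include <stdint.h>
    #include <errno.h>

    #define DMA_TO_DEVICE 1
    #define MAX_ACTIVE    64        /* toy capacity; the kernel tree grows */

    static uint64_t active[MAX_ACTIVE];
    static int nr_active;

    /* Returns -EEXIST when a second writable mapping covers the same
     * cacheline, the situation dma-debug flags as a potential overlap. */
    static int active_cacheline_insert(uint64_t cln, int direction)
    {
            int i;

            if (direction == DMA_TO_DEVICE)
                    return 0;       /* device-read-only: overlap is harmless */
            for (i = 0; i < nr_active; i++)
                    if (active[i] == cln)
                            return -EEXIST;
            if (nr_active < MAX_ACTIVE)
                    active[nr_active++] = cln;
            return 0;
    }
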
530 struct dma_debug_entry *entry; in debug_dma_dump_mappings() local
534 list_for_each_entry(entry, &bucket->list, list) { in debug_dma_dump_mappings()
535 if (!dev || dev == entry->dev) { in debug_dma_dump_mappings()
536 cln = to_cacheline_number(entry); in debug_dma_dump_mappings()
537 dev_info(entry->dev, in debug_dma_dump_mappings()
539 type2name[entry->type], idx, in debug_dma_dump_mappings()
540 phys_addr(entry), entry->pfn, in debug_dma_dump_mappings()
541 entry->dev_addr, entry->size, in debug_dma_dump_mappings()
542 &cln, dir2name[entry->direction], in debug_dma_dump_mappings()
543 maperr2str[entry->map_err_type]); in debug_dma_dump_mappings()
562 struct dma_debug_entry *entry; in dump_show() local
566 list_for_each_entry(entry, &bucket->list, list) { in dump_show()
567 cln = to_cacheline_number(entry); in dump_show()
570 dev_driver_string(entry->dev), in dump_show()
571 dev_name(entry->dev), in dump_show()
572 type2name[entry->type], idx, in dump_show()
573 phys_addr(entry), entry->pfn, in dump_show()
574 entry->dev_addr, entry->size, in dump_show()
575 &cln, dir2name[entry->direction], in dump_show()
576 maperr2str[entry->map_err_type]); in dump_show()
585 * Wrapper function for adding an entry to the hash.
588 static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs) in add_dma_entry() argument
594 bucket = get_hash_bucket(entry, &flags); in add_dma_entry()
595 hash_bucket_add(bucket, entry); in add_dma_entry()
598 rc = active_cacheline_insert(entry); in add_dma_entry()
603 err_printk(entry->dev, entry, in add_dma_entry()
610 struct dma_debug_entry *entry; in dma_debug_create_entries() local
613 entry = (void *)get_zeroed_page(gfp); in dma_debug_create_entries()
614 if (!entry) in dma_debug_create_entries()
618 list_add_tail(&entry[i].list, &free_entries); in dma_debug_create_entries()
628 struct dma_debug_entry *entry; in __dma_entry_alloc() local
630 entry = list_entry(free_entries.next, struct dma_debug_entry, list); in __dma_entry_alloc()
631 list_del(&entry->list); in __dma_entry_alloc()
632 memset(entry, 0, sizeof(*entry)); in __dma_entry_alloc()
638 return entry; in __dma_entry_alloc()
665 struct dma_debug_entry *entry; in dma_entry_alloc() local
681 entry = __dma_entry_alloc(); in dma_entry_alloc()
689 entry->stack_len = stack_trace_save(entry->stack_entries, in dma_entry_alloc()
690 ARRAY_SIZE(entry->stack_entries), in dma_entry_alloc()
693 return entry; in dma_entry_alloc()
696 static void dma_entry_free(struct dma_debug_entry *entry) in dma_entry_free() argument
700 active_cacheline_remove(entry); in dma_entry_free()
707 list_add(&entry->list, &free_entries); in dma_entry_free()
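
Entries come from a private pool: dma_debug_create_entries() carves a zeroed page into dma_debug_entry slots threaded onto free_entries, __dma_entry_alloc() pops one and re-zeroes it, and dma_entry_free() pushes it back. A minimal single-threaded model of that pool (the kernel additionally serializes it with free_entries_lock):

    #include <stdlib.h>
    #include <string.h>

    struct entry {
            struct entry *next;
            /* ...payload fields elided... */
    };

    static struct entry *free_list;

    /* Carve one zeroed allocation into n pool slots; this models
     * dma_debug_create_entries() filling the list from get_zeroed_page(). */
    static int create_entries(size_t n)
    {
            struct entry *chunk = calloc(n, sizeof(*chunk));
            size_t i;

            if (!chunk)
                    return -1;
            for (i = 0; i < n; i++) {
                    chunk[i].next = free_list;
                    free_list = &chunk[i];
            }
            return 0;
    }

    static struct entry *entry_alloc(void)
    {
            struct entry *e = free_list;

            if (!e)
                    return NULL;
            free_list = e->next;
            memset(e, 0, sizeof(*e));       /* mirror __dma_entry_alloc() */
            return e;
    }

    static void entry_free(struct entry *e)
    {
            e->next = free_list;            /* LIFO, like list_add() */
            free_list = e;
    }

Preallocating means the tracking itself never has to allocate in atomic mapping paths; when the pool runs dry, dma_entry_alloc() tries to top it up and otherwise disables dma-debug rather than losing track of mappings.
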
832 struct dma_debug_entry *entry; in device_dma_allocations() local
838 list_for_each_entry(entry, &dma_entry_hash[i].list, list) { in device_dma_allocations()
839 if (entry->dev == dev) { in device_dma_allocations()
841 *out_entry = entry; in device_dma_allocations()
853 struct dma_debug_entry *entry; in dma_debug_device_change() local
861 count = device_dma_allocations(dev, &entry); in dma_debug_device_change()
864 err_printk(dev, entry, "device driver has pending " in dma_debug_device_change()
870 count, entry->dev_addr, entry->size, in dma_debug_device_change()
871 dir2name[entry->direction], type2name[entry->type]); in dma_debug_device_change()
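
device_dma_allocations() is the unbind-time leak detector: when the driver core signals that a driver is leaving, dma_debug_device_change() counts hash entries still owned by the departing device and, as the err_printk() above shows, complains about the first offender it found. A sketch of that counting walk, with a flat array standing in for the per-bucket lists:

    #include <stddef.h>

    struct dev;
    struct entry { struct dev *dev; /* ... */ };

    /* Count live mappings owned by @dev and report one offender, as
     * device_dma_allocations() does across all hash buckets. */
    static int count_pending(struct entry *tbl, size_t n,
                             const struct dev *dev, struct entry **offender)
    {
            size_t i;
            int count = 0;

            for (i = 0; i < n; i++) {
                    if (tbl[i].dev == dev) {
                            count++;
                            *offender = &tbl[i];
                    }
            }
            return count;
    }
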
963 struct dma_debug_entry *entry; in check_unmap() local
968 entry = bucket_find_exact(bucket, ref); in check_unmap()
970 if (!entry) { in check_unmap()
988 if (ref->size != entry->size) { in check_unmap()
989 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
993 ref->dev_addr, entry->size, ref->size); in check_unmap()
996 if (ref->type != entry->type) { in check_unmap()
997 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
1002 type2name[entry->type], type2name[ref->type]); in check_unmap()
1003 } else if ((entry->type == dma_debug_coherent) && in check_unmap()
1004 (phys_addr(ref) != phys_addr(entry))) { in check_unmap()
1005 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
1011 phys_addr(entry), in check_unmap()
1016 ref->sg_call_ents != entry->sg_call_ents) { in check_unmap()
1017 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
1018 "DMA sg list with different entry count " in check_unmap()
1020 entry->sg_call_ents, ref->sg_call_ents); in check_unmap()
1027 if (ref->direction != entry->direction) { in check_unmap()
1028 err_printk(ref->dev, entry, "device driver frees " in check_unmap()
1033 dir2name[entry->direction], in check_unmap()
1042 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { in check_unmap()
1043 err_printk(ref->dev, entry, in check_unmap()
1048 type2name[entry->type]); in check_unmap()
1051 hash_bucket_del(entry); in check_unmap()
1052 dma_entry_free(entry); in check_unmap()
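
check_unmap() looks up the exact entry for the address being freed and cross-checks every recorded attribute: size, mapping type, coherent physical address, scatter-gather entry count, direction, and whether the driver ever consulted dma_mapping_error(). The shape of those checks, reduced to a boolean validator (messages abbreviated from the err_printk() strings above):

    #include <stdbool.h>
    #include <stdio.h>

    enum map_err { MAP_ERR_NOT_CHECKED, MAP_ERR_CHECKED };

    struct rec {
            unsigned long long size;
            int type, direction;
            enum map_err map_err_type;
    };

    /* Each failed comparison corresponds to one err_printk() in
     * check_unmap(); like the kernel, keep checking after a mismatch. */
    static bool unmap_checks_ok(const struct rec *entry, const struct rec *ref)
    {
            bool ok = true;

            if (ref->size != entry->size) {
                    fprintf(stderr, "frees DMA memory with different size\n");
                    ok = false;
            }
            if (ref->type != entry->type) {
                    fprintf(stderr, "frees DMA memory with wrong function\n");
                    ok = false;
            }
            if (ref->direction != entry->direction) {
                    fprintf(stderr, "frees DMA memory with different direction\n");
                    ok = false;
            }
            if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
                    fprintf(stderr, "failed to check map error\n");
                    ok = false;
            }
            return ok;
    }

Only after all checks run is the entry unhashed and returned to the pool, which is why hash_bucket_del() and dma_entry_free() appear last in the match list above.
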
1096 struct dma_debug_entry *entry; in check_sync() local
1102 entry = bucket_find_contain(&bucket, ref, &flags); in check_sync()
1104 if (!entry) { in check_sync()
1112 if (ref->size > entry->size) { in check_sync()
1113 err_printk(dev, entry, "device driver syncs" in check_sync()
1118 entry->dev_addr, entry->size, in check_sync()
1122 if (entry->direction == DMA_BIDIRECTIONAL) in check_sync()
1125 if (ref->direction != entry->direction) { in check_sync()
1126 err_printk(dev, entry, "device driver syncs " in check_sync()
1130 (unsigned long long)ref->dev_addr, entry->size, in check_sync()
1131 dir2name[entry->direction], in check_sync()
1135 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && in check_sync()
1137 err_printk(dev, entry, "device driver syncs " in check_sync()
1141 (unsigned long long)ref->dev_addr, entry->size, in check_sync()
1142 dir2name[entry->direction], in check_sync()
1145 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && in check_sync()
1147 err_printk(dev, entry, "device driver syncs " in check_sync()
1151 (unsigned long long)ref->dev_addr, entry->size, in check_sync()
1152 dir2name[entry->direction], in check_sync()
1156 ref->sg_call_ents != entry->sg_call_ents) { in check_sync()
1157 err_printk(ref->dev, entry, "device driver syncs " in check_sync()
1158 "DMA sg list with different entry count " in check_sync()
1160 entry->sg_call_ents, ref->sg_call_ents); in check_sync()
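
check_sync() enforces the direction rules for partial syncs: a DMA_BIDIRECTIONAL mapping may be synced either way, a DMA_TO_DEVICE mapping must not be synced for the CPU ("device read-only" memory), and a DMA_FROM_DEVICE mapping must not be synced toward the device ("device write-only" memory). A simplified distillation of that rule; the kernel's actual conditions at lines 1135 and 1145 above also consult the sync call's own direction argument:

    #include <stdbool.h>

    enum dma_data_direction {
            DMA_BIDIRECTIONAL = 0,
            DMA_TO_DEVICE     = 1,
            DMA_FROM_DEVICE   = 2,
    };

    /* Bidirectional mappings are always fine; otherwise the sync
     * direction must be compatible with the direction at map time. */
    static bool sync_direction_ok(enum dma_data_direction mapped, bool to_cpu)
    {
            if (mapped == DMA_BIDIRECTIONAL)
                    return true;
            return to_cpu ? (mapped == DMA_FROM_DEVICE)
                          : (mapped == DMA_TO_DEVICE);
    }
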
1213 struct dma_debug_entry *entry; in debug_dma_map_page() local
1221 entry = dma_entry_alloc(); in debug_dma_map_page()
1222 if (!entry) in debug_dma_map_page()
1225 entry->dev = dev; in debug_dma_map_page()
1226 entry->type = dma_debug_single; in debug_dma_map_page()
1227 entry->pfn = page_to_pfn(page); in debug_dma_map_page()
1228 entry->offset = offset; in debug_dma_map_page()
1229 entry->dev_addr = dma_addr; in debug_dma_map_page()
1230 entry->size = size; in debug_dma_map_page()
1231 entry->direction = direction; in debug_dma_map_page()
1232 entry->map_err_type = MAP_ERR_NOT_CHECKED; in debug_dma_map_page()
1242 add_dma_entry(entry, attrs); in debug_dma_map_page()
1248 struct dma_debug_entry *entry; in debug_dma_mapping_error() local
1259 list_for_each_entry(entry, &bucket->list, list) { in debug_dma_mapping_error()
1260 if (!exact_match(&ref, entry)) in debug_dma_mapping_error()
1269 * best-fit algorithm here which updates the first entry in debug_dma_mapping_error()
1273 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { in debug_dma_mapping_error()
1274 entry->map_err_type = MAP_ERR_CHECKED; in debug_dma_mapping_error()
1303 struct dma_debug_entry *entry; in debug_dma_map_sg() local
1317 entry = dma_entry_alloc(); in debug_dma_map_sg()
1318 if (!entry) in debug_dma_map_sg()
1321 entry->type = dma_debug_sg; in debug_dma_map_sg()
1322 entry->dev = dev; in debug_dma_map_sg()
1323 entry->pfn = page_to_pfn(sg_page(s)); in debug_dma_map_sg()
1324 entry->offset = s->offset; in debug_dma_map_sg()
1325 entry->size = sg_dma_len(s); in debug_dma_map_sg()
1326 entry->dev_addr = sg_dma_address(s); in debug_dma_map_sg()
1327 entry->direction = direction; in debug_dma_map_sg()
1328 entry->sg_call_ents = nents; in debug_dma_map_sg()
1329 entry->sg_mapped_ents = mapped_ents; in debug_dma_map_sg()
1333 add_dma_entry(entry, attrs); in debug_dma_map_sg()
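
debug_dma_map_sg() allocates one debug entry per mapped element and records both counts: sg_call_ents, the nents the driver passed in, and sg_mapped_ents, what the mapping actually produced. Those are what let check_unmap() and check_sync() above flag an unmap or sync issued with a different entry count. A sketch with hypothetical flattened types (struct sg and record_sg() are illustration only, not kernel API):

    /* Hypothetical shapes, for illustration only. */
    struct sg { unsigned long long dma_addr; unsigned int dma_len; };

    struct sg_entry {
            unsigned long long dev_addr;
            unsigned int size;
            int sg_call_ents;       /* nents the driver passed to map_sg() */
            int sg_mapped_ents;     /* what the mapping actually produced */
    };

    /* One debug entry per mapped element, as in debug_dma_map_sg(). */
    static void record_sg(struct sg *sgl, int nents, int mapped_ents,
                          struct sg_entry *out)
    {
            int i;

            for (i = 0; i < mapped_ents; i++) {
                    out[i].dev_addr       = sgl[i].dma_addr;
                    out[i].size           = sgl[i].dma_len;
                    out[i].sg_call_ents   = nents;
                    out[i].sg_mapped_ents = mapped_ents;
            }
    }
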
1340 struct dma_debug_entry *entry; in get_nr_mapped_entries() local
1346 entry = bucket_find_exact(bucket, ref); in get_nr_mapped_entries()
1349 if (entry) in get_nr_mapped_entries()
1350 mapped_ents = entry->sg_mapped_ents; in get_nr_mapped_entries()
1392 struct dma_debug_entry *entry; in debug_dma_alloc_coherent() local
1404 entry = dma_entry_alloc(); in debug_dma_alloc_coherent()
1405 if (!entry) in debug_dma_alloc_coherent()
1408 entry->type = dma_debug_coherent; in debug_dma_alloc_coherent()
1409 entry->dev = dev; in debug_dma_alloc_coherent()
1410 entry->offset = offset_in_page(virt); in debug_dma_alloc_coherent()
1411 entry->size = size; in debug_dma_alloc_coherent()
1412 entry->dev_addr = dma_addr; in debug_dma_alloc_coherent()
1413 entry->direction = DMA_BIDIRECTIONAL; in debug_dma_alloc_coherent()
1416 entry->pfn = vmalloc_to_pfn(virt); in debug_dma_alloc_coherent()
1418 entry->pfn = page_to_pfn(virt_to_page(virt)); in debug_dma_alloc_coherent()
1420 add_dma_entry(entry, attrs); in debug_dma_alloc_coherent()
1454 struct dma_debug_entry *entry; in debug_dma_map_resource() local
1459 entry = dma_entry_alloc(); in debug_dma_map_resource()
1460 if (!entry) in debug_dma_map_resource()
1463 entry->type = dma_debug_resource; in debug_dma_map_resource()
1464 entry->dev = dev; in debug_dma_map_resource()
1465 entry->pfn = PHYS_PFN(addr); in debug_dma_map_resource()
1466 entry->offset = offset_in_page(addr); in debug_dma_map_resource()
1467 entry->size = size; in debug_dma_map_resource()
1468 entry->dev_addr = dma_addr; in debug_dma_map_resource()
1469 entry->direction = direction; in debug_dma_map_resource()
1470 entry->map_err_type = MAP_ERR_NOT_CHECKED; in debug_dma_map_resource()
1472 add_dma_entry(entry, attrs); in debug_dma_map_resource()