Lines matching full:mc, grouped by containing function
static inline void mapcache_lock(MapCache *mc)
{
    qemu_mutex_lock(&mc->lock);
}

static inline void mapcache_unlock(MapCache *mc)
{
    qemu_mutex_unlock(&mc->lock);
}
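These two helpers set the locking discipline for the whole cache: each public entry point takes mc->lock and delegates the real work to an *_unlocked sibling, as xen_invalidate_map_cache_entry_single() does further down. A minimal sketch of that pattern; only mapcache_lock/mapcache_unlock and the MapCache fields come from the matches, the worker name and body are illustrative:

    /* Illustrative worker/wrapper pair, assuming the MapCache type
     * from the surrounding file. */
    static void frob_cache_unlocked(MapCache *mc)
    {
        /* ... operate on mc->entry, mc->locked_entries, mc->last_entry ... */
    }

    static void frob_cache(MapCache *mc)
    {
        mapcache_lock(mc);
        frob_cache_unlocked(mc);
        mapcache_unlock(mc);
    }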
in xen_map_cache_init_single():

    MapCache *mc;
    ...
    mc = g_new0(MapCache, 1);

    mc->phys_offset_to_gaddr = f;
    mc->opaque = opaque;
    qemu_mutex_init(&mc->lock);

    QTAILQ_INIT(&mc->locked_entries);

    mc->bucket_shift = bucket_shift;
    mc->bucket_size = 1UL << bucket_shift;
    mc->max_mcache_size = max_size;

    mc->nr_buckets =
        (((mc->max_mcache_size >> XC_PAGE_SHIFT) + ...

    size = mc->nr_buckets * sizeof(MapCacheEntry);

    trace_xen_map_cache_init(mc->nr_buckets, size);
    mc->entry = g_malloc0(size);
    return mc;
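The nr_buckets expression is truncated in the matches, but the visible part rounds the cache's page count up to whole buckets. A worked example of the geometry, using assumed values rather than QEMU defaults (bucket_shift = 20 for 1 MiB buckets, a 64 MiB cache, and XC_PAGE_SHIFT = 12 for 4 KiB Xen pages):

    /* Worked example; the inputs are assumptions, not QEMU defaults. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int  bucket_shift = 20;                /* 1 MiB buckets */
        unsigned long max_size     = 64UL << 20;        /* 64 MiB cache */
        unsigned long per_bucket   = 1UL << (bucket_shift - 12); /* 256 pages per bucket */
        unsigned long pages        = max_size >> 12;              /* 16384 pages */
        unsigned long nr_buckets   = (pages + per_bucket - 1) / per_bucket;

        printf("bucket_size=%#lx nr_buckets=%lu\n",
               1UL << bucket_shift, nr_buckets);        /* 0x100000, 64 */
        return 0;
    }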
static void xen_remap_bucket(MapCache *mc, ...)
    ...
        pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT)) + i;
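The pfns[] line shows how a bucket is translated into guest frame numbers: each bucket covers 2^(bucket_shift - XC_PAGE_SHIFT) pages starting at the bucket's base gfn. With the same assumed geometry (1 MiB buckets, 4 KiB pages), bucket address_index = 3 maps gfns 768 through 1023:

    /* Assumed geometry: bucket_shift = 20, XC_PAGE_SHIFT = 12. */
    void fill_bucket_pfns_example(void)
    {
        unsigned long pfns[256];            /* xen_pfn_t in the real code */
        unsigned long address_index = 3;    /* third 1 MiB bucket */

        for (unsigned long i = 0; i < 256; i++) {
            pfns[i] = (address_index << (20 - 12)) + i;   /* gfns 768..1023 */
        }
        (void)pfns;
    }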
static uint8_t *xen_map_cache_unlocked(MapCache *mc, ...)
    ...
    address_index = phys_addr >> mc->bucket_shift;
    address_offset = phys_addr & (mc->bucket_size - 1);
    ...
    if (mc->last_entry != NULL &&
        mc->last_entry->paddr_index == address_index &&
        ...
        mc->last_entry->valid_mapping)) {
        ...
            mc->last_entry->vaddr_base + address_offset
        ...
        return mc->last_entry->vaddr_base + address_offset;
    }

    /* size is always a multiple of mc->bucket_size */
    ...
    if (cache_size % mc->bucket_size) {
        cache_size += mc->bucket_size - (cache_size % mc->bucket_size);
    ...
        cache_size = mc->bucket_size;
    ...
    entry = &mc->entry[address_index % mc->nr_buckets];
    ...
    xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy, ...);
    ...
    xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy, ...);
    ...
    mc->last_entry = NULL;
    ...
    if (!translated && mc->phys_offset_to_gaddr) {
        phys_addr = mc->phys_offset_to_gaddr(phys_addr, size);
    ...
    mc->last_entry = entry;
    ...
    reventry->vaddr_req = mc->last_entry->vaddr_base + address_offset;
    reventry->paddr_index = mc->last_entry->paddr_index;
    ...
    QTAILQ_INSERT_HEAD(&mc->locked_entries, reventry, next);
    ...
        mc->last_entry->vaddr_base + address_offset
    ...
    return mc->last_entry->vaddr_base + address_offset;
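The index/offset split at the top of xen_map_cache_unlocked() is plain shift-and-mask arithmetic. A small example with an assumed bucket_shift of 20 (hwaddr is rendered here as uint64_t):

    /* Forward split, assumed bucket_shift = 20. */
    #include <stdint.h>

    void split_example(void)
    {
        uint64_t phys_addr      = 0x12345678;
        uint64_t address_index  = phys_addr >> 20;                /* 0x123 */
        uint64_t address_offset = phys_addr & ((1ULL << 20) - 1); /* 0x45678 */
        /* the bucket is then mc->entry[address_index % mc->nr_buckets] */
        (void)address_index; (void)address_offset;
    }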
in xen_map_cache():

    MapCache *mc = mapcache;
    ...
    mc = is_write ? mapcache_grants_rw : mapcache_grants_ro;
    ...
    mapcache_lock(mc);
    p = xen_map_cache_unlocked(mc, phys_addr, size, ram_addr_offset, ...);
    mapcache_unlock(mc);
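xen_map_cache() is the locked public wrapper, and it also selects which cache to use: grant-backed mappings go to the read-only or read-write grant cache, everything else to the main mapcache. A hedged caller sketch; the exact parameter list is an assumption based on the arguments visible above:

    /* Assumed signature: the matches show phys_addr, size and
     * ram_addr_offset, plus lock/dma/is_write style flags. */
    uint8_t *p = xen_map_cache(mr, phys_addr, size, ram_addr_offset,
                               lock, dma, is_write);
    if (p) {
        /* ... access guest memory through p ... */
        /* for a locked mapping, release it when done: */
        xen_invalidate_map_cache_entry(p);
    }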
static ram_addr_t xen_ram_addr_from_mapcache_single(MapCache *mc, void *ptr)
    ...
    mapcache_lock(mc);
    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
    ...
    mapcache_unlock(mc);
    ...
    entry = &mc->entry[paddr_index % mc->nr_buckets];
    ...
    raddr = (reventry->paddr_index << mc->bucket_shift) + ...;
    ...
    mapcache_unlock(mc);
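The reverse lookup mirrors the forward split: the bucket index recorded in locked_entries is shifted back up and the offset of ptr within the bucket's vaddr_base is added. Continuing the example above, with the same assumed bucket_shift = 20:

    /* Reverse of the split, same assumed geometry. */
    #include <stdint.h>

    void reverse_example(void)
    {
        uint64_t paddr_index = 0x123;       /* recorded in locked_entries */
        uint64_t offset      = 0x45678;     /* ptr minus the entry's vaddr_base */
        uint64_t raddr       = (paddr_index << 20) + offset;  /* 0x12345678 */
        (void)raddr;
    }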
static void xen_invalidate_map_cache_entry_unlocked(MapCache *mc, ...)
    ...
    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
    ...
    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
    ...
    QTAILQ_REMOVE(&mc->locked_entries, reventry, next);
    ...
    if (mc->last_entry != NULL &&
        mc->last_entry->paddr_index == paddr_index) {
        mc->last_entry = NULL;
    ...
    entry = &mc->entry[paddr_index % mc->nr_buckets];
    ...
            entry->size >> mc->bucket_shift);
static void xen_invalidate_map_cache_entry_single(MapCache *mc, uint8_t *buffer)
{
    mapcache_lock(mc);
    xen_invalidate_map_cache_entry_unlocked(mc, buffer);
    mapcache_unlock(mc);
}
static void xen_invalidate_map_cache_single(MapCache *mc)
    ...
    mapcache_lock(mc);
    ...
    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
    ...
    for (i = 0; i < mc->nr_buckets; i++) {
        MapCacheEntry *entry = &mc->entry[i];
    ...
    mc->last_entry = NULL;

    mapcache_unlock(mc);
static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc, ...)
    ...
    address_index = old_phys_addr >> mc->bucket_shift;
    address_offset = old_phys_addr & (mc->bucket_size - 1);
    ...
    if (cache_size % mc->bucket_size) {
        cache_size += mc->bucket_size - (cache_size % mc->bucket_size);
    ...
    entry = &mc->entry[address_index % mc->nr_buckets];
    ...
    address_index = new_phys_addr >> mc->bucket_shift;
    address_offset = new_phys_addr & (mc->bucket_size - 1);
    ...
    xen_remap_bucket(mc, entry, entry->vaddr_base, ...);
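The cache_size round-up seen here (and in xen_map_cache_unlocked()) pads a request to a whole number of buckets before the entry is remapped. For instance, with an assumed 1 MiB bucket_size, a 1.5 MiB request becomes 2 MiB:

    /* Round a request up to whole buckets; bucket_size is assumed. */
    #include <stdint.h>

    uint64_t round_to_buckets(uint64_t cache_size)
    {
        const uint64_t bucket_size = 1ULL << 20;   /* assumed 1 MiB */
        if (cache_size % bucket_size) {
            cache_size += bucket_size - (cache_size % bucket_size);
        }
        return cache_size;   /* round_to_buckets(0x180000) == 0x200000 */
    }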