Lines matching "vmid" (KVM/arm64 VMID allocator)
/* VMID allocator. */

#define vmid2idx(vmid)		((vmid) & ~VMID_MASK)

/* As vmid #0 is always reserved, we will never allocate one ... */

#define vmid_gen_match(vmid) \
	(!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))
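Taken together, these macros encode a VMID as generation bits stacked above the hardware VMID field: the low kvm_arm_vmid_bits select the bitmap index, and vmid_gen_match() is true only when the upper bits equal the current vmid_generation, since the XOR cancels them out. A minimal user-space model of that check (8 VMID bits, plain variables standing in for the kernel's atomics; all names here are illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel globals; 8 VMID bits for illustration. */
static unsigned int kvm_arm_vmid_bits = 8;
static uint64_t vmid_generation;

/* Mirrors vmid_gen_match(): true iff the bits above the hardware
 * VMID field equal the current generation, so the XOR leaves
 * nothing after the shift. */
static int vmid_gen_match(uint64_t vmid)
{
	return !((vmid ^ vmid_generation) >> kvm_arm_vmid_bits);
}

int main(void)
{
	uint64_t first_version = 1ULL << kvm_arm_vmid_bits; /* generation increment */

	vmid_generation = first_version;        /* generation 1 */
	uint64_t vmid = vmid_generation | 0x2a; /* index 42, generation 1 */

	assert(vmid_gen_match(vmid));

	vmid_generation += first_version;       /* rollover: generation 2 */
	assert(!vmid_gen_match(vmid));          /* stale VMID must be refreshed */

	printf("index = %llu\n", (unsigned long long)(vmid & (first_version - 1)));
	return 0;
}

The payoff of this layout is that bumping the generation invalidates every outstanding VMID at once, with no per-VM bookkeeping.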
In flush_context():

	u64 vmid;
	...
	vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);

	/* Preserve reserved VMID */
	if (vmid == 0)
		vmid = per_cpu(reserved_vmids, cpu);
	__set_bit(vmid2idx(vmid), vmid_map);
	per_cpu(reserved_vmids, cpu) = vmid;
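Pieced together, flush_context() appears to run at rollover: for each possible CPU it atomically takes the VMID that CPU was actively running, falls back to the CPU's reserved slot if the active slot was already clear, and re-marks that VMID in the bitmap so live guests survive the generation bump. A sketch under that reading; the bitmap_clear() at the top and the closing flush comment are assumptions about lines not shown:

static void flush_context(void)
{
	int cpu;
	u64 vmid;

	/* Assumed: start from an empty map for the new generation. */
	bitmap_clear(vmid_map, 0, NUM_USER_VMIDS);

	for_each_possible_cpu(cpu) {
		/* Atomically take whatever VMID this CPU is running. */
		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);

		/* Preserve reserved VMID */
		if (vmid == 0)
			vmid = per_cpu(reserved_vmids, cpu);
		__set_bit(vmid2idx(vmid), vmid_map);
		per_cpu(reserved_vmids, cpu) = vmid;
	}

	/* A broadcast TLB/VM-context flush would follow here (not shown above). */
}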
static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
	...
	 * and update to use newvmid (i.e. the same VMID in the current
	...
	if (per_cpu(reserved_vmids, cpu) == vmid) {
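The signature plus the per-CPU comparison suggest this helper scans every reserved_vmids slot and, wherever it finds the old value, rewrites it as the same index stamped with the current generation, reporting whether anything matched. A sketch on that assumption (several CPUs may hold the same reserved VMID, so the loop must not stop at the first hit):

static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved VMIDs looking for a match
	 * and update to use newvmid (i.e. the same VMID in the current
	 * generation).
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_vmids, cpu) == vmid) {
			hit = true;
			per_cpu(reserved_vmids, cpu) = newvmid;
		}
	}

	return hit;
}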
In new_vmid():

	u64 vmid = atomic64_read(&kvm_vmid->id);
	...
	if (vmid != 0) {
		u64 newvmid = generation | (vmid & ~VMID_MASK);
		...
		if (check_update_reserved_vmid(vmid, newvmid)) {
		...
		if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
	...
	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx);
	if (vmid != NUM_USER_VMIDS)
	...
	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1);
	...
	__set_bit(vmid, vmid_map);
	cur_idx = vmid;
	vmid = idx2vmid(vmid) | generation;
	atomic64_set(&kvm_vmid->id, vmid);
	return vmid;
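The new_vmid() fragments outline a three-step allocation: first try to revalidate the VM's previous VMID, either through a reserved slot or because its index is still free in the map; otherwise scan the bitmap from the last allocation point; and on exhaustion start a new generation and rescan from index 1, since index 0 stays reserved. A sketch stitching the fragments together; the VMID_FIRST_VERSION generation increment and the flush_context() call between the two scans are assumptions about the elided middle:

static u64 new_vmid(struct kvm_vmid *kvm_vmid)
{
	static u32 cur_idx = 1;
	u64 vmid = atomic64_read(&kvm_vmid->id);
	u64 generation = atomic64_read(&vmid_generation);

	if (vmid != 0) {
		/* Re-stamp the old index with the current generation. */
		u64 newvmid = generation | (vmid & ~VMID_MASK);

		/* Old VMID parked in a reserved slot? Reuse it. */
		if (check_update_reserved_vmid(vmid, newvmid)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}

		/* Old index still free in this generation? Take it back. */
		if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}
	}

	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx);
	if (vmid != NUM_USER_VMIDS)
		goto set_vmid;

	/* Assumed: out of VMIDs, start a new generation and flush. */
	generation = atomic64_add_return_relaxed(VMID_FIRST_VERSION,
						 &vmid_generation);
	flush_context();

	/* More VMIDs than CPUs, so this second scan always succeeds. */
	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1);

set_vmid:
	__set_bit(vmid, vmid_map);
	cur_idx = vmid;
	vmid = idx2vmid(vmid) | generation;
	atomic64_set(&kvm_vmid->id, vmid);
	return vmid;
}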
In kvm_arm_vmid_update():

	u64 vmid, old_active_vmid;
	...
	vmid = atomic64_read(&kvm_vmid->id);
	...
	 * reserving the VMID space needlessly on rollover.
	...
	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
	    ... old_active_vmid, vmid))
	...
	/* Check that our VMID belongs to the current generation. */
	vmid = atomic64_read(&kvm_vmid->id);
	if (!vmid_gen_match(vmid)) {
		vmid = new_vmid(kvm_vmid);
	...
	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
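Read as a whole, the update path splits in two: a lock-free fast path that keeps the current VMID when it still belongs to the live generation and this CPU's active slot is non-zero (i.e. not cleared by a concurrent rollover), and a locked slow path that re-reads the id and only calls new_vmid() on a generation mismatch. A sketch; the atomic64_cmpxchg_relaxed() on the fast path and the cpu_vmid_lock spinlock are assumptions filling the elided lines:

void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
	unsigned long flags;
	u64 vmid, old_active_vmid;

	vmid = atomic64_read(&kvm_vmid->id);

	/*
	 * Fast path: the VMID is current and this CPU's active slot is
	 * live (non-zero). A non-zero cmpxchg result means no rollover
	 * zeroed the slot underneath us, so we avoid the lock and avoid
	 * reserving the VMID space needlessly on rollover.
	 */
	old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids));
	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
	    atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
				     old_active_vmid, vmid))
		return;

	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);

	/* Check that our VMID belongs to the current generation. */
	vmid = atomic64_read(&kvm_vmid->id);
	if (!vmid_gen_match(vmid))
		vmid = new_vmid(kvm_vmid);

	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
}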
/*
 * Initialize the VMID allocator
 */

In kvm_arm_vmid_alloc_init():

	...
	 * at least one more VMID than CPUs. VMID #0 is always reserved.
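Initialization then just derives the allocator width from the hardware and sizes the bitmap to match; the comment above states the rollover guarantee (more VMIDs than CPUs) that the second scan in new_vmid() relies on. A sketch, assuming kvm_get_vmid_bits() for the hardware width and a bitmap_zalloc()'d vmid_map; only the WARN_ON comment comes from the fragments:

int kvm_arm_vmid_alloc_init(void)
{
	kvm_arm_vmid_bits = kvm_get_vmid_bits();

	/*
	 * Expect allocation after rollover to fail if we don't have
	 * at least one more VMID than CPUs. VMID #0 is always reserved.
	 */
	WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
	atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
	vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL);
	if (!vmid_map)
		return -ENOMEM;

	return 0;
}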