Lines Matching full:asid

64  * to what is traditionally called ASID on the RISC processors.
66 * We don't use the traditional ASID implementation, where each process/mm gets
67 * its own ASID and flush/restart when we run out of ASID space.
76 * ASID - [0, TLB_NR_DYN_ASIDS-1]
83 * ASID+1, because PCID 0 is special.
87 * PCID values, but we can still do with a single ASID denomination
112 * Given @asid, compute kPCID
114 static inline u16 kern_pcid(u16 asid) in kern_pcid() argument
116 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); in kern_pcid()
120 * Make sure that the dynamic ASID space does not conflict with the in kern_pcid()
126 * The ASID being passed in here should have respected the in kern_pcid()
129 VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT)); in kern_pcid()
136 * If PCID is on, ASID-aware code paths put the ASID+1 into the in kern_pcid()
140 * the TLB for ASID 0 if the saved ASID was nonzero. It also means in kern_pcid()
144 return asid + 1; in kern_pcid()
148 * Given @asid, compute uPCID
150 static inline u16 user_pcid(u16 asid) in user_pcid() argument
152 u16 ret = kern_pcid(asid); in user_pcid()
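
The matched lines above fix the relationship between the three ID spaces: the kernel PCID is simply the ASID plus one (PCID 0 is special), and the user PCID is the same value with the PTI user/kernel switch bit set on top. Below is a minimal userspace C model of that mapping; the constants (6 dynamic ASIDs, bit 11 as the switch bit, so uPCID = kPCID + 2048) are illustrative assumptions rather than values taken from this listing, and the assert stands in for the VM_WARN_ON_ONCE checks.

/* Userspace model of the kPCID/uPCID derivation; constants are assumptions. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NR_DYN_ASIDS_GUESS     6       /* assumed size of the dynamic ASID range */
#define PTI_USER_BIT_GUESS     11      /* assumed user/kernel switch bit in CR3 */

/* kPCID: the kernel PCID for an ASID is ASID + 1, because PCID 0 is special. */
static uint16_t model_kern_pcid(uint16_t asid)
{
        assert(asid < NR_DYN_ASIDS_GUESS);      /* dynamic ASIDs are small */
        return asid + 1;
}

/* uPCID: the same ASID, with the user/kernel switch bit set on top of kPCID. */
static uint16_t model_user_pcid(uint16_t asid)
{
        return model_kern_pcid(asid) | (1u << PTI_USER_BIT_GUESS);
}

int main(void)
{
        for (unsigned asid = 0; asid < NR_DYN_ASIDS_GUESS; asid++)
                printf("ASID %u -> kPCID %u, uPCID %u\n", asid,
                       (unsigned)model_kern_pcid(asid),
                       (unsigned)model_user_pcid(asid));
        return 0;
}
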
159 static inline unsigned long build_cr3(pgd_t *pgd, u16 asid, unsigned long lam) in build_cr3() argument
164 cr3 |= kern_pcid(asid); in build_cr3()
166 VM_WARN_ON_ONCE(asid != 0); in build_cr3()
172 static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid, in build_cr3_noflush() argument
181 return build_cr3(pgd, asid, lam) | CR3_NOFLUSH; in build_cr3_noflush()
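
A sketch of how the two CR3 builders compose their result, based on the matched lines: the PCID field carries the kPCID when the CPU has PCID support, otherwise only ASID 0 is legal, and the _noflush variant sets the top bit so the CR3 write does not flush the TLB. That CR3 also carries the PGD's physical address and the LAM bits, and that the no-flush bit is bit 63, are assumptions of this model.

/* Userspace model of build_cr3()/build_cr3_noflush(); layout details are assumptions. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CR3_NOFLUSH_BIT (1ULL << 63)    /* assumed "do not flush" bit */

static uint64_t model_build_cr3(uint64_t pgd_pa, uint16_t asid,
                                uint64_t lam, int have_pcid)
{
        uint64_t cr3 = pgd_pa | lam;    /* page-table root plus LAM mode bits */

        if (have_pcid)
                cr3 |= (uint64_t)asid + 1;      /* kPCID = ASID + 1 */
        else
                assert(asid == 0);              /* without PCID, only ASID 0 */

        return cr3;
}

static uint64_t model_build_cr3_noflush(uint64_t pgd_pa, uint16_t asid,
                                        uint64_t lam, int have_pcid)
{
        /* Same value, but tell the CPU to keep the existing TLB entries. */
        return model_build_cr3(pgd_pa, asid, lam, have_pcid) | CR3_NOFLUSH_BIT;
}

int main(void)
{
        printf("cr3         = %#llx\n",
               (unsigned long long)model_build_cr3(0x1234000, 2, 0, 1));
        printf("cr3 noflush = %#llx\n",
               (unsigned long long)model_build_cr3_noflush(0x1234000, 2, 0, 1));
        return 0;
}
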
192 u16 asid; in clear_asid_other() local
203 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { in clear_asid_other()
204 /* Do not need to flush the current asid */ in clear_asid_other()
205 if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid)) in clear_asid_other()
209 * this asid, we do a flush: in clear_asid_other()
211 this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0); in clear_asid_other()
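
The clear_asid_other() lines walk every dynamic ASID slot except the one currently loaded and zero its recorded context ID, so the next switch into that slot cannot match any mm and has to flush. A minimal model of that walk, with an invented struct standing in for the per-CPU cpu_tlbstate layout:

/* Minimal model of invalidating the non-current ASID slots; the struct is a stand-in. */
#include <stdint.h>
#include <stdio.h>

#define NR_DYN_ASIDS 6                  /* assumed number of dynamic slots */

struct asid_ctx { uint64_t ctx_id; uint64_t tlb_gen; };

struct tlbstate_model {
        struct asid_ctx ctxs[NR_DYN_ASIDS];
        uint16_t loaded_mm_asid;
};

static void model_clear_asid_other(struct tlbstate_model *ts)
{
        for (uint16_t asid = 0; asid < NR_DYN_ASIDS; asid++) {
                if (asid == ts->loaded_mm_asid)
                        continue;               /* no need to flush the current ASID */
                ts->ctxs[asid].ctx_id = 0;      /* force a flush on the next switch */
        }
}

int main(void)
{
        struct tlbstate_model ts = { .loaded_mm_asid = 2 };

        for (int i = 0; i < NR_DYN_ASIDS; i++)
                ts.ctxs[i].ctx_id = 100 + i;

        model_clear_asid_other(&ts);

        for (int i = 0; i < NR_DYN_ASIDS; i++)
                printf("slot %d: ctx_id=%llu\n", i,
                       (unsigned long long)ts.ctxs[i].ctx_id);
        return 0;
}
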
222 u16 asid; in choose_new_asid() local
247 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { in choose_new_asid()
248 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != in choose_new_asid()
252 *new_asid = asid; in choose_new_asid()
253 *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < in choose_new_asid()
259 * We don't currently own an ASID slot on this CPU. in choose_new_asid()
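
choose_new_asid() scans the per-CPU slots for one whose recorded ctx_id already belongs to the incoming mm; on a hit it only flushes if that slot's TLB generation is stale, and on a miss it has to take a fresh slot and flush unconditionally. The sketch below models that decision; the round-robin choice of the fresh slot is an assumption about the fallback path, which the matched lines only hint at.

/* Model of the ASID slot search on context switch; data layout is a stand-in. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_DYN_ASIDS 6

struct asid_ctx { uint64_t ctx_id; uint64_t tlb_gen; };

struct tlbstate_model {
        struct asid_ctx ctxs[NR_DYN_ASIDS];
        uint16_t next_asid;             /* round-robin cursor (assumed policy) */
};

static void model_choose_new_asid(struct tlbstate_model *ts,
                                  uint64_t next_ctx_id, uint64_t next_tlb_gen,
                                  uint16_t *new_asid, bool *need_flush)
{
        for (uint16_t asid = 0; asid < NR_DYN_ASIDS; asid++) {
                if (ts->ctxs[asid].ctx_id != next_ctx_id)
                        continue;

                /* Slot already holds this mm: flush only if it is stale. */
                *new_asid = asid;
                *need_flush = ts->ctxs[asid].tlb_gen < next_tlb_gen;
                return;
        }

        /* No slot owns this mm: take another slot and flush it. */
        *new_asid = ts->next_asid;
        ts->next_asid = (ts->next_asid + 1) % NR_DYN_ASIDS;
        *need_flush = true;
}

int main(void)
{
        struct tlbstate_model ts = {
                .ctxs = { [3] = { .ctx_id = 42, .tlb_gen = 5 } },
        };
        uint16_t asid;
        bool flush;

        model_choose_new_asid(&ts, 42, 7, &asid, &flush);  /* hit, but stale */
        printf("asid=%u flush=%d\n", (unsigned)asid, flush);

        model_choose_new_asid(&ts, 99, 1, &asid, &flush);  /* miss: new slot */
        printf("asid=%u flush=%d\n", (unsigned)asid, flush);
        return 0;
}
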
285 * When the search for a free ASID in the global ASID space reaches
289 * This way the global flush only needs to happen at ASID rollover
290 * time, and not at ASID allocation time.
306 /* Restart the search from the start of global ASID space. */ in reset_global_asid_space()
312 u16 asid; in allocate_global_asid() local
320 asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, last_global_asid); in allocate_global_asid()
322 if (asid >= MAX_ASID_AVAILABLE && !global_asid_available) { in allocate_global_asid()
324 VM_WARN_ONCE(1, "Unable to allocate global ASID despite %d available\n", in allocate_global_asid()
329 /* Claim this global ASID. */ in allocate_global_asid()
330 __set_bit(asid, global_asid_used); in allocate_global_asid()
331 last_global_asid = asid; in allocate_global_asid()
333 return asid; in allocate_global_asid()
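
Global ASIDs (used for broadcast TLB invalidation) are handed out from a bitmap above the dynamic range, scanning forward from the last allocation, and the comments above together with the mm_free_global_asid() line further down say the expensive flush happens only at rollover, when previously freed ASIDs become safe to recycle. The model below captures that lifecycle; the separate "freed" set, the sizes and the flush hook are illustrative assumptions.

/* Model of global ASID allocation with deferred recycling; details are assumptions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_DYN_ASIDS   6
#define MAX_ASIDS      64               /* tiny space so rollover is easy to see */

static bool global_asid_used[MAX_ASIDS];    /* currently claimed */
static bool global_asid_freed[MAX_ASIDS];   /* released, but not yet safe to reuse */
static uint16_t last_global_asid = NR_DYN_ASIDS;

static void model_reset_global_asid_space(void)
{
        printf("-> broadcast TLB flush at rollover\n");
        for (int i = NR_DYN_ASIDS; i < MAX_ASIDS; i++) {
                if (global_asid_freed[i])
                        global_asid_used[i] = false;    /* now safe to recycle */
                global_asid_freed[i] = false;
        }
        last_global_asid = NR_DYN_ASIDS;    /* restart the search from the start */
}

static void model_free_global_asid(uint16_t asid)
{
        /* Reusable only after the flush at wrap-around. */
        global_asid_freed[asid] = true;
}

static uint16_t model_allocate_global_asid(void)
{
        uint16_t asid;

        if (last_global_asid >= MAX_ASIDS - 1)
                model_reset_global_asid_space();

        /* Stand-in for find_next_zero_bit() over the global range. */
        for (asid = last_global_asid; asid < MAX_ASIDS; asid++)
                if (!global_asid_used[asid])
                        break;
        if (asid >= MAX_ASIDS)
                return 0;                   /* nothing free this round */

        global_asid_used[asid] = true;      /* claim this global ASID */
        last_global_asid = asid;
        return asid;
}

int main(void)
{
        uint16_t a = model_allocate_global_asid();
        uint16_t b = model_allocate_global_asid();
        printf("allocated %u and %u\n", (unsigned)a, (unsigned)b);

        model_free_global_asid(a);          /* not reusable until rollover */
        last_global_asid = MAX_ASIDS - 1;   /* pretend the space filled up */
        printf("after rollover: got %u\n", (unsigned)model_allocate_global_asid());
        return 0;
}
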
339 * a global ASID to this process, and use broadcast TLB invalidation.
366 * Assign a global ASID to the current process, protecting against
371 u16 asid; in use_global_asid() local
380 * The last global ASID was consumed while waiting for the lock. in use_global_asid()
382 * If this fires, a more aggressive ASID reuse scheme might be in use_global_asid()
390 asid = allocate_global_asid(); in use_global_asid()
391 if (!asid) in use_global_asid()
394 mm_assign_global_asid(mm, asid); in use_global_asid()
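
use_global_asid() has to re-check its preconditions once it holds the lock: another CPU may already have assigned this mm a global ASID while we waited, or the last free one may have been consumed. A small model of that check-again-under-the-lock pattern; the mutex and the stub allocator stand in for whatever serializes the real allocation.

/* Model of assigning a global ASID with re-checks under the lock; stand-in types. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct mm_model { uint16_t global_asid; };

static pthread_mutex_t global_asid_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int global_asid_available = 2;
static uint16_t next_free_asid = 7;             /* stub allocator state */

static uint16_t stub_allocate_global_asid(void)
{
        global_asid_available--;
        return next_free_asid++;
}

static void model_use_global_asid(struct mm_model *mm)
{
        pthread_mutex_lock(&global_asid_lock);

        if (mm->global_asid)                    /* someone else already did it */
                goto out;
        if (!global_asid_available)             /* consumed while waiting for the lock */
                goto out;

        mm->global_asid = stub_allocate_global_asid();
out:
        pthread_mutex_unlock(&global_asid_lock);
}

int main(void)
{
        struct mm_model mm = { 0 };

        model_use_global_asid(&mm);
        model_use_global_asid(&mm);             /* second call is a no-op */
        printf("mm global ASID: %u, remaining: %u\n",
               (unsigned)mm.global_asid, global_asid_available);
        return 0;
}
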
407 /* The global ASID can be re-used only after flush at wrap-around. */ in mm_free_global_asid()
417 * Is the mm transitioning from a CPU-local ASID to a global ASID?
419 static bool mm_needs_global_asid(struct mm_struct *mm, u16 asid) in mm_needs_global_asid() argument
426 /* Process is transitioning to a global ASID */ in mm_needs_global_asid()
427 if (global_asid && asid != global_asid) in mm_needs_global_asid()
435 * systems have over 8k CPUs. Because of this potential ASID shortage,
449 * Assign a global ASID if the process is active on in consider_global_asid()
469 * the target mm with an out of date ASID. in finish_asid_transition()
478 * If at least one CPU is not using the global ASID yet, in finish_asid_transition()
491 /* All the CPUs running this process are using the global ASID. */ in finish_asid_transition()
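
Once an mm has been given a global ASID it is still "in transition" until every CPU currently running it has reloaded CR3 with that ASID; a CPU still on its old per-CPU ASID would be running with an out-of-date tag. The sketch below only models the check itself (report a straggler, or mark the transition finished); how stragglers are actually nudged along is not shown in the matched lines and is left out here.

/* Model of the global-ASID transition check; per-CPU layout is a stand-in. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct mm_model { uint16_t global_asid; bool asid_transition; };

struct cpu_state {
        struct mm_model *loaded_mm;
        uint16_t loaded_mm_asid;
};

static struct cpu_state cpus[NR_CPUS];

/* Is this mm transitioning, i.e. does it have a global ASID this CPU is not using? */
static bool model_mm_needs_global_asid(const struct mm_model *mm, uint16_t loaded_asid)
{
        return mm->global_asid && loaded_asid != mm->global_asid;
}

static void model_finish_asid_transition(struct mm_model *mm)
{
        if (!mm->asid_transition)
                return;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (cpus[cpu].loaded_mm != mm)
                        continue;
                if (cpus[cpu].loaded_mm_asid != mm->global_asid) {
                        printf("CPU %d still on ASID %u: transition stays open\n",
                               cpu, (unsigned)cpus[cpu].loaded_mm_asid);
                        return;                 /* at least one straggler remains */
                }
        }

        /* All the CPUs running this process are using the global ASID. */
        mm->asid_transition = false;
        printf("transition finished\n");
}

int main(void)
{
        struct mm_model mm = { .global_asid = 1234, .asid_transition = true };

        cpus[0] = (struct cpu_state){ &mm, 3 };     /* straggler on dynamic ASID 3 */
        cpus[2] = (struct cpu_state){ &mm, 1234 };

        printf("CPU 0 needs the global ASID: %d\n",
               model_mm_needs_global_asid(&mm, cpus[0].loaded_mm_asid));

        model_finish_asid_transition(&mm);          /* straggler found */
        cpus[0].loaded_mm_asid = 1234;              /* CPU 0 reloads CR3 */
        model_finish_asid_transition(&mm);          /* now completes */
        return 0;
}
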
498 unsigned long asid = mm_global_asid(info->mm); in broadcast_tlb_flush() local
507 invlpgb_flush_single_pcid_nosync(kern_pcid(asid)); in broadcast_tlb_flush()
510 invlpgb_flush_single_pcid_nosync(user_pcid(asid)); in broadcast_tlb_flush()
519 invlpgb_flush_user_nr_nosync(kern_pcid(asid), addr, nr, pmd); in broadcast_tlb_flush()
521 invlpgb_flush_user_nr_nosync(user_pcid(asid), addr, nr, pmd); in broadcast_tlb_flush()
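
broadcast_tlb_flush() targets both hardware PCIDs derived from the mm's global ASID: the kernel one and, with page-table isolation, the user one, either flushing the whole PCID or just a range of pages. The model below prints what would be flushed instead of issuing the broadcast instructions; the _nosync suffixes in the matched lines suggest the real operations are queued and waited on afterwards, which the final print stands in for.

/* Model of pairing kernel and user PCID flushes; the flush calls are printfs. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTI_USER_BIT_GUESS 11           /* assumed user/kernel switch bit */

static uint16_t kpcid(uint16_t asid) { return asid + 1; }
static uint16_t upcid(uint16_t asid) { return kpcid(asid) | (1u << PTI_USER_BIT_GUESS); }

static void flush_pcid(uint16_t pcid)
{
        printf("broadcast: flush everything tagged PCID %u\n", (unsigned)pcid);
}

static void flush_pcid_range(uint16_t pcid, uint64_t addr, unsigned int nr)
{
        printf("broadcast: flush PCID %u, %u page(s) from %#llx\n",
               (unsigned)pcid, nr, (unsigned long long)addr);
}

static void model_broadcast_tlb_flush(uint16_t asid, bool full, bool pti,
                                      uint64_t addr, unsigned int nr)
{
        if (full) {
                flush_pcid(kpcid(asid));
                if (pti)
                        flush_pcid(upcid(asid));        /* same ASID, user PCID */
        } else {
                flush_pcid_range(kpcid(asid), addr, nr);
                if (pti)
                        flush_pcid_range(upcid(asid), addr, nr);
        }
        printf("wait for the queued broadcasts to complete\n");
}

int main(void)
{
        model_broadcast_tlb_flush(1234, true, true, 0, 0);
        model_broadcast_tlb_flush(1234, false, true, 0x7f0000400000ULL, 4);
        return 0;
}
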
533 * Given an ASID, flush the corresponding user ASID. We can delay this
538 static inline void invalidate_user_asid(u16 asid) in invalidate_user_asid() argument
540 /* There is no user ASID if address space separation is off */ in invalidate_user_asid()
545 * We only have a single ASID if PCID is off and the CR3 in invalidate_user_asid()
554 __set_bit(kern_pcid(asid), in invalidate_user_asid()
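
invalidate_user_asid() does not flush the user half right away: when PTI and PCID are both active it just records the ASID's kPCID in a per-CPU pending mask, and the comment says the flush can be delayed. The model below assumes the pending bit is consumed the next time the user CR3 for that ASID is loaded, which is where the deferred flush would actually happen; the mask layout and the exit hook are illustrative.

/* Model of deferring the user-ASID flush via a per-CPU pending mask; names assumed. */
#include <stdint.h>
#include <stdio.h>

static uint16_t kpcid(uint16_t asid) { return asid + 1; }

static uint32_t user_pcid_flush_mask;   /* one pending bit per kPCID */

static void model_invalidate_user_asid(uint16_t asid)
{
        user_pcid_flush_mask |= 1u << kpcid(asid);
}

/* Assumed hook: returning to userspace with the given ASID loaded. */
static void model_switch_to_user_cr3(uint16_t asid)
{
        uint32_t bit = 1u << kpcid(asid);

        if (user_pcid_flush_mask & bit) {
                user_pcid_flush_mask &= ~bit;
                printf("ASID %u: load user CR3 with a flush\n", (unsigned)asid);
        } else {
                printf("ASID %u: load user CR3 without flushing\n", (unsigned)asid);
        }
}

int main(void)
{
        model_invalidate_user_asid(2);  /* kernel changed user-visible mappings */
        model_switch_to_user_cr3(2);    /* flushes once... */
        model_switch_to_user_cr3(2);    /* ...then keeps the TLB contents again */
        return 0;
}
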
812 * back into an incorrect ASID slot and leave it there in switch_mm_irqs_off()
854 /* Check if the current mm is transitioning to a global ASID */ in switch_mm_irqs_off()
862 * Broadcast TLB invalidation keeps this ASID up to date in switch_mm_irqs_off()
935 /* The new ASID is already up to date. */ in switch_mm_irqs_off()
979 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
1011 /* Disable LAM, force ASID 0 and force a TLB flush. */ in initialize_tlbstate_and_flush()
1070 /* Reload the ASID if transitioning into or out of a global ASID */ in flush_tlb_func()
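
The remaining matches come from the context-switch and flush paths: even when the page tables themselves have not changed, CR3 has to be rewritten with a different ASID if the mm has since picked up a global ASID (or, per the last comment, dropped one). A very small model of the "into a global ASID" direction of that decision follows; the opposite direction would fall back to picking a dynamic slot and is omitted.

/* Model of deciding whether CR3 needs an ASID reload; stand-alone, not the kernel path. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mm_model { uint16_t global_asid; };

/* Returns true when CR3 must be rewritten with *new_asid despite unchanged page tables. */
static bool model_needs_asid_reload(const struct mm_model *mm,
                                    uint16_t loaded_asid, uint16_t *new_asid)
{
        if (mm->global_asid && loaded_asid != mm->global_asid) {
                *new_asid = mm->global_asid;    /* transition into the global ASID */
                return true;
        }
        *new_asid = loaded_asid;                /* the loaded ASID is already right */
        return false;
}

int main(void)
{
        struct mm_model mm = { .global_asid = 0 };
        uint16_t asid;

        printf("reload: %d\n", model_needs_asid_reload(&mm, 3, &asid));

        mm.global_asid = 1234;                  /* mm switched to broadcast invalidation */
        printf("reload: %d (new ASID %u)\n",
               model_needs_asid_reload(&mm, 3, &asid), (unsigned)asid);
        return 0;
}
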