Lines matching full:asid (whole-word matches in arch/x86/mm/tlb.c; each entry shows the file line number, the matching source line, and the enclosing function)
62 * to what is traditionally called ASID on the RISC processors.
64 * We don't use the traditional ASID implementation, where each process/mm gets
65 * its own ASID and flush/restart when we run out of ASID space.
74 * ASID - [0, TLB_NR_DYN_ASIDS-1]
79 * ASID+1, because PCID 0 is special.
83 * PCID values, but we can still do with a single ASID denomination
111 * Given @asid, compute kPCID
113 static inline u16 kern_pcid(u16 asid) in kern_pcid() argument
115 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); in kern_pcid()
119 * Make sure that the dynamic ASID space does not conflict with the in kern_pcid()
125 * The ASID being passed in here should have respected the in kern_pcid()
128 VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT)); in kern_pcid()
135 * If PCID is on, ASID-aware code paths put the ASID+1 into the in kern_pcid()
139 * the TLB for ASID 0 if the saved ASID was nonzero. It also means in kern_pcid()
143 return asid + 1; in kern_pcid()
147 * Given @asid, compute uPCID
149 static inline u16 user_pcid(u16 asid) in user_pcid() argument
151 u16 ret = kern_pcid(asid); in user_pcid()
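The matches above (file lines 113-151) cover kern_pcid() and user_pcid(), which map a dynamic ASID slot to the kernel and user PCID values written into CR3. Below is a minimal user-space model of that mapping. TLB_NR_DYN_ASIDS == 6 and a PTI switch bit at position 11 (hence the +2048 uPCID offset) are assumptions drawn from current kernels, not values shown in this listing; the assert is tighter than the kernel's MAX_ASID_AVAILABLE bound.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_NR_DYN_ASIDS           6    /* assumed number of dynamic ASID slots */
#define X86_CR3_PTI_PCID_USER_BIT 11    /* assumed PTI user/kernel switch bit */

/* Mirrors the conflict check described around file line 119: the
 * dynamic ASID space must stay below the user/kernel switch bit. */
_Static_assert(TLB_NR_DYN_ASIDS < (1 << X86_CR3_PTI_PCID_USER_BIT),
               "dynamic ASIDs collide with the PTI switch bit");

static uint16_t model_kern_pcid(uint16_t asid)
{
        assert(asid < TLB_NR_DYN_ASIDS);    /* stands in for VM_WARN_ON_ONCE */
        return asid + 1;                    /* kPCID = ASID + 1; PCID 0 is special */
}

static uint16_t model_user_pcid(uint16_t asid)
{
        /* uPCID = kPCID with the switch bit set, i.e. kPCID + 2048 */
        return model_kern_pcid(asid) | (1u << X86_CR3_PTI_PCID_USER_BIT);
}

int main(void)
{
        for (unsigned asid = 0; asid < TLB_NR_DYN_ASIDS; asid++)
                printf("ASID %u -> kPCID %u, uPCID %u\n", asid,
                       (unsigned)model_kern_pcid((uint16_t)asid),
                       (unsigned)model_user_pcid((uint16_t)asid));
        return 0;
}

Running this prints ASID 0 -> kPCID 1, uPCID 2049, matching the "2048 + 1" lower bound of the uPCID space quoted in the header comment above.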
158 static inline unsigned long build_cr3(pgd_t *pgd, u16 asid, unsigned long lam) in build_cr3() argument
163 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); in build_cr3()
164 cr3 |= kern_pcid(asid); in build_cr3()
166 VM_WARN_ON_ONCE(asid != 0); in build_cr3()
172 static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid, in build_cr3_noflush() argument
181 return build_cr3(pgd, asid, lam) | CR3_NOFLUSH; in build_cr3_noflush()
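build_cr3() (file line 158) packs the physical PGD address, the LAM bits, and kern_pcid(asid) into a single CR3 value; build_cr3_noflush() additionally sets the no-flush bit so the CR3 write does not flush the TLB. A hedged sketch, with CR3_NOFLUSH assumed to be bit 63 and the PCID feature check reduced to a flag:

#include <assert.h>
#include <stdint.h>

#define MODEL_CR3_NOFLUSH (1ULL << 63)  /* assumed position of CR3_NOFLUSH */

static uint64_t model_build_cr3(uint64_t pgd_pa, uint16_t asid, uint64_t lam,
                                int have_pcid)
{
        uint64_t cr3 = pgd_pa | lam;

        if (have_pcid)
                cr3 |= (uint64_t)(asid + 1);    /* kern_pcid(asid) */
        else
                assert(asid == 0);              /* no PCID: only ASID 0 is legal */
        return cr3;
}

static uint64_t model_build_cr3_noflush(uint64_t pgd_pa, uint16_t asid,
                                        uint64_t lam)
{
        /* The no-flush bit is only meaningful when CR4.PCIDE is set. */
        return model_build_cr3(pgd_pa, asid, lam, 1) | MODEL_CR3_NOFLUSH;
}

int main(void)
{
        /* Hypothetical PGD at physical 0x1000, ASID 2, LAM off:
         * expect 0x1000 | kPCID 3 | bit 63. */
        return model_build_cr3_noflush(0x1000, 2, 0) ==
               (0x1000 | 3 | MODEL_CR3_NOFLUSH) ? 0 : 1;
}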
192 u16 asid; in clear_asid_other() local
203 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { in clear_asid_other()
204 /* Do not need to flush the current asid */ in clear_asid_other()
205 if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid)) in clear_asid_other()
209 * this asid, we do a flush: in clear_asid_other()
211 this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0); in clear_asid_other()
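clear_asid_other() (file lines 192-211) walks every dynamic slot except the currently loaded one and zeroes its ctx_id, so the next switch into that slot cannot match any live mm and is forced to flush. A user-space model follows; the structure names mirror the listing, the sizes are assumptions, and it relies on ctx_id 0 never being a valid context id (true in the kernel, where the ctx_id counter starts at 1):

#include <stdint.h>

#define TLB_NR_DYN_ASIDS 6              /* assumed slot count */

struct model_tlb_context { uint64_t ctx_id; uint64_t tlb_gen; };

struct model_tlb_state {
        uint16_t loaded_mm_asid;
        struct model_tlb_context ctxs[TLB_NR_DYN_ASIDS];
};

static void model_clear_asid_other(struct model_tlb_state *ts)
{
        for (uint16_t asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
                /* Do not need to flush the current asid (file line 204) */
                if (asid == ts->loaded_mm_asid)
                        continue;
                /* ctx_id 0 matches no mm, forcing a flush on next use */
                ts->ctxs[asid].ctx_id = 0;
        }
}

int main(void)
{
        struct model_tlb_state ts = { .loaded_mm_asid = 2 };

        ts.ctxs[0].ctx_id = 11;
        ts.ctxs[2].ctx_id = 42;
        model_clear_asid_other(&ts);
        /* slot 2 survives, slot 0 is invalidated */
        return ts.ctxs[2].ctx_id == 42 && ts.ctxs[0].ctx_id == 0 ? 0 : 1;
}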
222 u16 asid; in choose_new_asid() local
233 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { in choose_new_asid()
234 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != in choose_new_asid()
238 *new_asid = asid; in choose_new_asid()
239 *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < in choose_new_asid()
245 * We don't currently own an ASID slot on this CPU. in choose_new_asid()
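choose_new_asid() (file lines 222-245) first scans the slots for one whose ctx_id already matches the incoming mm; on a hit it only requests a flush when the slot's cached tlb_gen is behind next_tlb_gen. On a miss it evicts a slot and flushes unconditionally. A sketch under the same assumed structures; the kernel's per-CPU next_asid bookkeeping is simplified to a plain round-robin cursor:

#include <stdint.h>

#define TLB_NR_DYN_ASIDS 6              /* assumed slot count */

struct model_tlb_context { uint64_t ctx_id; uint64_t tlb_gen; };

struct model_tlb_state {
        uint16_t next_asid;
        struct model_tlb_context ctxs[TLB_NR_DYN_ASIDS];
};

static void model_choose_new_asid(struct model_tlb_state *ts,
                                  uint64_t next_ctx_id, uint64_t next_tlb_gen,
                                  uint16_t *new_asid, int *need_flush)
{
        for (uint16_t asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
                if (ts->ctxs[asid].ctx_id != next_ctx_id)
                        continue;
                /* Hit: flush only if the cached TLB contents are stale. */
                *new_asid = asid;
                *need_flush = ts->ctxs[asid].tlb_gen < next_tlb_gen;
                return;
        }
        /* Miss: "We don't currently own an ASID slot on this CPU" --
         * evict the next slot round-robin and flush unconditionally. */
        if (ts->next_asid >= TLB_NR_DYN_ASIDS)
                ts->next_asid = 0;
        *new_asid = ts->next_asid++;
        *need_flush = 1;
}

int main(void)
{
        struct model_tlb_state ts = { 0 };
        uint16_t asid;
        int flush;

        ts.ctxs[3] = (struct model_tlb_context){ .ctx_id = 7, .tlb_gen = 5 };
        model_choose_new_asid(&ts, 7, 5, &asid, &flush);
        return asid == 3 && !flush ? 0 : 1;     /* hit, up to date: no flush */
}

The no-flush hit in the test is the "new ASID is already up to date" case matched at file line 647.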
257 * Given an ASID, flush the corresponding user ASID. We can delay this
262 static inline void invalidate_user_asid(u16 asid) in invalidate_user_asid() argument
264 /* There is no user ASID if address space separation is off */ in invalidate_user_asid()
269 * We only have a single ASID if PCID is off and the CR3 in invalidate_user_asid()
278 __set_bit(kern_pcid(asid), in invalidate_user_asid()
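invalidate_user_asid() (file lines 257-278) does not flush the user ASID immediately: it records kern_pcid(asid) in a per-CPU pending mask that the next kernel-to-user CR3 switch consumes. A model with the mask shrunk to a plain bitfield; the variable name echoes the kernel's user_pcid_flush_mask, but its width here is an assumption:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_NR_DYN_ASIDS 6              /* assumed slot count */

static uint32_t user_pcid_flush_mask;   /* per-CPU in the kernel */

static void model_invalidate_user_asid(uint16_t asid)
{
        /* Bit index is kern_pcid(asid) == asid + 1, so bit 0 stays unused. */
        assert(asid < TLB_NR_DYN_ASIDS);
        user_pcid_flush_mask |= 1u << (asid + 1);
}

/* Consumed on the next kernel->user switch for this ASID. */
static int model_user_asid_pending_flush(uint16_t asid)
{
        return !!(user_pcid_flush_mask & (1u << (asid + 1)));
}

int main(void)
{
        model_invalidate_user_asid(3);
        printf("ASID 3 pending flush: %d\n", model_user_asid_pending_flush(3));
        return 0;
}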
540 * back into an incorrect ASID slot and leave it there in switch_mm_irqs_off()
647 /* The new ASID is already up to date. */ in switch_mm_irqs_off()
690 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
721 /* Disable LAM, force ASID 0 and force a TLB flush. */ in initialize_tlbstate_and_flush()
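The last match, from initialize_tlbstate_and_flush(), resets the CPU to a known state: LAM bits clear, ASID 0, and a flushing CR3 write. In terms of the build_cr3() sketch above, that is a CR3 built with asid == 0 and lam == 0 and without the no-flush bit, so the CR3 write itself flushes the TLB. A hedged one-liner:

#include <stdint.h>

/* Reset CR3: ASID 0, LAM off, no bit-63 no-flush flag, so the write
 * flushes. kern_pcid(0) == 1 when PCID is enabled. */
static uint64_t model_reset_cr3(uint64_t pgd_pa, int have_pcid)
{
        return pgd_pa | (have_pcid ? 1u : 0u);
}

int main(void)
{
        return model_reset_cr3(0x1000, 1) == 0x1001 ? 0 : 1;
}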