Lines Matching refs:mm_tlb_gen
1108 * - mm_tlb_gen: the latest generation.
1120 u64 mm_tlb_gen;
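
The two matches above are the declaration side: the comment naming mm_tlb_gen as the latest flush generation, and the u64 local that will later hold a snapshot of it. The real counter lives in mm->context.tlb_gen as an atomic64_t; below is a minimal standalone sketch of the pattern using C11 atomics, with hypothetical names (mm_ctx_sketch, cpu_ctx_sketch, bump_tlb_gen) that are not kernel APIs:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Per-mm state: the latest flush generation, bumped by every requester. */
    struct mm_ctx_sketch {
        _Atomic uint64_t tlb_gen;        /* plays the role of mm_tlb_gen */
    };

    /* Per-CPU, per-ASID state: the generation this CPU has caught up to. */
    struct cpu_ctx_sketch {
        uint64_t tlb_gen;                /* plays the role of local_tlb_gen */
    };

    /* A flush requester bumps the mm-wide counter and targets the new value. */
    static uint64_t bump_tlb_gen(struct mm_ctx_sketch *mm)
    {
        return atomic_fetch_add(&mm->tlb_gen, 1) + 1;
    }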
1173 * While the core might still be behind mm_tlb_gen, checking
1174 * mm_tlb_gen unnecessarily would have negative caching effects
1181 * Defer mm_tlb_gen reading as long as possible to avoid cache
1184 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
1186 if (unlikely(local_tlb_gen == mm_tlb_gen)) {
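
Lines 1173-1186 are the cheap paths. The handler first compares the request against its local generation without touching shared state; only when some flushing looks unavoidable does it read the mm-wide counter, whose cache line is dirtied by every flush requester, and even then a concurrent flush may already have caught this CPU up. A hedged sketch of that ordering, reusing the sketch types above plus a made-up flush_info_sketch (not the kernel's flush_tlb_info):

    struct flush_info_sketch {
        uint64_t new_tlb_gen;            /* generation the requester wants */
    };

    static void flush_handler_sketch(struct mm_ctx_sketch *mm,
                                     struct cpu_ctx_sketch *cpu,
                                     const struct flush_info_sketch *f)
    {
        uint64_t local_tlb_gen = cpu->tlb_gen;
        uint64_t mm_tlb_gen;

        /* Already at or past the requested generation: no need to even
         * read the contended mm-wide counter. */
        if (f->new_tlb_gen <= local_tlb_gen)
            return;

        /* Deferred read, taken only when a flush looks necessary. */
        mm_tlb_gen = atomic_load(&mm->tlb_gen);

        /* A concurrent flush may have caught us all the way up already. */
        if (local_tlb_gen == mm_tlb_gen)
            return;

        /* ... the partial-vs-full decision would follow here ... */
    }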
1196 WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
1197 WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
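
The two WARN_ON_ONCE matches encode the counter's monotonicity: a CPU's local generation only ever catches up to the mm-wide one, and a request's target generation was itself produced by bumping mm_tlb_gen, so neither may exceed the snapshot just read. As plain assertions (a sketch, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    /* The monotonicity invariants behind the two WARN_ON_ONCE checks. */
    static void check_gen_invariants(uint64_t local_tlb_gen,
                                     uint64_t new_tlb_gen,
                                     uint64_t mm_tlb_gen)
    {
        assert(local_tlb_gen <= mm_tlb_gen);  /* a CPU only ever catches up */
        assert(new_tlb_gen <= mm_tlb_gen);    /* targets come from past bumps */
    }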
1223 * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
1228 * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimization.
1233 * local_tlb_gen all the way to mm_tlb_gen and we can probably
1238 f->new_tlb_gen == mm_tlb_gen) {
1265 /* Both paths above update our state to mm_tlb_gen. */
1266 this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
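
Lines 1223-1266 are the decision and the payoff. A partial flush is attempted only when the request is ranged (end != TLB_FLUSH_ALL), is exactly the next generation this CPU needs (local_tlb_gen + 1, which rules out the out-of-order case in the 1223 example), and also brings it fully up to date (== mm_tlb_gen); otherwise a full flush is cheaper overall. A sketch of just that predicate, with TLB_FLUSH_ALL_SKETCH as a stand-in constant:

    #include <stdbool.h>
    #include <stdint.h>

    #define TLB_FLUSH_ALL_SKETCH ((uint64_t)-1)

    /* Partial flush only if the request is ranged, is exactly the next
     * generation for this CPU, and catches it fully up to the mm. */
    static bool want_partial_flush(uint64_t end, uint64_t new_tlb_gen,
                                   uint64_t local_tlb_gen, uint64_t mm_tlb_gen)
    {
        return end != TLB_FLUSH_ALL_SKETCH &&
               new_tlb_gen == local_tlb_gen + 1 &&
               new_tlb_gen == mm_tlb_gen;
    }

Whichever branch runs, the handler then publishes mm_tlb_gen as its new local generation (line 1266), which is exactly what lets the early-out at line 1186 fire for later, already-covered requests.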