Lines Matching full:context

15  *   - The global context lock will not scale very well
18 * - Implement flush_tlb_mm() by making the context stale and picking
93 /* Steal a context from a task that has one at the moment.
98 * This isn't an LRU system, it just frees up each context in
103 * For context stealing, we use a slightly different approach for
124 if (mm->context.active) { in steal_context_smp()
132 /* Mark this mm as having no context anymore */ in steal_context_smp()
133 mm->context.id = MMU_NO_CONTEXT; in steal_context_smp()
177 /* Mark this mm as having no context anymore */ in steal_all_contexts()
178 mm->context.id = MMU_NO_CONTEXT; in steal_all_contexts()
183 mm->context.active = 0; in steal_all_contexts()
216 /* Flush the TLB for that context */ in steal_context_up()
219 /* Mark this mm as having no context anymore */ in steal_context_up()
220 mm->context.id = MMU_NO_CONTEXT; in steal_context_up()
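
The stealing lines above suggest a simple round-robin scheme rather than an LRU: each context is freed in turn, the victim mm is marked as having no context, and the id is reused. A hedged sketch of that core loop follows; next_context, FIRST_CONTEXT and LAST_CONTEXT are assumed names for the allocator cursor and per-core id limits, and locking, IPIs and the per-CPU stale maps are omitted.

static unsigned int steal_any_context(void)
{
        unsigned int id;

        /* Only called when every context is in use, so some mm
         * always owns an id we eventually land on. */
        for (;;) {
                id = next_context;
                if (++next_context > LAST_CONTEXT)
                        next_context = FIRST_CONTEXT;

                if (!context_mm[id])
                        continue;

                /* Mark the victim mm as having no context anymore.
                 * On UP the local TLB can be flushed right here; on
                 * SMP the context must instead be marked stale on
                 * the other CPUs before the id is reused. */
                context_mm[id]->context.id = MMU_NO_CONTEXT;
                context_mm[id] = NULL;
                return id;
        }
}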
241 pr_err("MMU: Context %d is %s and MM is %p !\n", in context_check_map()
244 nact += context_mm[id]->context.active; in context_check_map()
247 pr_err("MMU: Free context count out of sync ! (%d vs %d)\n", in context_check_map()
255 pr_err("MMU: Context 0 has been freed !!!\n"); in context_check_map()
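
The messages above imply two invariants checked by context_check_map(): the number of free ids in the context map must match a running free counter, and context 0 must never be freed. A hedged sketch of such a check using generic bitmap helpers; the map and counter names are illustrative, not the file's exact ones.

static void check_context_map(const unsigned long *map, unsigned int last_id,
                              int nr_free_expected)
{
        unsigned int id;
        int nr_free = 0;

        for (id = 0; id <= last_id; id++)
                if (!test_bit(id, map))         /* clear bit => id is free */
                        nr_free++;

        if (nr_free != nr_free_expected)
                pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
                       nr_free, nr_free_expected);

        /* Context 0 must never be handed back, whoever owns it. */
        if (!test_bit(0, map))
                pr_err("MMU: Context 0 has been freed !!!\n");
}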
273 pr_hard("[%d] activating context for mm @%p, active=%d, id=%d", in switch_mmu_context()
274 cpu, next, next->context.active, next->context.id); in switch_mmu_context()
278 next->context.active++; in switch_mmu_context()
280 pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active); in switch_mmu_context()
281 WARN_ON(prev->context.active < 1); in switch_mmu_context()
282 prev->context.active--; in switch_mmu_context()
288 /* If we already have a valid assigned context, skip all that */ in switch_mmu_context()
289 id = next->context.id; in switch_mmu_context()
299 /* We really don't have a context, let's try to acquire one */ in switch_mmu_context()
323 /* We know there's at least one free context, try to find it */ in switch_mmu_context()
332 next->context.id = id; in switch_mmu_context()
338 /* If that context got marked stale on this CPU, then flush the in switch_mmu_context()
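
Pieced together from the switch_mmu_context() lines above, the allocation path appears to be: keep a valid id if one is already assigned, otherwise find a free id in the context map (stealing one when none is free), then flush the TLB if that id was marked stale on this CPU. A simplified, assumption-laden sketch; context_map, LAST_CONTEXT and steal_any_context are stand-in names, and the locking and active-count bookkeeping shown in the earlier lines are elided.

static unsigned int get_mmu_context(struct mm_struct *next)
{
        unsigned int id = next->context.id;

        /* If we already have a valid assigned context, skip all that. */
        if (id != MMU_NO_CONTEXT)
                return id;

        /* We really don't have a context: find a free one, stealing
         * somebody else's if the map is full. */
        id = find_first_zero_bit(context_map, LAST_CONTEXT + 1);
        if (id > LAST_CONTEXT)
                id = steal_any_context();

        __set_bit(id, context_map);
        context_mm[id] = next;
        next->context.id = id;

        /* If that context got marked stale on this CPU, its TLB entries
         * must be flushed (e.g. a local TLB flush) before it is used. */
        return id;
}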
365 * Set up the context for a new address space.
369 pr_hard("initing context for mm @%p\n", mm); in init_new_context()
373 * explicitly against context.id == 0. This ensures that we properly in init_new_context()
374 * initialize context slice details for newly allocated mm's (which will in init_new_context()
375 * have id == 0) and don't alter context slice inherited via fork (which in init_new_context()
378 if (mm->context.id == 0) in init_new_context()
380 mm->context.id = MMU_NO_CONTEXT; in init_new_context()
381 mm->context.active = 0; in init_new_context()
382 pte_frag_set(&mm->context, NULL); in init_new_context()
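
Assembled from the init_new_context() lines above, a minimal sketch of setting up the context for a new address space; the slice-init helper name is an assumption on my part, the rest mirrors the matched lines.

int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
        /* Only a freshly allocated mm still has context.id == 0; a forked
         * mm inherited its slice setup and must not have it reset here. */
        if (mm->context.id == 0)
                slice_init_new_context_exec(mm);

        mm->context.id = MMU_NO_CONTEXT;        /* no hardware context yet */
        mm->context.active = 0;
        pte_frag_set(&mm->context, NULL);

        return 0;
}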
387 * We're finished using the context for an address space.
394 if (mm->context.id == MMU_NO_CONTEXT) in destroy_context()
397 WARN_ON(mm->context.active != 0); in destroy_context()
400 id = mm->context.id; in destroy_context()
403 mm->context.id = MMU_NO_CONTEXT; in destroy_context()
405 mm->context.active = 0; in destroy_context()
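
The destroy_context() lines above suggest teardown is essentially the inverse of setup. A hedged sketch; the context-map and context_mm[] bookkeeping is only noted in comments since those lines are not shown here.

void destroy_context(struct mm_struct *mm)
{
        unsigned int id = mm->context.id;

        if (id == MMU_NO_CONTEXT)
                return;                         /* never got a context */

        WARN_ON(mm->context.active != 0);       /* must not be running */

        /* Hand the id back: the real code also clears its bit in the
         * context map and drops context_mm[id] under the context lock. */
        mm->context.id = MMU_NO_CONTEXT;
        mm->context.active = 0;
}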
422 pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu); in mmu_ctx_cpu_prepare()
433 pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu); in mmu_ctx_cpu_dead()
446 * Initialize the context management stuff.
454 init_mm.context.active = NR_CPUS; in mmu_context_init()
457 * Allocate the maps used by context management in mmu_context_init()
480 "MMU: Allocated %zu bytes of context maps for %d contexts\n", in mmu_context_init()
486 * init_mm, and require using context 0 for a normal task. in mmu_context_init()
487 * Other processors reserve the use of context zero for the kernel. in mmu_context_init()
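
The final lines note why context 0 is special: some cores must hand it to a normal task, while others reserve it for the kernel. A hedged sketch of the corresponding init step; FIRST_CONTEXT is an assumed name for the first id usable by user address spaces.

static void __init reserve_boot_contexts(unsigned long *map)
{
        unsigned int id;

        /* The kernel's init_mm is considered active on every CPU. */
        init_mm.context.active = NR_CPUS;

        /* Ids below the first usable one (including context 0 on cores
         * that reserve it for the kernel) are pre-marked as taken so
         * they are never handed out to user mms. */
        for (id = 0; id < FIRST_CONTEXT; id++)
                __set_bit(id, map);
}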