Lines matching full:fs — struct faultstate fragments from FreeBSD's vm_fault.c (sys/vm). Lines that did not match the search are elided below and marked /* ... */.
static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked);
static void
vm_fault_unlock_map(struct faultstate *fs)
{
        if (fs->lookup_still_valid) {
                vm_map_lookup_done(fs->map, fs->entry);
                fs->lookup_still_valid = false;
        }
}
static void
vm_fault_unlock_vp(struct faultstate *fs)
{
        if (fs->vp != NULL) {
                vput(fs->vp);
                fs->vp = NULL;
        }
}
static bool
vm_fault_might_be_cow(struct faultstate *fs)
{
        return (fs->object != fs->first_object);
}
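
/*
 * Drop every resource recorded in the faultstate: the pages held in
 * fs->m_cow, fs->m, and fs->first_m, the paging-in-progress counts on
 * both objects, the reference on fs->first_object, the map lookup, and
 * any held vnode.
 */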
static void
vm_fault_deallocate(struct faultstate *fs)
{
        fs->m_needs_zeroing = true;
        vm_fault_page_release(&fs->m_cow);
        vm_fault_page_release(&fs->m);
        vm_object_pip_wakeup(fs->object);
        if (vm_fault_might_be_cow(fs)) {
                VM_OBJECT_WLOCK(fs->first_object);
                vm_fault_page_free(&fs->first_m);
                VM_OBJECT_WUNLOCK(fs->first_object);
                vm_object_pip_wakeup(fs->first_object);
        }
        vm_object_deallocate(fs->first_object);
        vm_fault_unlock_map(fs);
        vm_fault_unlock_vp(fs);
}
static void
vm_fault_unlock_and_deallocate(struct faultstate *fs)
{
        VM_OBJECT_UNLOCK(fs->object);
        vm_fault_deallocate(fs);
}
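
/*
 * Mark the faulted page dirty (or arrange for lazy dirtying) when the
 * fault grants write access, honoring MAP_ENTRY_NOSYNC and the
 * VM_FAULT_WIRE and VM_FAULT_DIRTY flags.
 */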
static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
        bool need_dirty;

        if (((fs->prot & VM_PROT_WRITE) == 0 &&
            (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
            (m->oflags & VPO_UNMANAGED) != 0)
                return;
        /* ... */
        need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
            (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
            (fs->fault_flags & VM_FAULT_DIRTY) != 0;
        /* ... */
        if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
                vm_page_aflag_set(m, PGA_NOSYNC);
        else
                vm_page_aflag_clear(m, PGA_NOSYNC);
        /* ... */
}
static bool
vm_fault_is_read(const struct faultstate *fs)
{
        return ((fs->prot & VM_PROT_WRITE) == 0 &&
            (fs->fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) == 0);
}
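
/*
 * Fast path for a soft fault: the page is expected to be resident and
 * valid in fs->first_object, so it can be mapped without pager I/O or
 * a walk of the shadow chain.
 */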
/*
 * Unlocks fs.first_object and fs.map on success.
 */
static enum fault_status
vm_fault_soft_fast(struct faultstate *fs)
{
        /* ... */
        MPASS(fs->vp == NULL);

        /* ... */
        m = vm_page_lookup_unlocked(fs->first_object, fs->first_pindex);
        if (m == NULL || !vm_page_all_valid(m) ||
            ((fs->prot & VM_PROT_WRITE) != 0 && vm_page_busied(m))) {
                VM_OBJECT_WLOCK(fs->first_object);
                return (FAULT_FAILURE);
        }

        vaddr = fs->vaddr;

        VM_OBJECT_RLOCK(fs->first_object);

        /* ... */
        if (m->object != fs->first_object || m->pindex != fs->first_pindex)
                goto fail;

        vm_object_busy(fs->first_object);

        if (!vm_page_all_valid(m) ||
            ((fs->prot & VM_PROT_WRITE) != 0 && vm_page_busied(m)))
                goto fail_busy;

        /* ... */
        if ((fs->prot & VM_PROT_WRITE) != 0) {
                /* ... */
                if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
                        /* ... */
        }

        /* ... */
        while (rounddown2(vaddr, pagesizes[psind]) < fs->entry->start ||
            roundup2(vaddr + 1, pagesizes[psind]) > fs->entry->end ||
            /* ... */
            !pmap_ps_enabled(fs->map->pmap)) {
                /* ... */
        }
        /* ... */
        fs->fault_type |= VM_PROT_WRITE;
        /* ... */
        if (pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
            PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind) !=
            KERN_SUCCESS)
                goto fail_busy;
        if (fs->m_hold != NULL) {
                (*fs->m_hold) = m;
                vm_page_wire(m);
        }
        /* ... */
        if (psind == 0 && !fs->wired)
                vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
        VM_OBJECT_RUNLOCK(fs->first_object);
        vm_fault_dirty(fs, m);
        vm_object_unbusy(fs->first_object);
        vm_map_lookup_done(fs->map, fs->entry);
        /* ... */
        return (FAULT_SUCCESS);

fail_busy:
        vm_object_unbusy(fs->first_object);
fail:
        /* ... */
        if (!VM_OBJECT_TRYUPGRADE(fs->first_object)) {
                VM_OBJECT_RUNLOCK(fs->first_object);
                VM_OBJECT_WLOCK(fs->first_object);
        }
        return (FAULT_FAILURE);
}
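
/*
 * Reacquire the map's read lock after pager activity; first_object's
 * paging-in-progress count keeps the object pinned while its lock is
 * briefly dropped to avoid a lock-order reversal.
 */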
static void
vm_fault_restore_map_lock(struct faultstate *fs)
{
        VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
        MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);

        if (!vm_map_trylock_read(fs->map)) {
                VM_OBJECT_WUNLOCK(fs->first_object);
                vm_map_lock_read(fs->map);
                VM_OBJECT_WLOCK(fs->first_object);
        }
        fs->lookup_still_valid = true;
}
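
/*
 * Ask the pager to populate a run of pages around the faulting index
 * (pagers that set OBJ_POPULATE, such as the device pager, implement
 * this), then map and account every page of the returned run.
 */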
static enum fault_status
vm_fault_populate(struct faultstate *fs)
{
        /* ... */
        MPASS(fs->object == fs->first_object);
        VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
        MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
        MPASS(fs->first_object->backing_object == NULL);
        MPASS(fs->lookup_still_valid);

        pager_first = OFF_TO_IDX(fs->entry->offset);
        pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
        vm_fault_unlock_map(fs);
        vm_fault_unlock_vp(fs);

        /* ... */
        rv = vm_pager_populate(fs->first_object, fs->first_pindex,
            fs->fault_type, fs->entry->max_protection, &pager_first,
            &pager_last);

        VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
        /* ... */
        vm_fault_restore_map_lock(fs);
        if (fs->map->timestamp != fs->map_generation)
                return (FAULT_RESTART);
        /* ... */
        MPASS(fs->first_pindex <= pager_last);
        MPASS(fs->first_pindex >= pager_first);
        MPASS(pager_last < fs->first_object->size);

        vm_fault_restore_map_lock(fs);
        bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(fs->entry);
        if (fs->map->timestamp != fs->map_generation) {
                /* ... */
                vm_fault_populate_cleanup(fs->first_object, pager_first,
                    pager_last);
                /* ... */
                m = vm_page_lookup(fs->first_object, pager_first);
                if (m != fs->m)
                        /* ... */
                return (FAULT_RESTART);
        }

        /* ... */
        m = vm_page_lookup(fs->first_object, pager_first);
        /* ... */
        VM_OBJECT_WUNLOCK(fs->first_object);
        vaddr = fs->entry->start + IDX_TO_OFF(pager_first) -
            fs->entry->offset;
        /* ... */
            (uintmax_t)fs->entry->start, (uintmax_t)pager_first,
            (uintmax_t)fs->entry->offset, (uintmax_t)vaddr));
        /* ... */
        rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
            fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) |
            PMAP_ENTER_LARGEPAGE, bdry_idx);
        VM_OBJECT_WLOCK(fs->first_object);
        /* ... */
        if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
                /* ... */
        }
        if (fs->m_hold != NULL) {
                *fs->m_hold = m + (fs->first_pindex - pager_first);
                vm_page_wire(*fs->m_hold);
        }
        /* ... */

        map_first = OFF_TO_IDX(fs->entry->offset);
        if (map_first > pager_first) {
                vm_fault_populate_cleanup(fs->first_object, pager_first,
                    map_first);
                pager_first = map_first;
        }
        map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
        if (map_last < pager_last) {
                vm_fault_populate_cleanup(fs->first_object, map_last + 1,
                    pager_last);
                pager_last = map_last;
        }
        /* ... */
                m = vm_page_lookup(fs->first_object, pidx);
                vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
                /* ... */
                    !pmap_ps_enabled(fs->map->pmap)))
                        /* ... */
                /* ... */
                writeable = (fs->prot & VM_PROT_WRITE) != 0;
                /* ... */
                for (i = 0; i < npages; i++)
                        vm_fault_dirty(fs, &m[i]);
                /* ... */
                        fs->fault_type |= VM_PROT_WRITE;
                VM_OBJECT_WUNLOCK(fs->first_object);
                rv = pmap_enter(fs->map->pmap, vaddr, m,
                    fs->prot & ~(writeable ? 0 : VM_PROT_WRITE),
                    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
                /* ... */
                MPASS(!fs->wired);
                for (i = 0; i < npages; i++) {
                        rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
                            &m[i], fs->prot, fs->fault_type, 0);
                        /* ... */
                }
                /* ... */
                VM_OBJECT_WLOCK(fs->first_object);
                for (i = 0; i < npages; i++) {
                        if ((fs->fault_flags & VM_FAULT_WIRE) != 0 &&
                            m[i].pindex == fs->first_pindex)
                                vm_page_wire(&m[i]);
                        /* ... */
                        if (fs->m_hold != NULL &&
                            m[i].pindex == fs->first_pindex) {
                                (*fs->m_hold) = &m[i];
                                vm_page_wire(&m[i]);
                        }
                        /* ... */
                }
        /* ... */
}
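
/*
 * Ensure the lock on fs->object is held exclusively, upgrading a read
 * lock when possible; returns false when the upgrade fails and the
 * caller must drop everything and restart the fault.
 */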
static bool
vm_fault_object_ensure_wlocked(struct faultstate *fs)
{
        if (fs->object == fs->first_object)
                VM_OBJECT_ASSERT_WLOCKED(fs->object);
        /* ... */
        if (!fs->can_read_lock) {
                VM_OBJECT_ASSERT_WLOCKED(fs->object);
                return (true);
        }
        /* ... */
        if (VM_OBJECT_WOWNED(fs->object))
                return (true);

        if (VM_OBJECT_TRYUPGRADE(fs->object))
                return (true);
        /* ... */
        return (false);
}
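
/*
 * For vnode-backed objects, acquire the vnode lock that the pager will
 * need; the faultstate locks may have to be dropped to sleep on the
 * vnode, in which case the fault is restarted.
 */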
static enum fault_status
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
        struct vnode *vp;
        /* ... */
        if (fs->object->type != OBJT_VNODE)
                return (FAULT_CONTINUE);
        vp = fs->object->handle;
        if (vp == fs->vp) {
                /* ... */
        }

        /* ... */
        vm_fault_unlock_vp(fs);
        /* ... */
        if (/* ... */) {
                fs->vp = vp;
                return (FAULT_CONTINUE);
        }
        /* ... */
        if (objlocked)
                vm_fault_unlock_and_deallocate(fs);
        else
                vm_fault_deallocate(fs);
        /* ... */
        fs->vp = vp;
        /* ... */
        return (FAULT_RESTART);
}
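
/*
 * Choose a read-ahead window for this fault from the map entry's
 * MADV_* behavior and from whether faults have been arriving
 * sequentially (fs->vaddr == fs->entry->next_read).
 */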
static int
vm_fault_readahead(struct faultstate *fs)
{
        int era, nera;
        u_char behavior;

        KASSERT(fs->lookup_still_valid, ("map unlocked"));
        era = fs->entry->read_ahead;
        behavior = vm_map_entry_behavior(fs->entry);
        if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
                nera = 0;
        } else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
                nera = VM_FAULT_READ_AHEAD_MAX;
                if (fs->vaddr == fs->entry->next_read)
                        vm_fault_dontneed(fs, fs->vaddr, nera);
        } else if (fs->vaddr == fs->entry->next_read) {
                /* ... */
                        vm_fault_dontneed(fs, fs->vaddr, nera);
                /* ... */
        } else {
                /* ... */
        }
        if (era != nera) {
                /* ... */
                fs->entry->read_ahead = nera;
        }

        return (nera);
}
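
/*
 * Translate the faulting address into a map entry, object, pindex, and
 * protection via vm_map_lookup(), sleeping and retrying when the entry
 * is in transition (being wired) by another thread.
 */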
static int
vm_fault_lookup(struct faultstate *fs)
{
        int result;

        KASSERT(!fs->lookup_still_valid,
            /* ... */);
        result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
            VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
            &fs->first_pindex, &fs->prot, &fs->wired);
        if (result != KERN_SUCCESS) {
                vm_fault_unlock_vp(fs);
                return (result);
        }

        fs->map_generation = fs->map->timestamp;

        if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
                panic("%s: fault on nofault entry, addr: %#lx",
                    __func__, (u_long)fs->vaddr);
        }

        if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
            fs->entry->wiring_thread != curthread) {
                vm_map_unlock_read(fs->map);
                vm_map_lock(fs->map);
                if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
                    (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
                        vm_fault_unlock_vp(fs);
                        fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
                        vm_map_unlock_and_wait(fs->map, 0);
                } else
                        vm_map_unlock(fs->map);
                return (KERN_RESOURCE_SHORTAGE);
        }

        MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);

        if (fs->wired)
                fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
        else
                KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
                    ("!fs->wired && VM_FAULT_WIRE"));
        fs->lookup_still_valid = true;

        return (KERN_SUCCESS);
}
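
/*
 * Revalidate the map lookup after the map lock was dropped; cheap when
 * the map timestamp is unchanged, otherwise the lookup is redone and
 * must resolve to the same object and pindex with compatible
 * protection.
 */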
static int
vm_fault_relookup(struct faultstate *fs)
{
        vm_object_t retry_object;
        vm_pindex_t retry_pindex;
        vm_prot_t retry_prot;
        int result;

        if (!vm_map_trylock_read(fs->map))
                return (KERN_RESTART);

        fs->lookup_still_valid = true;
        if (fs->map->timestamp == fs->map_generation)
                return (KERN_SUCCESS);

        result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
            &fs->entry, &retry_object, &retry_pindex, &retry_prot,
            &fs->wired);
        /* ... */
        if (retry_object != fs->first_object ||
            retry_pindex != fs->first_pindex)
                return (KERN_RESTART);
        /* ... */
        fs->prot &= retry_prot;
        fs->fault_type &= retry_prot;
        if (fs->prot == 0)
                return (KERN_RESTART);

        /* ... */
        KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
            /* ... */);

        return (KERN_SUCCESS);
}
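
/*
 * A COW fault may steal ("rename") the backing page into the shadow
 * object instead of copying it, but only when nothing else can reach
 * the backing object: one shadow, one reference, no handle, anonymous.
 */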
static bool
vm_fault_can_cow_rename(struct faultstate *fs)
{
        return (
            /* ... */
            fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
            /* ... */
            fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0);
}
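
/*
 * Resolve a copy-on-write fault: either rename the backing page into
 * fs->first_object (see vm_fault_can_cow_rename() above) or copy it
 * into fs->first_m, remembering the source page in fs->m_cow.
 */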
static void
vm_fault_cow(struct faultstate *fs)
{
        bool is_first_object_locked, rename_cow;

        KASSERT(vm_fault_might_be_cow(fs),
            /* ... */);

        /* ... */
        is_first_object_locked = false;
        rename_cow = false;

        if (vm_fault_can_cow_rename(fs) && vm_page_xbusied(fs->m)) {
                /* ... */
                is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object);
                if (is_first_object_locked &&
                    fs->object == fs->first_object->backing_object) {
                        if (VM_OBJECT_TRYWLOCK(fs->object)) {
                                rename_cow = vm_fault_can_cow_rename(fs);
                                if (!rename_cow)
                                        VM_OBJECT_WUNLOCK(fs->object);
                        }
                }
        }

        if (rename_cow) {
                vm_page_assert_xbusied(fs->m);

                /*
                 * Remove but keep xbusy for replace.  fs->m is moved into
                 * fs->first_object and left busy while fs->first_m is
                 * [...]
                 */
                vm_page_remove_xbusy(fs->m);
                vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
                    fs->first_m);
                vm_page_dirty(fs->m);
                /* ... */
                vm_reserv_rename(fs->m, fs->first_object, fs->object,
                    OFF_TO_IDX(fs->first_object->backing_object_offset));
                /* ... */
                VM_OBJECT_WUNLOCK(fs->object);
                VM_OBJECT_WUNLOCK(fs->first_object);
                fs->first_m = fs->m;
                fs->m = NULL;
                /* ... */
        } else {
                /* ... */
                if (is_first_object_locked)
                        VM_OBJECT_WUNLOCK(fs->first_object);
                /* ... */
                pmap_copy_page(fs->m, fs->first_m);
                /* ... */
                if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
                        vm_page_wire(fs->first_m);
                        vm_page_unwire(fs->m, PQ_INACTIVE);
                }

                /* ... */
                fs->m_cow = fs->m;
                fs->m = NULL;

                /*
                 * [...]
                 * In the fs->m shared busy case, the xbusy state of
                 * fs->first_m prevents new mappings of fs->m from
                 * [...] shadow chain should wait for xbusy on fs->first_m.
                 */
                if ((fs->first_object->flags & OBJ_ONEMAPPING) == 0)
                        pmap_remove_all(fs->m_cow);
        }

        vm_object_pip_wakeup(fs->object);

        /* ... */
        fs->object = fs->first_object;
        fs->pindex = fs->first_pindex;
        fs->m = fs->first_m;
        /* ... */
}
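
/*
 * Step to the backing object in the shadow chain, carrying the
 * paging-in-progress reference along and biasing fs->pindex by the
 * backing object's offset.
 */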
static enum fault_next_status
vm_fault_next(struct faultstate *fs)
{
        vm_object_t next_object;

        if (fs->object == fs->first_object || !fs->can_read_lock)
                VM_OBJECT_ASSERT_WLOCKED(fs->object);
        else
                VM_OBJECT_ASSERT_LOCKED(fs->object);

        /* ... */
        if (fs->object == fs->first_object) {
                fs->first_m = fs->m;
                fs->m = NULL;
        } else if (fs->m != NULL) {
                if (!vm_fault_object_ensure_wlocked(fs)) {
                        fs->can_read_lock = false;
                        vm_fault_unlock_and_deallocate(fs);
                        return (FAULT_NEXT_RESTART);
                }
                vm_fault_page_free(&fs->m);
        }

        /* ... */
        next_object = fs->object->backing_object;
        if (next_object == NULL)
                return (FAULT_NEXT_NOOBJ);
        MPASS(fs->first_m != NULL);
        KASSERT(fs->object != next_object, ("object loop %p", next_object));
        if (fs->can_read_lock)
                VM_OBJECT_RLOCK(next_object);
        else
                VM_OBJECT_WLOCK(next_object);
        /* ... */
        if (fs->object != fs->first_object)
                vm_object_pip_wakeup(fs->object);
        fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
        VM_OBJECT_UNLOCK(fs->object);
        fs->object = next_object;

        return (FAULT_NEXT_GOTOBJ);
}
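
/*
 * No pager anywhere in the shadow chain supplied the page: fall back to
 * the page allocated in the top-level object and zero it, unless the
 * allocator already handed back a pre-zeroed (PG_ZERO) page.
 */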
static void
vm_fault_zerofill(struct faultstate *fs)
{
        /* ... */
        if (vm_fault_might_be_cow(fs)) {
                vm_object_pip_wakeup(fs->object);
                fs->object = fs->first_object;
                fs->pindex = fs->first_pindex;
        }
        MPASS(fs->first_m != NULL);
        MPASS(fs->m == NULL);
        fs->m = fs->first_m;
        fs->first_m = NULL;

        /* ... */
        if (fs->m_needs_zeroing) {
                pmap_zero_page(fs->m);
                /* ... */
        }
        /* ... */
        sf = sf_buf_alloc(fs->m, SFB_CPUPRIVATE);
        /* ... */
            fs->m, i, (uintmax_t)*p));
        /* ... */
        vm_page_valid(fs->m);
        /* ... */
}
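
/*
 * Back off after a failed page allocation; returns true while the
 * fault should keep retrying, tracking how long this fault has been
 * starved via fs->oom_start_time.
 */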
static bool
vm_fault_allocate_oom(struct faultstate *fs)
{
        struct timeval now;

        vm_fault_unlock_and_deallocate(fs);
        /* ... */
        if (!fs->oom_started) {
                fs->oom_started = true;
                getmicrotime(&fs->oom_start_time);
                return (true);
        }

        getmicrotime(&now);
        timevalsub(&now, &fs->oom_start_time);
        /* ... */
        fs->oom_started = false;
        /* ... */
}
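
/*
 * Allocate fs->m at fs->pindex, first trying the pager's populate
 * method for objects with OBJ_POPULATE set and then falling back to
 * vm_page_alloc_iter(); the fault is restarted when allocation fails.
 */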
static enum fault_status
vm_fault_allocate(struct faultstate *fs, struct pctrie_iter *pages)
{
        struct domainset *dset;
        enum fault_status res;
        /* ... */
        if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
                res = vm_fault_lock_vnode(fs, true);
                /* ... */
        }

        if (fs->pindex >= fs->object->size) {
                vm_fault_unlock_and_deallocate(fs);
                return (FAULT_OUT_OF_BOUNDS);
        }

        if (fs->object == fs->first_object &&
            (fs->first_object->flags & OBJ_POPULATE) != 0 &&
            fs->first_object->shadow_count == 0) {
                res = vm_fault_populate(fs);
                switch (res) {
                /* ... */
                        vm_fault_unlock_and_deallocate(fs);
                        /* ... */
                }
        }

        /* ... */
        dset = fs->object->domain.dr_policy;
        /* ... */
        vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
        /* ... */
        if (!vm_pager_can_alloc_page(fs->object, fs->pindex)) {
                vm_fault_unlock_and_deallocate(fs);
                return (FAULT_FAILURE);
        }
        fs->m = vm_page_alloc_iter(fs->object, fs->pindex,
            /* ... */, pages);
        if (fs->m == NULL) {
                if (vm_fault_allocate_oom(fs))
                        /* ... */
                /* ... */
        }
        fs->m_needs_zeroing = (fs->m->flags & PG_ZERO) == 0;
        fs->oom_started = false;
        /* ... */
        return (FAULT_CONTINUE);
}
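
/*
 * Issue pager I/O for the faulting page, sizing the behind/ahead
 * cluster from the read-ahead estimate and clipping it to the map
 * entry's bounds.
 */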
static enum fault_status
vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
{
        /* ... */
        e_start = fs->entry->start;
        e_end = fs->entry->end;
        behavior = vm_map_entry_behavior(fs->entry);

        /* ... */
        if (fs->nera == -1 && !P_KILLED(curproc))
                fs->nera = vm_fault_readahead(fs);

        /* ... */
        vm_fault_unlock_map(fs);

        status = vm_fault_lock_vnode(fs, false);
        /* ... */
        KASSERT(fs->vp == NULL || !vm_map_is_system(fs->map),
            /* ... */);

        /* ... */
        if (fs->nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
            P_KILLED(curproc)) {
                /* ... */
        } else {
                /* ... */
                if (fs->nera > 0) {
                        behind = 0;
                        ahead = fs->nera;
                } else {
                        /* ... */
                        cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
                        behind = ulmin(cluster_offset,
                            atop(fs->vaddr - e_start));
                        /* ... */
                }
                ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
        }
        *behindp = behind;
        *aheadp = ahead;
        rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
        /* ... */
        if (/* ... */) {
                VM_OBJECT_WLOCK(fs->object);
                vm_fault_page_free(&fs->m);
                vm_fault_unlock_and_deallocate(fs);
                /* ... */
        }
        /* ... */
}
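
/*
 * The page is busy in another thread: release everything except the
 * reference on fs->first_object and sleep until the page is unbusied,
 * after which the fault restarts.
 */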
static void
vm_fault_busy_sleep(struct faultstate *fs, int allocflags)
{
        /* ... */
        vm_page_aflag_set(fs->m, PGA_REFERENCED);
        if (vm_fault_might_be_cow(fs)) {
                vm_fault_page_release(&fs->first_m);
                vm_object_pip_wakeup(fs->first_object);
        }
        vm_object_pip_wakeup(fs->object);
        vm_fault_unlock_map(fs);
        if (!vm_page_busy_sleep(fs->m, "vmpfw", allocflags))
                VM_OBJECT_UNLOCK(fs->object);
        /* ... */
        vm_object_deallocate(fs->first_object);
}
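
/*
 * Try to resolve the fault in the current object: find and busy a
 * resident page, allocate a fresh one, or start pager I/O. The status
 * code returned tells the caller to finish, restart, or walk on to the
 * backing object.
 */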
static enum fault_status
vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
{
        struct pctrie_iter pages;
        enum fault_status res;
        bool dead;

        if (fs->object == fs->first_object || !fs->can_read_lock)
                VM_OBJECT_ASSERT_WLOCKED(fs->object);
        else
                VM_OBJECT_ASSERT_LOCKED(fs->object);

        /* ... */
        if ((fs->object->flags & OBJ_DEAD) != 0) {
                dead = fs->object->type == OBJT_DEAD;
                vm_fault_unlock_and_deallocate(fs);
                /* ... */
        }

        /* ... */
        vm_page_iter_init(&pages, fs->object);
        fs->m = vm_radix_iter_lookup(&pages, fs->pindex);
        if (fs->m != NULL) {
                /* ... */
                if (vm_page_all_valid(fs->m) &&
                    /*
                     * No write permissions for the new fs->m mapping,
                     * [...] other writeable COW mappings of fs->m cannot
                     * [...]
                     */
                    (vm_fault_is_read(fs) || vm_fault_might_be_cow(fs)) &&
                    /*
                     * fs->m cannot be renamed from object to
                     * [...]
                     */
                    (!vm_fault_can_cow_rename(fs) ||
                    fs->object != fs->first_object->backing_object)) {
                        if (!vm_page_trysbusy(fs->m)) {
                                vm_fault_busy_sleep(fs, VM_ALLOC_SBUSY);
                                return (FAULT_RESTART);
                        }

                        /* ... */
                        if (__predict_true(vm_page_all_valid(fs->m) &&
                            (vm_fault_is_read(fs) ||
                            vm_fault_might_be_cow(fs)))) {
                                VM_OBJECT_UNLOCK(fs->object);
                                return (FAULT_SOFT);
                        }
                        vm_page_sunbusy(fs->m);
                }

                if (!vm_page_tryxbusy(fs->m)) {
                        vm_fault_busy_sleep(fs, 0);
                        return (FAULT_RESTART);
                }

                /* ... */
                if (vm_page_all_valid(fs->m)) {
                        VM_OBJECT_UNLOCK(fs->object);
                        return (FAULT_SOFT);
                }
        }

        /* ... */
        if (fs->m == NULL && (vm_fault_object_needs_getpages(fs->object) ||
            fs->object == fs->first_object)) {
                if (!vm_fault_object_ensure_wlocked(fs)) {
                        fs->can_read_lock = false;
                        vm_fault_unlock_and_deallocate(fs);
                        return (FAULT_RESTART);
                }
                res = vm_fault_allocate(fs, &pages);
                /* ... */
        }

        /* ... */
        if (vm_fault_object_needs_getpages(fs->object)) {
                /* ... */
                VM_OBJECT_UNLOCK(fs->object);
                res = vm_fault_getpages(fs, behindp, aheadp);
                if (res == FAULT_CONTINUE)
                        VM_OBJECT_WLOCK(fs->object);
        }
        /* ... */
}
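
/*
 * Top-level page fault handler: look up the address, try the lockless
 * fast path, then iterate vm_fault_object()/vm_fault_next() down the
 * shadow chain, handling COW, zero-fill, and read-ahead before
 * entering the resolved page into the pmap.
 */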
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
        struct faultstate fs;
        /* ... */

        fs.vp = NULL;
        fs.vaddr = vaddr;
        fs.m_hold = m_hold;
        fs.fault_flags = fault_flags;
        fs.map = map;
        fs.lookup_still_valid = false;
        fs.m_needs_zeroing = true;
        fs.oom_started = false;
        fs.nera = -1;
        fs.can_read_lock = true;
        /* ... */

RetryFault:
        fs.fault_type = fault_type;
        /* ... */
        rv = vm_fault_lookup(&fs);
        /* ... */

        if (fs.vp == NULL /* avoid locked vnode leak */ &&
            (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 &&
            (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
                res = vm_fault_soft_fast(&fs);
                if (res == FAULT_SUCCESS) {
                        VM_OBJECT_ASSERT_UNLOCKED(fs.first_object);
                        /* ... */
                }
                VM_OBJECT_ASSERT_WLOCKED(fs.first_object);
        } else {
                vm_page_iter_init(&pages, fs.first_object);
                VM_OBJECT_WLOCK(fs.first_object);
        }

        /* ... */
        vm_object_reference_locked(fs.first_object);
        vm_object_pip_add(fs.first_object, 1);

        fs.m_cow = fs.m = fs.first_m = NULL;

        /* ... */
        fs.object = fs.first_object;
        fs.pindex = fs.first_pindex;

        if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
                res = vm_fault_allocate(&fs, &pages);
                /* ... */
        }

        while (TRUE) {
                KASSERT(fs.m == NULL,
                    ("page still set %p at loop start", fs.m));

                res = vm_fault_object(&fs, &behind, &ahead);
                /* ... */

                res_next = vm_fault_next(&fs);
                /* ... */
                if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) {
                        if (fs.first_object == fs.object)
                                vm_fault_page_free(&fs.first_m);
                        vm_fault_unlock_and_deallocate(&fs);
                        return (KERN_OUT_OF_BOUNDS);
                }
                VM_OBJECT_UNLOCK(fs.object);
                vm_fault_zerofill(&fs);
                /* ... */
                break;
        }

        /*
         * Regardless of the busy state of fs.m, fs.first_m is always
         * [...]
         */
        vm_page_assert_busied(fs.m);
        VM_OBJECT_ASSERT_UNLOCKED(fs.object);

        /* ... */
        if (vm_fault_might_be_cow(&fs)) {
                /* ... */
                if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
                        vm_fault_cow(&fs);
                        /* ... */
                } else {
                        /* ... */
                        fs.prot &= ~VM_PROT_WRITE;
                }
        }

        /* ... */
        if (!fs.lookup_still_valid) {
                rv = vm_fault_relookup(&fs);
                if (rv != KERN_SUCCESS) {
                        vm_fault_deallocate(&fs);
                        /* ... */
                }
        }
        VM_OBJECT_ASSERT_UNLOCKED(fs.object);

        /* ... */
        if (hardfault)
                fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

        /* ... */
        if (fs.m_cow != NULL) {
                KASSERT(vm_page_none_valid(fs.m),
                    ("vm_fault: page %p is already valid", fs.m_cow));
                vm_page_valid(fs.m);
                /* ... */
        }

        /* ... */
        vm_page_assert_busied(fs.m);
        KASSERT(vm_page_all_valid(fs.m),
            ("vm_fault: page %p partially invalid", fs.m));

        vm_fault_dirty(&fs, fs.m);

        /* ... */
        pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
            fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0);
        if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 &&
            fs.wired == 0)
                vm_fault_prefault(&fs, vaddr,
                    faultcount > 0 ? behind : PFBAK,
                    faultcount > 0 ? ahead : PFFOR, false);

        /* ... */
        if ((fs.fault_flags & VM_FAULT_WIRE) != 0)
                vm_page_wire(fs.m);
        else
                vm_page_activate(fs.m);
        if (fs.m_hold != NULL) {
                (*fs.m_hold) = fs.m;
                vm_page_wire(fs.m);
        }

        KASSERT(fs.first_object == fs.object || vm_page_xbusied(fs.first_m),
            /* ... */);
        if (vm_page_xbusied(fs.m))
                vm_page_xunbusy(fs.m);
        else
                vm_page_sunbusy(fs.m);
        fs.m = NULL;

        /* ... */
        vm_fault_deallocate(&fs);
        /* ... */
        if (racct_enable && fs.object->type == OBJT_VNODE) {
                PROC_LOCK(curproc);
                if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
                        /* ... */
                }
                /* ... */
        }
        /* ... */
}
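
/*
 * After a run of sequential faults, advise the pmap (MADV_DONTNEED)
 * that the region just read behind the faulting address will not be
 * needed again soon.
 */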
/*
 * [...]
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * [...]
 */
static void
vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
{
        /* ... */
        VM_OBJECT_ASSERT_UNLOCKED(fs->object);
        first_object = fs->first_object;
        /* ... */
        if (/* ... */
            (entry = fs->entry)->start < end) {
                /* ... */
                pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
                /* ... */
        }
        /* ... */
}
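
/*
 * Opportunistically pre-map resident pages around the faulting address
 * (PFBAK pages back, PFFOR pages forward) to reduce future soft
 * faults.
 */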
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
        pmap_t pmap;
        vm_map_entry_t entry;
        /* ... */
        pmap = fs->map->pmap;
        if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
                return;

        entry = fs->entry;
        /* ... */
        if ((fs->prot & VM_PROT_WRITE) != 0)
                /* ... */
        /* ... */
}
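
For context, a rough sketch of how a caller drives vm_fault(); the helper name and error handling below are invented for illustration and are not part of vm_fault.c, but the vm_fault() signature and flags match the fragments above.

/*
 * Hypothetical example only: fault in a user address for write access
 * and hold the resulting page wired via the *m_hold out-parameter,
 * roughly the way vm_fault_quick_hold_pages()-style callers funnel
 * into vm_fault().
 */
static int
example_fault_in(struct proc *p, vm_offset_t addr)
{
        vm_map_t map;
        vm_page_t m;
        int rv;

        map = &p->p_vmspace->vm_map;
        /* Resolve the fault; a non-NULL m_hold returns the page wired. */
        rv = vm_fault(map, trunc_page(addr), VM_PROT_WRITE,
            VM_FAULT_NORMAL, &m);
        if (rv != KERN_SUCCESS)
                return (EFAULT);
        /* ... use the page ... */
        vm_page_unwire(m, PQ_ACTIVE);
        return (0);
}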