Lines Matching +full:non +full:- +full:descriptive
1 // SPDX-License-Identifier: GPL-2.0
5 * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
43 * pseudo-locked regions.
56 return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdt_kn_name(rdtgrp->kn)); in pseudo_lock_devnode()
65 * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported
70 * pseudo-locking. This includes testing to ensure pseudo-locked regions
72 * as well as that these pseudo-locked regions can maintain their low cache
75 * After a platform has been validated to support pseudo-locking its
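The mask returned here is consumed later in this file when the locking thread quiesces the hardware prefetchers (see the "Re-enable the hardware prefetcher(s)" fragment at line 536). A minimal sketch of that usage, assuming the wrmsrl() helper and a restore value of zero; the MSR_MISC_FEATURE_CONTROL bit meanings in the comment are the SDM-documented ones:

    /* Sketch (not verbatim kernel code): the returned mask is written to the
     * Intel prefetcher-control MSR around the locking sequence. Per the SDM,
     * MSR_MISC_FEATURE_CONTROL (0x1a4) uses bit 0 for the L2 hardware
     * prefetcher, bit 1 for the L2 adjacent-line prefetcher, bit 2 for the
     * DCU hardware prefetcher and bit 3 for the DCU IP prefetcher.
     */
    u64 prefetch_disable_bits = resctrl_arch_get_prefetch_disable_bits();

    wrmsrl(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits); /* quiesce */
    /* ... load the buffer to be pseudo-locked ... */
    wrmsrl(MSR_MISC_FEATURE_CONTROL, 0x0);                   /* restore */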
125 * pseudo_lock_minor_get - Obtain available minor number
140 return -ENOSPC; in pseudo_lock_minor_get()
149 * pseudo_lock_minor_release - Return minor number to available
158 * region_find_by_minor - Locate a pseudo-lock region by inode minor number
159 * @minor: The minor number of the device representing pseudo-locked region
162 * pseudo-locked region it belongs to. This is done by matching the minor
163 * number of the device to the pseudo-locked region it belongs to.
165 * Minor numbers are assigned at the time a pseudo-locked region is associated
168 * Return: On success return pointer to resource group owning the pseudo-locked
176 if (rdtgrp->plr && rdtgrp->plr->minor == minor) { in region_find_by_minor()
185 * struct pseudo_lock_pm_req - A power management QoS request list entry
186 * @list: Entry within the @pm_reqs list for a pseudo-locked region
198 list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { in pseudo_lock_cstates_relax()
199 dev_pm_qos_remove_request(&pm_req->req); in pseudo_lock_cstates_relax()
200 list_del(&pm_req->list); in pseudo_lock_cstates_relax()
206 * pseudo_lock_cstates_constrain - Restrict cores from entering C6
207 * @plr: Pseudo-locked region
229 for_each_cpu(cpu, &plr->d->hdr.cpu_mask) { in pseudo_lock_cstates_constrain()
233 ret = -ENOMEM; in pseudo_lock_cstates_constrain()
237 &pm_req->req, in pseudo_lock_cstates_constrain()
244 ret = -1; in pseudo_lock_cstates_constrain()
247 list_add(&pm_req->list, &plr->pm_reqs); in pseudo_lock_cstates_constrain()
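The elided lines of this loop allocate one request per CPU in the domain and register a device PM QoS resume-latency constraint tight enough to keep the core out of deep C-states. A minimal sketch of the per-CPU body, assuming an out_err unwind label and a 30 microsecond latency bound:

    /* Sketch: one DEV_PM_QOS_RESUME_LATENCY request per CPU of the cache
     * domain, kept on plr->pm_reqs so pseudo_lock_cstates_relax() can undo it.
     */
    pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
    if (!pm_req) {
            ret = -ENOMEM;
            goto out_err;
    }
    ret = dev_pm_qos_add_request(get_cpu_device(cpu), &pm_req->req,
                                 DEV_PM_QOS_RESUME_LATENCY,
                                 30);   /* microseconds; assumed bound */
    if (ret < 0) {
            kfree(pm_req);
            goto out_err;
    }
    list_add(&pm_req->list, &plr->pm_reqs);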
258 * pseudo_lock_region_clear - Reset pseudo-lock region data
259 * @plr: pseudo-lock region
261 * All content of the pseudo-locked region is reset - any memory allocated
268 plr->size = 0; in pseudo_lock_region_clear()
269 plr->line_size = 0; in pseudo_lock_region_clear()
270 kfree(plr->kmem); in pseudo_lock_region_clear()
271 plr->kmem = NULL; in pseudo_lock_region_clear()
272 plr->s = NULL; in pseudo_lock_region_clear()
273 if (plr->d) in pseudo_lock_region_clear()
274 plr->d->plr = NULL; in pseudo_lock_region_clear()
275 plr->d = NULL; in pseudo_lock_region_clear()
276 plr->cbm = 0; in pseudo_lock_region_clear()
277 plr->debugfs_dir = NULL; in pseudo_lock_region_clear()
281 * pseudo_lock_region_init - Initialize pseudo-lock region information
282 * @plr: pseudo-lock region
284 * Called after user provided a schemata to be pseudo-locked. From the
287 * required for pseudo-locking is deduced from this data and &struct
289 * - size in bytes of the region to be pseudo-locked
290 * - cache line size to know the stride with which data needs to be accessed
291 * to be pseudo-locked
292 * - a cpu associated with the cache instance on which the pseudo-locking
295 * Return: 0 on success, <0 on failure. Descriptive error will be written
300 enum resctrl_scope scope = plr->s->res->ctrl_scope; in pseudo_lock_region_init()
305 return -ENODEV; in pseudo_lock_region_init()
308 plr->cpu = cpumask_first(&plr->d->hdr.cpu_mask); in pseudo_lock_region_init()
310 if (!cpu_online(plr->cpu)) { in pseudo_lock_region_init()
312 plr->cpu); in pseudo_lock_region_init()
313 ret = -ENODEV; in pseudo_lock_region_init()
317 ci = get_cpu_cacheinfo_level(plr->cpu, scope); in pseudo_lock_region_init()
319 plr->line_size = ci->coherency_line_size; in pseudo_lock_region_init()
320 plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm); in pseudo_lock_region_init()
324 ret = -1; in pseudo_lock_region_init()
332 * pseudo_lock_init - Initialize a pseudo-lock region
333 * @rdtgrp: resource group to which new pseudo-locked region will belong
335 * A pseudo-locked region is associated with a resource group. When this
336 * association is created the pseudo-locked region is initialized. The
337 * details of the pseudo-locked region are not known at this time so only
348 return -ENOMEM; in pseudo_lock_init()
350 init_waitqueue_head(&plr->lock_thread_wq); in pseudo_lock_init()
351 INIT_LIST_HEAD(&plr->pm_reqs); in pseudo_lock_init()
352 rdtgrp->plr = plr; in pseudo_lock_init()
357 * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
358 * @plr: pseudo-lock region
360 * Initialize the details required to set up the pseudo-locked region and
361 * allocate the contiguous memory that will be pseudo-locked to the cache.
363 * Return: 0 on success, <0 on failure. Descriptive error will be written
378 if (plr->size > KMALLOC_MAX_SIZE) { in pseudo_lock_region_alloc()
380 ret = -E2BIG; in pseudo_lock_region_alloc()
384 plr->kmem = kzalloc(plr->size, GFP_KERNEL); in pseudo_lock_region_alloc()
385 if (!plr->kmem) { in pseudo_lock_region_alloc()
387 ret = -ENOMEM; in pseudo_lock_region_alloc()
400 * pseudo_lock_free - Free a pseudo-locked region
401 * @rdtgrp: resource group to which pseudo-locked region belonged
403 * The pseudo-locked region's resources have already been released, or not
411 pseudo_lock_region_clear(rdtgrp->plr); in pseudo_lock_free()
412 kfree(rdtgrp->plr); in pseudo_lock_free()
413 rdtgrp->plr = NULL; in pseudo_lock_free()
417 * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache
418 * @_plr: the pseudo-lock region descriptor
420 * This is the core pseudo-locking flow.
425 * with class of service set to the bitmask of the pseudo-locked region.
448 * pseudo-locking success rate when KASAN is active. in resctrl_arch_pseudo_lock_fn()
461 * will get a cache hit in below loop from outside of pseudo-locked in resctrl_arch_pseudo_lock_fn()
480 * being pseudo-locked is reached the hardware will not read beyond in resctrl_arch_pseudo_lock_fn()
481 * the buffer and evict pseudo-locked memory read earlier from the in resctrl_arch_pseudo_lock_fn()
488 mem_r = plr->kmem; in resctrl_arch_pseudo_lock_fn()
489 size = plr->size; in resctrl_arch_pseudo_lock_fn()
490 line_size = plr->line_size; in resctrl_arch_pseudo_lock_fn()
494 * pseudo-locked followed by reading of kernel memory to load it in resctrl_arch_pseudo_lock_fn()
497 __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid); in resctrl_arch_pseudo_lock_fn()
501 * into cache region associated with just activated plr->closid. in resctrl_arch_pseudo_lock_fn()
503 * - In first loop the cache region is shared with the page walker in resctrl_arch_pseudo_lock_fn()
505 * - In the second loop the paging structure caches are used and in resctrl_arch_pseudo_lock_fn()
532 * does not overlap with pseudo-locked region. in resctrl_arch_pseudo_lock_fn()
536 /* Re-enable the hardware prefetcher(s) */ in resctrl_arch_pseudo_lock_fn()
540 plr->thread_done = 1; in resctrl_arch_pseudo_lock_fn()
541 wake_up_interruptible(&plr->lock_thread_wq); in resctrl_arch_pseudo_lock_fn()
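The elided body between the PQR_ASSOC write above and the prefetcher re-enable is a pair of read loops over the buffer: the first at page stride so the page walker's fills happen up front, the second at cache-line stride to pull every line into the allocated ways. A minimal sketch reusing mem_r, size and line_size from the fragments above; the inline asm keeps the compiler from optimizing the loads away:

    /* First pass: touch each page to populate the paging-structure caches. */
    for (i = 0; i < size; i += PAGE_SIZE) {
            asm volatile("mov (%0,%1,1), %%eax\n\t"
                         :
                         : "r" (mem_r), "r" (i)
                         : "%eax", "memory");
    }
    /* Second pass: read at cache-line stride to lock every line of the
     * buffer into the cache portion selected by plr->closid.
     */
    for (i = 0; i < size; i += line_size) {
            asm volatile("mov (%0,%1,1), %%eax\n\t"
                         :
                         : "r" (mem_r), "r" (i)
                         : "%eax", "memory");
    }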
546 * rdtgroup_monitor_in_progress - Test if monitoring in progress
554 return !list_empty(&rdtgrp->mon.crdtgrp_list); in rdtgroup_monitor_in_progress()
558 * rdtgroup_locksetup_user_restrict - Restrict user access to group
561 * A resource group used for cache pseudo-locking cannot have cpus or tasks
608 * rdtgroup_locksetup_user_restore - Restore user access to group
655 * rdtgroup_locksetup_enter - Resource group enters locksetup mode
659 * to represent a pseudo-locked region and is in the process of being set
660 * up to do so. A resource group used for a pseudo-locked region would
663 * future. Monitoring of a pseudo-locked region is not allowed either.
665 * The above and more restrictions on a pseudo-locked region are checked
681 rdt_last_cmd_puts("Cannot pseudo-lock default group\n"); in rdtgroup_locksetup_enter()
682 return -EINVAL; in rdtgroup_locksetup_enter()
686 * Cache Pseudo-locking not supported when CDP is enabled. in rdtgroup_locksetup_enter()
690 * - When CDP is enabled two separate resources are exposed, in rdtgroup_locksetup_enter()
692 * The implication for pseudo-locking is that if a in rdtgroup_locksetup_enter()
693 * pseudo-locked region is created on a domain of one in rdtgroup_locksetup_enter()
694 * resource (eg. L3CODE), then a pseudo-locked region cannot in rdtgroup_locksetup_enter()
697 * pseudo-locked region involves a call to wbinvd that will in rdtgroup_locksetup_enter()
699 * - Considering the previous, it may be possible to only in rdtgroup_locksetup_enter()
700 * expose one of the CDP resources to pseudo-locking and in rdtgroup_locksetup_enter()
704 * - If only one region is exposed to pseudo-locking we should in rdtgroup_locksetup_enter()
706 * for pseudo-locking should take into account both resources. in rdtgroup_locksetup_enter()
707 * Similarly, if a pseudo-locked region is created in one in rdtgroup_locksetup_enter()
714 return -EINVAL; in rdtgroup_locksetup_enter()
719 * platform does not support Cache Pseudo-Locking. in rdtgroup_locksetup_enter()
722 rdt_last_cmd_puts("Pseudo-locking not supported\n"); in rdtgroup_locksetup_enter()
723 return -EINVAL; in rdtgroup_locksetup_enter()
728 return -EINVAL; in rdtgroup_locksetup_enter()
733 return -EINVAL; in rdtgroup_locksetup_enter()
736 if (!cpumask_empty(&rdtgrp->cpu_mask)) { in rdtgroup_locksetup_enter()
738 return -EINVAL; in rdtgroup_locksetup_enter()
743 return -EIO; in rdtgroup_locksetup_enter()
748 rdt_last_cmd_puts("Unable to init pseudo-lock region\n"); in rdtgroup_locksetup_enter()
755 * anymore when this group would be used for pseudo-locking. This in rdtgroup_locksetup_enter()
758 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); in rdtgroup_locksetup_enter()
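From user space this path is reached through the resctrl filesystem: the group's mode file is switched to "pseudo-locksetup" and a later schemata write performs the actual locking. A hedged userspace sketch; the group name "newlock" and the L2 schemata line are assumptions and depend on the hardware:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    static int write_str(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, val, strlen(val)) < 0) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }

    int request_pseudo_lock(void)
    {
            /* Group "newlock" is assumed to have been created with mkdir. */
            if (write_str("/sys/fs/resctrl/newlock/mode", "pseudo-locksetup"))
                    return -1;
            /* Hardware-specific example: lock two ways of L2 cache id 0.
             * A successful write pseudo-locks the region.
             */
            return write_str("/sys/fs/resctrl/newlock/schemata", "L2:0=0x3\n");
    }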
 770 * rdtgroup_locksetup_exit - Resource group exits locksetup mode
783 ret = alloc_rmid(rdtgrp->closid); in rdtgroup_locksetup_exit()
788 rdtgrp->mon.rmid = ret; in rdtgroup_locksetup_exit()
793 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); in rdtgroup_locksetup_exit()
802 * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
808 * pseudo-locked region on @d.
813 * Return: true if @cbm overlaps with pseudo-locked region on @d, false
821 if (d->plr) { in rdtgroup_cbm_overlaps_pseudo_locked()
822 cbm_len = d->plr->s->res->cache.cbm_len; in rdtgroup_cbm_overlaps_pseudo_locked()
823 cbm_b = d->plr->cbm; in rdtgroup_cbm_overlaps_pseudo_locked()
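The remainder of the test, elided here, is a plain bitmap intersection over the cache's capacity bitmask width. A minimal sketch, assuming @cbm is the unsigned long bitmask being validated:

    /* Sketch: any shared bit between the candidate CBM and the
     * pseudo-locked CBM means the region would be evicted.
     */
    if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
            return true;
    return false;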
831 * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
834 * The setup of a pseudo-locked region affects all cache instances within
836 * pseudo-locked regions exist within a cache hierarchy to prevent any
837 * attempts to create new pseudo-locked regions in the same hierarchy.
839 * Return: true if a pseudo-locked region exists in the hierarchy of @d or
850 /* Walking r->domains, ensure it can't race with cpuhp */ in rdtgroup_pseudo_locked_in_hierarchy()
857 * First determine which cpus have pseudo-locked regions in rdtgroup_pseudo_locked_in_hierarchy()
861 list_for_each_entry(d_i, &r->ctrl_domains, hdr.list) { in rdtgroup_pseudo_locked_in_hierarchy()
862 if (d_i->plr) in rdtgroup_pseudo_locked_in_hierarchy()
864 &d_i->hdr.cpu_mask); in rdtgroup_pseudo_locked_in_hierarchy()
869 * Next test if new pseudo-locked region would intersect with in rdtgroup_pseudo_locked_in_hierarchy()
872 if (cpumask_intersects(&d->hdr.cpu_mask, cpu_with_psl)) in rdtgroup_pseudo_locked_in_hierarchy()
880 * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read
881 * pseudo-locked memory
882 * @_plr: pseudo-lock region to measure
908 mem_r = READ_ONCE(plr->kmem); in resctrl_arch_measure_cycles_lat_fn()
914 for (i = 0; i < plr->size; i += 32) { in resctrl_arch_measure_cycles_lat_fn()
921 trace_pseudo_lock_mem_latency((u32)(end - start)); in resctrl_arch_measure_cycles_lat_fn()
925 plr->thread_done = 1; in resctrl_arch_measure_cycles_lat_fn()
926 wake_up_interruptible(&plr->lock_thread_wq); in resctrl_arch_measure_cycles_lat_fn()
975 miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu, in measure_residency_fn()
980 hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu, in measure_residency_fn()
1008 * interrupts disabled - it is thus safe to read the counter index. in measure_residency_fn()
1012 line_size = READ_ONCE(plr->line_size); in measure_residency_fn()
1013 mem_r = READ_ONCE(plr->kmem); in measure_residency_fn()
1014 size = READ_ONCE(plr->size); in measure_residency_fn()
1017 * Read counter variables twice - first to load the instructions in measure_residency_fn()
1024 * From SDM: Performing back-to-back fast reads is not guaranteed in measure_residency_fn()
1060 /* Re-enable hardware prefetchers */ in measure_residency_fn()
1071 counts->miss_before = miss_before; in measure_residency_fn()
1072 counts->hits_before = hits_before; in measure_residency_fn()
1073 counts->miss_after = miss_after; in measure_residency_fn()
1074 counts->hits_after = hits_after; in measure_residency_fn()
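The elided setup completes the two truncated perf_event_create_kernel_counter() calls above: one counter for cache misses and one for hits, pinned to the pseudo-locked region's CPU with no task context and no overflow handler so they can be read locally with interrupts disabled. A minimal sketch, assuming out/out_miss unwind labels:

    /* Sketch: per-CPU kernel counters; attrs come from the caller
     * (resctrl_arch_measure_l2_residency()/..._l3_residency()).
     */
    miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
                                                  NULL, NULL, NULL);
    if (IS_ERR(miss_event))
            goto out;

    hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
                                                 NULL, NULL, NULL);
    if (IS_ERR(hit_event))
            goto out_miss;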
1084 * Non-architectural event for the Goldmont Microarchitecture in resctrl_arch_measure_l2_residency()
1108 trace_pseudo_lock_l2(counts.hits_after - counts.hits_before, in resctrl_arch_measure_l2_residency()
1109 counts.miss_after - counts.miss_before); in resctrl_arch_measure_l2_residency()
1111 plr->thread_done = 1; in resctrl_arch_measure_l2_residency()
1112 wake_up_interruptible(&plr->lock_thread_wq); in resctrl_arch_measure_l2_residency()
1148 counts.miss_after -= counts.miss_before; in resctrl_arch_measure_l3_residency()
1158 counts.hits_after -= counts.hits_before; in resctrl_arch_measure_l3_residency()
1160 counts.hits_after -= min(counts.miss_after, counts.hits_after); in resctrl_arch_measure_l3_residency()
1162 counts.hits_after -= counts.hits_before; in resctrl_arch_measure_l3_residency()
1167 plr->thread_done = 1; in resctrl_arch_measure_l3_residency()
1168 wake_up_interruptible(&plr->lock_thread_wq); in resctrl_arch_measure_l3_residency()
1173 * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
1174 * @rdtgrp: Resource group to which the pseudo-locked region belongs.
1175 * @sel: Selector of which measurement to perform on a pseudo-locked region.
1177 * The measurement of latency to access a pseudo-locked region should be
1178 * done from a cpu that is associated with that pseudo-locked region.
1186 struct pseudo_lock_region *plr = rdtgrp->plr; in pseudo_lock_measure_cycles()
1189 int ret = -1; in pseudo_lock_measure_cycles()
1194 if (rdtgrp->flags & RDT_DELETED) { in pseudo_lock_measure_cycles()
1195 ret = -ENODEV; in pseudo_lock_measure_cycles()
1199 if (!plr->d) { in pseudo_lock_measure_cycles()
1200 ret = -ENODEV; in pseudo_lock_measure_cycles()
1204 plr->thread_done = 0; in pseudo_lock_measure_cycles()
1205 cpu = cpumask_first(&plr->d->hdr.cpu_mask); in pseudo_lock_measure_cycles()
1207 ret = -ENODEV; in pseudo_lock_measure_cycles()
1211 plr->cpu = cpu; in pseudo_lock_measure_cycles()
1230 ret = wait_event_interruptible(plr->lock_thread_wq, in pseudo_lock_measure_cycles()
1231 plr->thread_done == 1); in pseudo_lock_measure_cycles()
1247 struct rdtgroup *rdtgrp = file->private_data; in pseudo_lock_measure_trigger()
1253 buf_size = min(count, (sizeof(buf) - 1)); in pseudo_lock_measure_trigger()
1255 return -EFAULT; in pseudo_lock_measure_trigger()
1261 return -EINVAL; in pseudo_lock_measure_trigger()
1262 ret = debugfs_file_get(file->f_path.dentry); in pseudo_lock_measure_trigger()
1268 debugfs_file_put(file->f_path.dentry); in pseudo_lock_measure_trigger()
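The selector written to this debugfs file picks the measurement: 1 for the memory-latency test, 2 for L2 residency, 3 for L3 residency, with results emitted as the pseudo_lock trace events used above. A hedged userspace sketch; the group name "newlock" is an assumption:

    #include <fcntl.h>
    #include <unistd.h>

    int trigger_latency_measurement(void)
    {
            int fd = open("/sys/kernel/debug/resctrl/newlock/pseudo_lock_measure",
                          O_WRONLY);

            if (fd < 0)
                    return -1;
            write(fd, "1", 1);  /* 1 = latency, 2 = L2, 3 = L3 residency */
            close(fd);
            return 0;
    }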
1281 * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
1282 * @rdtgrp: resource group to which pseudo-lock region belongs
1284 * Called when a resource group in the pseudo-locksetup mode receives a
1285 * valid schemata that should be pseudo-locked. Since the resource group is
1286 * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
1288 * occurs the resource group remains in the pseudo-locksetup mode with the
1290 * information and ready for the user to re-attempt pseudo-locking by
1293 * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
1294 * on failure. Descriptive error will be written to last_cmd_status buffer.
1298 struct pseudo_lock_region *plr = rdtgrp->plr; in rdtgroup_pseudo_lock_create()
1311 ret = -EINVAL; in rdtgroup_pseudo_lock_create()
1314 kn_name = kstrdup(rdt_kn_name(rdtgrp->kn), GFP_KERNEL); in rdtgroup_pseudo_lock_create()
1316 ret = -ENOMEM; in rdtgroup_pseudo_lock_create()
1320 plr->thread_done = 0; in rdtgroup_pseudo_lock_create()
1323 plr->cpu, "pseudo_lock/%u"); in rdtgroup_pseudo_lock_create()
1330 ret = wait_event_interruptible(plr->lock_thread_wq, in rdtgroup_pseudo_lock_create()
1331 plr->thread_done == 1); in rdtgroup_pseudo_lock_create()
1339 * empty pseudo-locking loop. in rdtgroup_pseudo_lock_create()
1353 * pseudo-locked region will still be here on return. in rdtgroup_pseudo_lock_create()
1356 * deadlock with the mm->mmap_lock which is obtained in the in rdtgroup_pseudo_lock_create()
1363 plr->debugfs_dir = debugfs_create_dir(kn_name, debugfs_resctrl); in rdtgroup_pseudo_lock_create()
1364 if (!IS_ERR_OR_NULL(plr->debugfs_dir)) in rdtgroup_pseudo_lock_create()
1366 plr->debugfs_dir, rdtgrp, in rdtgroup_pseudo_lock_create()
1383 /* We released the mutex - check if group was removed while we did so */ in rdtgroup_pseudo_lock_create()
1384 if (rdtgrp->flags & RDT_DELETED) { in rdtgroup_pseudo_lock_create()
1385 ret = -ENODEV; in rdtgroup_pseudo_lock_create()
1389 plr->minor = new_minor; in rdtgroup_pseudo_lock_create()
1391 rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED; in rdtgroup_pseudo_lock_create()
1392 closid_free(rdtgrp->closid); in rdtgroup_pseudo_lock_create()
1402 debugfs_remove_recursive(plr->debugfs_dir); in rdtgroup_pseudo_lock_create()
1413 * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
1414 * @rdtgrp: resource group to which the pseudo-locked region belongs
1416 * The removal of a pseudo-locked region can be initiated when the resource
1419 * not go back to pseudo-locksetup mode before it is removed, instead it is
1428 struct pseudo_lock_region *plr = rdtgrp->plr; in rdtgroup_pseudo_lock_remove()
1430 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { in rdtgroup_pseudo_lock_remove()
1432 * Default group cannot be a pseudo-locked region so we can in rdtgroup_pseudo_lock_remove()
1435 closid_free(rdtgrp->closid); in rdtgroup_pseudo_lock_remove()
1440 debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); in rdtgroup_pseudo_lock_remove()
1441 device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); in rdtgroup_pseudo_lock_remove()
1442 pseudo_lock_minor_release(plr->minor); in rdtgroup_pseudo_lock_remove()
1457 return -ENODEV; in pseudo_lock_dev_open()
1460 filp->private_data = rdtgrp; in pseudo_lock_dev_open()
1461 atomic_inc(&rdtgrp->waitcount); in pseudo_lock_dev_open()
1462 /* Perform a non-seekable open - llseek is not supported */ in pseudo_lock_dev_open()
1463 filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); in pseudo_lock_dev_open()
1475 rdtgrp = filp->private_data; in pseudo_lock_dev_release()
1479 return -ENODEV; in pseudo_lock_dev_release()
1481 filp->private_data = NULL; in pseudo_lock_dev_release()
1482 atomic_dec(&rdtgrp->waitcount); in pseudo_lock_dev_release()
1490 return -EINVAL; in pseudo_lock_dev_mremap()
1499 unsigned long vsize = vma->vm_end - vma->vm_start; in pseudo_lock_dev_mmap()
1500 unsigned long off = vma->vm_pgoff << PAGE_SHIFT; in pseudo_lock_dev_mmap()
1508 rdtgrp = filp->private_data; in pseudo_lock_dev_mmap()
1512 return -ENODEV; in pseudo_lock_dev_mmap()
1515 plr = rdtgrp->plr; in pseudo_lock_dev_mmap()
1517 if (!plr->d) { in pseudo_lock_dev_mmap()
1519 return -ENODEV; in pseudo_lock_dev_mmap()
1524 * with the pseudo-locked region. If this is not the case the task in pseudo_lock_dev_mmap()
1526 * pseudo-locked region. in pseudo_lock_dev_mmap()
1528 if (!cpumask_subset(current->cpus_ptr, &plr->d->hdr.cpu_mask)) { in pseudo_lock_dev_mmap()
1530 return -EINVAL; in pseudo_lock_dev_mmap()
1533 physical = __pa(plr->kmem) >> PAGE_SHIFT; in pseudo_lock_dev_mmap()
1534 psize = plr->size - off; in pseudo_lock_dev_mmap()
1536 if (off > plr->size) { in pseudo_lock_dev_mmap()
1538 return -ENOSPC; in pseudo_lock_dev_mmap()
1543 * do not allow copy-on-write mapping. in pseudo_lock_dev_mmap()
1545 if (!(vma->vm_flags & VM_SHARED)) { in pseudo_lock_dev_mmap()
1547 return -EINVAL; in pseudo_lock_dev_mmap()
1552 return -ENOSPC; in pseudo_lock_dev_mmap()
1555 memset(plr->kmem + off, 0, vsize); in pseudo_lock_dev_mmap()
1557 if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff, in pseudo_lock_dev_mmap()
1558 vsize, vma->vm_page_prot)) { in pseudo_lock_dev_mmap()
1560 return -EAGAIN; in pseudo_lock_dev_mmap()
1562 vma->vm_ops = &pseudo_mmap_ops; in pseudo_lock_dev_mmap()
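Once locked, the region is exposed as a character device named after the resource group under /dev/pseudo_lock/ (see pseudo_lock_devnode() near the top). This mmap handler requires a shared mapping no larger than the region, from a task confined to the CPUs of the locked cache instance. A hedged userspace sketch; the device name "newlock" and the 1 MiB size are assumptions:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int use_locked_region(void)
    {
            size_t size = 1024 * 1024;  /* must not exceed the region size */
            void *mem;
            int fd;

            /* The task is assumed to already be affined (sched_setaffinity())
             * to CPUs sharing the pseudo-locked cache instance.
             */
            fd = open("/dev/pseudo_lock/newlock", O_RDWR);
            if (fd < 0)
                    return -1;

            mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            close(fd);
            if (mem == MAP_FAILED)
                    return -1;

            memset(mem, 0, size);   /* data now resides in the locked ways */
            /* ... latency-sensitive work on mem ... */
            munmap(mem, size);
            return 0;
    }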