/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>

struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (there are many reasons for this,
 * such as madvise() or replacing a page with another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
 * range, i.e. using the vma access permission (vm_page_prot) to update the
 * whole range is enough; there is no need to inspect changes to the CPU page
 * table (mprotect() syscall).
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
 * flag for pages in the range, so to mirror those changes the user must
 * inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
 * same access flags). The user should soft dirty the page in the end callback
 * to make sure that anyone relying on soft dirtiness catches pages that might
 * be written through non-CPU mappings.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 *
 * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
 * a device driver that it can possibly ignore the invalidation if the
 * owner field matches the driver's device private pgmap owner.
 *
 * @MMU_NOTIFY_EXCLUSIVE: conversion of a page table entry to device-exclusive.
 * The owner is initialized to the value provided by the caller of
 * make_device_exclusive(), such that this caller can filter out these
 * events.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
	MMU_NOTIFY_MIGRATE,
	MMU_NOTIFY_EXCLUSIVE,
};
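
/*
 * Example: a minimal sketch (not part of this header) of how a device
 * driver's interval notifier callback might use the event type and the
 * owner field to skip an invalidation that the driver itself triggered.
 * The names my_dev, notifier and pgmap_owner are hypothetical; the
 * locking half of such a callback is sketched near mmu_interval_set_seq()
 * below:
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		struct my_dev *dev = container_of(sub, struct my_dev, notifier);
 *
 *		if (range->event == MMU_NOTIFY_MIGRATE &&
 *		    range->owner == dev->pgmap_owner)
 *			return true;
 *		...
 *	}
 *
 * Here the invalidation is already handled as part of the driver's own
 * migration path, so the callback can return early.
 */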

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, which would lead to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called while the VM is test-and-clearing
	 * the young/accessed bitflag in the pte. This way the VM will
	 * provide proper aging for accesses to the page through the
	 * secondary MMUs and not only for the ones through the Linux
	 * pte. Start-end is necessary in case the secondary MMU is
	 * mapping the page at a smaller granularity than the primary MMU.
	 */
	bool (*clear_flush_young)(struct mmu_notifier *subscription,
				  struct mm_struct *mm,
				  unsigned long start,
				  unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	bool (*clear_young)(struct mmu_notifier *subscription,
			    struct mm_struct *mm,
			    unsigned long start,
			    unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	bool (*test_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long address);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_lock and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount, but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the blockable argument is set to false then the callback cannot
	 * sleep and has to return -EAGAIN if sleeping would be required;
	 * 0 should be returned otherwise. Please note that notifiers that can
	 * fail invalidate_range_start are not allowed to implement
	 * invalidate_range_end, as there is no mechanism for informing the
	 * notifier that its start failed.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range);

	/*
	 * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
	 * which shares page-tables with the CPU. The
	 * invalidate_range_start()/end() callbacks should not be implemented,
	 * as arch_invalidate_secondary_tlbs() already catches the points in
	 * time when an external TLB needs to be flushed.
	 *
	 * This requires arch_invalidate_secondary_tlbs() to be called while
	 * holding the ptl spin-lock and therefore this callback is not allowed
	 * to sleep.
	 *
	 * This is called by architecture code whenever invalidating a TLB
	 * entry. It is assumed that any secondary TLB has the same rules for
	 * when invalidations are required. If this is not the case,
	 * architecture code will need to call this explicitly when required
	 * for secondary TLB invalidation.
	 */
	void (*arch_invalidate_secondary_tlbs)(
					struct mmu_notifier *subscription,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end);

	/*
	 * These callbacks are used with the get/put interface to manage the
	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
	 * notifier for use with the mm.
	 *
	 * free_notifier() is only called after the mmu_notifier has been
	 * fully put, calls to any ops callback are prevented and no ops
	 * callbacks are currently running. It is called from a SRCU callback
	 * and cannot sleep.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *subscription);
};
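
/*
 * Example: a minimal sketch of an ops table for a driver that mirrors
 * CPU page tables and honours the non-blockable case. All my_* names
 * are hypothetical; only .invalidate_range_start is shown in full:
 *
 *	static int my_start(struct mmu_notifier *sub,
 *			    const struct mmu_notifier_range *range)
 *	{
 *		struct my_mirror *m = container_of(sub, struct my_mirror, mn);
 *
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&m->lock);
 *		else if (!mutex_trylock(&m->lock))
 *			return -EAGAIN;
 *		my_zap_sptes(m, range->start, range->end);
 *		mutex_unlock(&m->lock);
 *		return 0;
 *	}
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_start,
 *	};
 *
 * Because my_start() can fail with -EAGAIN, this table must not also set
 * .invalidate_range_end (see the comment above).
 */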

/*
 * The notifier chains are protected by the mmap_lock and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_lock are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_lock is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release)
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
	struct mm_struct *mm;
	struct rcu_head rcu;
	unsigned int users;
};

/**
 * struct mmu_interval_notifier_finish - mmu_interval_notifier two-pass abstraction
 * @link: Lockless list link for the notifiers pending pass list
 * @notifier: The mmu_interval_notifier for which the finish pass is called.
 *
 * Allocate this structure, typically using GFP_NOWAIT, in the interval
 * notifier's start pass. Note that with a large number of notifiers
 * implementing two passes, allocation with GFP_NOWAIT becomes increasingly
 * likely to fail, so consider implementing a small pool instead of using
 * kmalloc() allocations.
 *
 * If the implementation needs to pass data between the start and the finish
 * passes, the recommended way is to embed struct mmu_interval_notifier_finish
 * into a larger structure that also contains the data needed to be shared.
 * Keep in mind that a notifier callback can be invoked in parallel, and each
 * invocation needs its own struct mmu_interval_notifier_finish.
 *
 * If allocation fails, then the &mmu_interval_notifier_ops->invalidate_start
 * op needs to implement the full notifier functionality. Please refer to its
 * documentation.
 */
struct mmu_interval_notifier_finish {
	struct llist_node link;
	struct mmu_interval_notifier *notifier;
};
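
/*
 * Example: a minimal sketch of embedding the finish object in a larger,
 * driver-defined structure so that data can flow from the start pass to
 * the finish pass. struct my_finish and its fields are hypothetical:
 *
 *	struct my_finish {
 *		struct mmu_interval_notifier_finish f;
 *		unsigned long start, end;
 *	};
 *
 * The start pass would allocate this with GFP_NOWAIT, record the range,
 * and return &mf->f through the *finish parameter; the finish pass then
 * recovers it with container_of(f, struct my_finish, f) and kfree()s it
 * once the second pass is complete.
 */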

/**
 * struct mmu_interval_notifier_ops - callback for range notification
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 * @invalidate_start: Similar to @invalidate, but intended for two-pass notifier
 *                    callbacks where the call to @invalidate_start is the first
 *                    pass and any struct mmu_interval_notifier_finish pointer
 *                    returned in the @finish parameter describes the finish pass.
 *                    If *@finish is %NULL on return, then no final pass will be
 *                    called, and @invalidate_start needs to implement the full
 *                    notifier, behaving like @invalidate. The value of *@finish
 *                    is guaranteed to be %NULL at function entry.
 * @invalidate_finish: Called as the second pass for any notifier that returned
 *                     a non-NULL *@finish from @invalidate_start. The @finish
 *                     pointer passed here is the same one returned by
 *                     @invalidate_start.
 */
struct mmu_interval_notifier_ops {
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
	bool (*invalidate_start)(struct mmu_interval_notifier *interval_sub,
				 const struct mmu_notifier_range *range,
				 unsigned long cur_seq,
				 struct mmu_interval_notifier_finish **finish);
	void (*invalidate_finish)(struct mmu_interval_notifier_finish *finish);
};
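
/*
 * Example: a minimal sketch of a two-pass implementation using the
 * hypothetical struct my_finish from the example above. If the
 * GFP_NOWAIT allocation fails, the start pass falls back to behaving
 * like a full @invalidate, as required by the documentation:
 *
 *	static bool my_inv_start(struct mmu_interval_notifier *sub,
 *				 const struct mmu_notifier_range *range,
 *				 unsigned long cur_seq,
 *				 struct mmu_interval_notifier_finish **finish)
 *	{
 *		struct my_finish *mf = kmalloc(sizeof(*mf), GFP_NOWAIT);
 *
 *		if (!mf)
 *			return my_invalidate(sub, range, cur_seq);
 *
 *		... first pass: stop new access, record deferred work ...
 *		mf->f.notifier = sub;
 *		mf->start = range->start;
 *		mf->end = range->end;
 *		*finish = &mf->f;
 *		return true;
 *	}
 *
 *	static void my_inv_finish(struct mmu_interval_notifier_finish *f)
 *	{
 *		struct my_finish *mf = container_of(f, struct my_finish, f);
 *
 *		... second pass: flush/free using mf->start and mf->end ...
 *		kfree(mf);
 *	}
 */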

struct mmu_interval_notifier {
	struct interval_tree_node interval_tree;
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;
	unsigned long invalidate_seq;
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
	void *owner;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->notifier_subscriptions);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *ret;

	mmap_write_lock(mm);
	ret = mmu_notifier_get_locked(ops, mm);
	mmap_write_unlock(mm);
	return ret;
}
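
/*
 * Example: a minimal sketch of the get/put lifetime model. Assuming
 * my_ops implements alloc_notifier()/free_notifier(), a driver obtains
 * one registration per mm and later drops it without blocking:
 *
 *	struct mmu_notifier *mn = mmu_notifier_get(&my_ops, current->mm);
 *
 *	if (IS_ERR(mn))
 *		return PTR_ERR(mn);
 *	...
 *	mmu_notifier_put(mn);
 *
 * A module using this interface would typically also call
 * mmu_notifier_synchronize() before unloading, so that any pending SRCU
 * callback invoking free_notifier() has completed.
 */
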
void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);

extern int mmu_notifier_register(struct mmu_notifier *subscription,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
				    struct mm_struct *mm);

unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);

/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @interval_sub: The subscription passed to invalidate
 * @cur_seq: The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
 *
 * If the caller does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}
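
/*
 * Example: a minimal sketch of an invalidate() callback cooperating with
 * the read side through a driver-provided lock. struct my_dev, its
 * mutex and my_zap_device_ptes() are hypothetical:
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		struct my_dev *dev = container_of(sub, struct my_dev, notifier);
 *
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&dev->lock);
 *		else if (!mutex_trylock(&dev->lock))
 *			return false;
 *
 *		mmu_interval_set_seq(sub, cur_seq);
 *		my_zap_device_ptes(dev, range->start, range->end);
 *		mutex_unlock(&dev->lock);
 *		return true;
 *	}
 */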

/**
 * mmu_interval_read_retry - End a read side critical section against a VA range
 * @interval_sub: The subscription
 * @seq: The return of the paired mmu_interval_read_begin()
 *
 * This MUST be called under a user provided lock that is also held
 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
 *
 * Each call should be paired with a single mmu_interval_read_begin() and
 * should be used to conclude the read side.
 *
 * Returns: true if an invalidation collided with this critical section, and
 * the caller should retry.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
			unsigned long seq)
{
	return interval_sub->invalidate_seq != seq;
}
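
/*
 * Example: a minimal sketch of the matching read side, pairing with the
 * my_invalidate() sketch above; dev->lock is the same driver lock taken
 * by the invalidate callback:
 *
 *	unsigned long seq;
 *
 *	again:
 *		seq = mmu_interval_read_begin(&dev->notifier);
 *		... fault or walk the CPU page tables for the range ...
 *		mutex_lock(&dev->lock);
 *		if (mmu_interval_read_retry(&dev->notifier, seq)) {
 *			mutex_unlock(&dev->lock);
 *			goto again;
 *		}
 *		... establish or update the device PTEs under dev->lock ...
 *		mutex_unlock(&dev->lock);
 */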

/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * @interval_sub: The subscription
 * @seq: The return of the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry().
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry. It can be called many times and does not have to hold
 * the user provided lock.
 *
 * Returns: true indicates an invalidation has collided with this critical
 * region and a future mmu_interval_read_retry() will return true.
 * False is not reliable and only suggests a collision may not have
 * occurred.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
			 unsigned long seq)
{
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	return READ_ONCE(interval_sub->invalidate_seq) != seq;
}

extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
bool __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				      unsigned long start, unsigned long end);
bool __mmu_notifier_clear_young(struct mm_struct *mm,
				unsigned long start, unsigned long end);
bool __mmu_notifier_test_young(struct mm_struct *mm,
			       unsigned long address);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
					unsigned long start, unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline bool mmu_notifier_clear_flush_young(struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return false;
}

static inline bool mmu_notifier_clear_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return false;
}

static inline bool mmu_notifier_test_young(struct mm_struct *mm,
					   unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return false;
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
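
/*
 * Example: a minimal sketch of the caller side as used throughout mm/:
 * initialize a range, then bracket the CPU page-table update with the
 * start/end pair (mm, start and end come from the surrounding context):
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	... clear or modify the page table entries in [start, end) ...
 *	mmu_notifier_invalidate_range_end(&range);
 */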

/*
 * This version of mmu_notifier_invalidate_range_start() avoids blocking, but
 * it can return an error if a notifier can't proceed without blocking, in
 * which case you're not allowed to modify PTEs in the specified range.
 *
 * This is mainly intended for OOM handling.
 */
static inline int __must_check
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}
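
/*
 * Example: a minimal sketch of the nonblocking variant as an OOM-style
 * path might use it. On failure the PTEs must be left untouched and the
 * invalidate_range_end() call is skipped for this range:
 *
 *	if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
 *		... a notifier would need to sleep: bail out and skip
 *		... this mm without calling invalidate_range_end()
 *		return false;
 *	}
 *	... unmap or modify the PTEs in the range ...
 *	mmu_notifier_invalidate_range_end(&range);
 */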

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range);
}

static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	mm->notifier_subscriptions = NULL;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_subscriptions_destroy(mm);
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   enum mmu_notifier_event event,
					   unsigned flags,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->event = event;
	range->mm = mm;
	range->start = start;
	range->end = end;
	range->flags = flags;
}

static inline void mmu_notifier_range_init_owner(
			struct mmu_notifier_range *range,
			enum mmu_notifier_event event, unsigned int flags,
			struct mm_struct *mm, unsigned long start,
			unsigned long end, void *owner)
{
	mmu_notifier_range_init(range, event, flags, mm, start, end);
	range->owner = owner;
}
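
/*
 * Example: a minimal sketch of tagging an invalidation with an owner so
 * that a cooperating driver can recognise, and possibly skip, its own
 * invalidation (compare the MMU_NOTIFY_MIGRATE filtering sketch near the
 * top of this file); pgmap_owner comes from the surrounding context:
 *
 *	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
 *				      mm, start, end, pgmap_owner);
 *	mmu_notifier_invalidate_range_start(&range);
 */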

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range, event, flags, mm, start, end)	\
	_mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_owner(range, event, flags, mm, start,	\
				      end, owner)			\
	_mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline bool mmu_notifier_clear_flush_young(struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	return false;
}

static inline bool mmu_notifier_clear_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	return false;
}

static inline bool mmu_notifier_test_young(struct mm_struct *mm,
					   unsigned long address)
{
	return false;
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */