// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 ARM Ltd.
 */

#include <linux/mm.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <asm/tlbflush.h>

static inline bool mm_is_user(struct mm_struct *mm)
{
	/*
	 * Don't attempt to apply the contig bit to kernel mappings, because
	 * dynamically adding/removing the contig bit can cause page faults.
	 * These racing faults are ok for user space, since they get serialized
	 * on the PTL. But kernel mappings can't tolerate faults.
	 */
	if (unlikely(mm_is_efi(mm)))
		return false;
	return mm != &init_mm;
}

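/*
 * Return a pointer to the first entry of the contpte block containing ptep;
 * blocks are CONT_PTES entries long and naturally aligned.
 */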
static inline pte_t *contpte_align_down(pte_t *ptep)
{
	return PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
}

static void contpte_try_unfold_partial(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, unsigned int nr)
{
	/*
	 * Unfold any partially covered contpte block at the beginning and end
	 * of the range.
	 */

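	/*
	 * The first block is partial if ptep is not block-aligned, or if it
	 * is aligned but the range is too short to cover the whole block.
	 */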
	if (ptep != contpte_align_down(ptep) || nr < CONT_PTES)
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));

	if (ptep + nr != contpte_align_down(ptep + nr)) {
		unsigned long last_addr = addr + PAGE_SIZE * (nr - 1);
		pte_t *last_ptep = ptep + nr - 1;

		contpte_try_unfold(mm, last_addr, last_ptep,
				   __ptep_get(last_ptep));
	}
}

static void contpte_convert(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long start_addr;
	pte_t *start_ptep;
	int i;

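	/*
	 * Round the address and pointer down to the start of the contpte
	 * block and build a template pte carrying the block's first pfn.
	 */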
	start_ptep = ptep = contpte_align_down(ptep);
	start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
	pte = pfn_pte(ALIGN_DOWN(pte_pfn(pte), CONT_PTES), pte_pgprot(pte));

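	/*
	 * Clear each entry in turn, folding any hardware-set access and
	 * dirty bits into the template pte so they are not lost.
	 */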
	for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) {
		pte_t ptent = __ptep_get_and_clear(mm, addr, ptep);

		if (pte_dirty(ptent))
			pte = pte_mkdirty(pte);

		if (pte_young(ptent))
			pte = pte_mkyoung(pte);
	}

	/*
	 * On eliding the __tlb_flush_range() under BBML2+noabort:
	 *
	 * NOTE: Instead of using N=16 as the contiguous block length, we use
	 * N=4 for clarity.
	 *
	 * NOTE: 'n' and 'c' are used to denote the "contiguous bit" being
	 * unset and set, respectively.
	 *
	 * We worry about two cases where the contiguous bit is used:
	 * - When folding N smaller non-contiguous ptes as 1 contiguous block.
	 * - When unfolding a contiguous block into N smaller non-contiguous ptes.
	 *
	 * Currently, the BBML0 folding case looks as follows:
	 *
	 *  0) Initial page-table layout:
	 *
	 *   +----+----+----+----+
	 *   |RO,n|RO,n|RO,n|RW,n| <--- last page being set as RO
	 *   +----+----+----+----+
	 *
	 *  1) Aggregate AF + dirty flags using __ptep_get_and_clear():
	 *
	 *   +----+----+----+----+
	 *   |  0 |  0 |  0 |  0 |
	 *   +----+----+----+----+
	 *
	 *  2) __flush_tlb_range():
	 *
	 *   |____ tlbi + dsb ____|
	 *
	 *  3) __set_ptes() to repaint contiguous block:
	 *
	 *   +----+----+----+----+
	 *   |RO,c|RO,c|RO,c|RO,c|
	 *   +----+----+----+----+
	 *
	 *  4) The kernel will eventually __flush_tlb() for changed page:
	 *
	 *                  |____| <--- tlbi + dsb
	 *
	 * As expected, the intermediate tlbi+dsb ensures that other PEs
	 * only ever see an invalid (0) entry, or the new contiguous TLB entry.
	 * The final tlbi+dsb will always throw away the newly installed
	 * contiguous TLB entry, which is a micro-optimisation opportunity,
	 * but does not affect correctness.
	 *
	 * In the BBML2 case, the change is avoiding the intermediate tlbi+dsb.
	 * This means a few things, but notably other PEs will still "see" any
	 * stale cached TLB entries. This could lead to a "contiguous bit
	 * misprogramming" issue until the final tlbi+dsb of the changed page,
	 * which would clear out both the stale (RW,n) entry and the new (RO,c)
	 * contiguous entry installed in its place.
	 *
	 * What this is saying is the following:
	 *
	 *   +----+----+----+----+
	 *   |RO,n|RO,n|RO,n|RW,n| <--- old page tables, all non-contiguous
	 *   +----+----+----+----+
	 *
	 *   +----+----+----+----+
	 *   |RO,c|RO,c|RO,c|RO,c| <--- new page tables, all contiguous
	 *   +----+----+----+----+
	 *                     /\
	 *                     ||
	 *
	 * If both the old single (RW,n) and new contiguous (RO,c) TLB entries
	 * are present, and a write is made to this address, do we fault or
	 * is the write permitted (via amalgamation)?
	 *
	 * The relevant Arm ARM DDI 0487L.a requirements are RNGLXZ and RJQQTC,
	 * which together state that when BBML1 or BBML2 is implemented, either
	 * a TLB conflict abort is raised (which we expressly forbid), or the
	 * hardware will "produce an OA, access permissions, and memory
	 * attributes that are consistent with any of the programmed
	 * translation table values".
	 *
	 * That is to say, the hardware will either raise a TLB conflict abort,
	 * or produce one of the cached TLB entries, but never amalgamate them.
	 *
	 * Thus, as the page tables are only considered "consistent" after
	 * the final tlbi+dsb (which evicts both the single stale (RW,n) TLB
	 * entry as well as the new contiguous (RO,c) TLB entry), omitting the
	 * initial tlbi+dsb is correct.
	 *
	 * It is also important to note that at the end of the BBML2 folding
	 * case, we are still left with potentially all N TLB entries still
	 * cached (the N-1 non-contiguous ptes, and the single contiguous
	 * block). However, over time, natural TLB pressure will cause the
	 * non-contiguous pte TLB entries to be flushed, leaving only the
	 * contiguous block TLB entry. This means that omitting the tlbi+dsb is
	 * not only correct, but also keeps our eventual performance benefits.
	 *
	 * For the unfolding case, BBML0 looks as follows:
	 *
	 *  0) Initial page-table layout:
	 *
	 *   +----+----+----+----+
	 *   |RW,c|RW,c|RW,c|RW,c| <--- last page being set as RO
	 *   +----+----+----+----+
	 *
	 *  1) Aggregate AF + dirty flags using __ptep_get_and_clear():
	 *
	 *   +----+----+----+----+
	 *   |  0 |  0 |  0 |  0 |
	 *   +----+----+----+----+
	 *
	 *  2) __flush_tlb_range():
	 *
	 *   |____ tlbi + dsb ____|
	 *
	 *  3) __set_ptes() to repaint as non-contiguous:
	 *
	 *   +----+----+----+----+
	 *   |RW,n|RW,n|RW,n|RW,n|
	 *   +----+----+----+----+
	 *
	 *  4) Update changed page permissions:
	 *
	 *   +----+----+----+----+
	 *   |RW,n|RW,n|RW,n|RO,n| <--- last page permissions set
	 *   +----+----+----+----+
	 *
	 *  5) The kernel will eventually __flush_tlb() for changed page:
	 *
	 *                  |____| <--- tlbi + dsb
	 *
	 * For BBML2, we again remove the intermediate tlbi+dsb. Here, there
	 * are no issues, as the final tlbi+dsb covering the changed page is
	 * guaranteed to remove the original large contiguous (RW,c) TLB entry,
	 * as well as the intermediate (RW,n) TLB entry; the next access will
	 * install the new (RO,n) TLB entry and the page tables are only
	 * considered "consistent" after the final tlbi+dsb, so software must
	 * be prepared for this inconsistency prior to finishing the mm dance
	 * regardless.
	 */

	if (!system_supports_bbml2_noabort())
		__flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);

	__set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES);
}

void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t pte)
{
	/*
	 * We have already checked that the virtual and physical addresses are
	 * correctly aligned for a contpte mapping in contpte_try_fold() so the
	 * remaining checks are to ensure that the contpte range is fully
	 * covered by a single folio, and to ensure that all the ptes are valid
	 * with contiguous PFNs and matching prots. We ignore the state of the
	 * access and dirty bits for the purpose of deciding if it's a
	 * contiguous range; the folding process will generate a single contpte
	 * entry which has a single access and dirty bit. Those 2 bits are the
	 * logical OR of their respective bits in the constituent pte entries.
	 * In order to ensure the contpte range is covered by a single folio,
	 * we must recover the folio from the pfn, but special mappings don't
	 * have a folio backing them. Fortunately contpte_try_fold() already
	 * checked that the pte is not special - we never try to fold special
	 * mappings. Note we can't use vm_normal_page() for this since we don't
	 * have the vma.
	 */

	unsigned long folio_start, folio_end;
	unsigned long cont_start, cont_end;
	pte_t expected_pte, subpte;
	struct folio *folio;
	struct page *page;
	unsigned long pfn;
	pte_t *orig_ptep;
	pgprot_t prot;
	int i;

	if (!mm_is_user(mm))
		return;

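	/*
	 * Recover the folio backing this pte and bail out if it does not
	 * fully cover the contpte range.
	 */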
	page = pte_page(pte);
	folio = page_folio(page);
	folio_start = addr - (page - &folio->page) * PAGE_SIZE;
	folio_end = folio_start + folio_nr_pages(folio) * PAGE_SIZE;
	cont_start = ALIGN_DOWN(addr, CONT_PTE_SIZE);
	cont_end = cont_start + CONT_PTE_SIZE;

	if (folio_start > cont_start || folio_end < cont_end)
		return;

	pfn = ALIGN_DOWN(pte_pfn(pte), CONT_PTES);
	prot = pte_pgprot(pte_mkold(pte_mkclean(pte)));
	expected_pte = pfn_pte(pfn, prot);
	orig_ptep = ptep;
	ptep = contpte_align_down(ptep);

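	/*
	 * Every entry in the block, with access/dirty masked off, must match
	 * the expected template: valid, contiguous pfns and identical prots.
	 */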
	for (i = 0; i < CONT_PTES; i++) {
		subpte = pte_mkold(pte_mkclean(__ptep_get(ptep)));
		if (!pte_same(subpte, expected_pte))
			return;
		expected_pte = pte_advance_pfn(expected_pte, 1);
		ptep++;
	}

	pte = pte_mkcont(pte);
	contpte_convert(mm, addr, orig_ptep, pte);
}
EXPORT_SYMBOL_GPL(__contpte_try_fold);

void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte)
{
	/*
	 * We have already checked that the ptes are contiguous in
	 * contpte_try_unfold(), so just check that the mm is user space.
	 */
	if (!mm_is_user(mm))
		return;

	pte = pte_mknoncont(pte);
	contpte_convert(mm, addr, ptep, pte);
}
EXPORT_SYMBOL_GPL(__contpte_try_unfold);

pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte)
{
	/*
	 * Gather access/dirty bits, which may be populated in any of the ptes
	 * of the contig range. We are guaranteed to be holding the PTL, so any
	 * contiguous range cannot be unfolded or otherwise modified under our
	 * feet.
	 */

	pte_t pte;
	int i;

	ptep = contpte_align_down(ptep);

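	/*
	 * Once an entry has supplied one of the two bits, only the other
	 * remains to be found, so each inner loop scans for just that bit
	 * and stops the walk as soon as it is seen.
	 */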
	for (i = 0; i < CONT_PTES; i++, ptep++) {
		pte = __ptep_get(ptep);

		if (pte_dirty(pte)) {
			orig_pte = pte_mkdirty(orig_pte);
			for (; i < CONT_PTES; i++, ptep++) {
				pte = __ptep_get(ptep);
				if (pte_young(pte)) {
					orig_pte = pte_mkyoung(orig_pte);
					break;
				}
			}
			break;
		}

		if (pte_young(pte)) {
			orig_pte = pte_mkyoung(orig_pte);
			i++;
			ptep++;
			for (; i < CONT_PTES; i++, ptep++) {
				pte = __ptep_get(ptep);
				if (pte_dirty(pte)) {
					orig_pte = pte_mkdirty(orig_pte);
					break;
				}
			}
			break;
		}
	}

	return orig_pte;
}
EXPORT_SYMBOL_GPL(contpte_ptep_get);

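/*
 * A pte matches position @pfn of a consistent contpte block if it is a valid
 * contiguous entry carrying the expected pfn and, once access/dirty are
 * masked off, the same prot as the target entry.
 */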
static inline bool contpte_is_consistent(pte_t pte, unsigned long pfn,
					 pgprot_t orig_prot)
{
	pgprot_t prot = pte_pgprot(pte_mkold(pte_mkclean(pte)));

	return pte_valid_cont(pte) && pte_pfn(pte) == pfn &&
	       pgprot_val(prot) == pgprot_val(orig_prot);
}

pte_t contpte_ptep_get_lockless(pte_t *orig_ptep)
{
	/*
	 * The ptep_get_lockless() API requires us to read and return *orig_ptep
	 * so that it is self-consistent, without the PTL held, so we may be
	 * racing with other threads modifying the pte. Usually a READ_ONCE()
	 * would suffice, but for the contpte case, we also need to gather the
	 * access and dirty bits from across all ptes in the contiguous block,
	 * and we can't read all of those neighbouring ptes atomically, so any
	 * contiguous range may be unfolded/modified/refolded under our feet.
	 * Therefore we ensure we read a _consistent_ contpte range by checking
	 * that all ptes in the range are valid and have CONT_PTE set, that all
	 * pfns are contiguous and that all pgprots are the same (ignoring
	 * access/dirty). If we find a pte that is not consistent, then we must
	 * be racing with an update so start again. If the target pte does not
	 * have CONT_PTE set then that is considered consistent on its own
	 * because it is not part of a contpte range.
	 */

	pgprot_t orig_prot;
	unsigned long pfn;
	pte_t orig_pte;
	pte_t *ptep;
	pte_t pte;
	int i;

retry:
	orig_pte = __ptep_get(orig_ptep);

	if (!pte_valid_cont(orig_pte))
		return orig_pte;

	orig_prot = pte_pgprot(pte_mkold(pte_mkclean(orig_pte)));
	ptep = contpte_align_down(orig_ptep);
	pfn = pte_pfn(orig_pte) - (orig_ptep - ptep);

	for (i = 0; i < CONT_PTES; i++, ptep++, pfn++) {
		pte = __ptep_get(ptep);

		if (!contpte_is_consistent(pte, pfn, orig_prot))
			goto retry;

		if (pte_dirty(pte)) {
			orig_pte = pte_mkdirty(orig_pte);
			for (; i < CONT_PTES; i++, ptep++, pfn++) {
				pte = __ptep_get(ptep);

				if (!contpte_is_consistent(pte, pfn, orig_prot))
					goto retry;

				if (pte_young(pte)) {
					orig_pte = pte_mkyoung(orig_pte);
					break;
				}
			}
			break;
		}

		if (pte_young(pte)) {
			orig_pte = pte_mkyoung(orig_pte);
			i++;
			ptep++;
			pfn++;
			for (; i < CONT_PTES; i++, ptep++, pfn++) {
				pte = __ptep_get(ptep);

				if (!contpte_is_consistent(pte, pfn, orig_prot))
					goto retry;

				if (pte_dirty(pte)) {
					orig_pte = pte_mkdirty(orig_pte);
					break;
				}
			}
			break;
		}
	}

	return orig_pte;
}
EXPORT_SYMBOL_GPL(contpte_ptep_get_lockless);

void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
		      pte_t *ptep, pte_t pte, unsigned int nr)
{
	unsigned long next;
	unsigned long end;
	unsigned long pfn;
	pgprot_t prot;

	/*
	 * The set_ptes() spec guarantees that when nr > 1, the initial state of
	 * all ptes is not-present. Therefore we never need to unfold or
	 * otherwise invalidate a range before we set the new ptes.
	 * contpte_set_ptes() should never be called for nr < 2.
	 */
	VM_WARN_ON(nr == 1);

	if (!mm_is_user(mm))
		return __set_ptes(mm, addr, ptep, pte, nr);

	end = addr + (nr << PAGE_SHIFT);
	pfn = pte_pfn(pte);
	prot = pte_pgprot(pte);

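	/*
	 * Walk the range in chunks delimited by contpte block boundaries,
	 * setting the contig bit only on chunks whose virtual addresses and
	 * pfns line up as a whole, naturally aligned block.
	 */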
	do {
		next = pte_cont_addr_end(addr, end);
		nr = (next - addr) >> PAGE_SHIFT;
		pte = pfn_pte(pfn, prot);

		if (((addr | next | (pfn << PAGE_SHIFT)) & ~CONT_PTE_MASK) == 0)
			pte = pte_mkcont(pte);
		else
			pte = pte_mknoncont(pte);

		__set_ptes(mm, addr, ptep, pte, nr);

		addr = next;
		ptep += nr;
		pfn += nr;

	} while (addr != end);
}
EXPORT_SYMBOL_GPL(contpte_set_ptes);

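/*
 * For a batch clear, only blocks partially covered at the ends of the range
 * need to be unfolded; any block fully covered by the range disappears
 * entirely, so it can be cleared while still marked contiguous.
 */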
void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, unsigned int nr, int full)
{
	contpte_try_unfold_partial(mm, addr, ptep, nr);
	__clear_full_ptes(mm, addr, ptep, nr, full);
}
EXPORT_SYMBOL_GPL(contpte_clear_full_ptes);

pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep,
				      unsigned int nr, int full)
{
	contpte_try_unfold_partial(mm, addr, ptep, nr);
	return __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
}
EXPORT_SYMBOL_GPL(contpte_get_and_clear_full_ptes);

int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *ptep)
{
	/*
	 * ptep_clear_flush_young() technically requires us to clear the access
	 * flag for a _single_ pte. However, the core-mm code actually tracks
	 * access/dirty per folio, not per page. And since we only create a
	 * contig range when the range is covered by a single folio, we can get
	 * away with clearing young for the whole contig range here, so we avoid
	 * having to unfold.
	 */

	int young = 0;
	int i;

	ptep = contpte_align_down(ptep);
	addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);

	for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
		young |= __ptep_test_and_clear_young(vma, addr, ptep);

	return young;
}
EXPORT_SYMBOL_GPL(contpte_ptep_test_and_clear_young);

int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
				   unsigned long addr, pte_t *ptep)
{
	int young;

	young = contpte_ptep_test_and_clear_young(vma, addr, ptep);

	if (young) {
		/*
		 * See comment in __ptep_clear_flush_young(); same rationale for
		 * eliding the trailing DSB applies here.
		 */
		addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
		__flush_tlb_range_nosync(vma->vm_mm, addr, addr + CONT_PTE_SIZE,
					 PAGE_SIZE, true, 3);
	}

	return young;
}
EXPORT_SYMBOL_GPL(contpte_ptep_clear_flush_young);

void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned int nr)
{
	/*
	 * If wrprotecting an entire contig range, we can avoid unfolding. Just
	 * set wrprotect and wait for the later mmu_gather flush to invalidate
	 * the tlb. Until the flush, the page may or may not be wrprotected.
	 * After the flush, it is guaranteed wrprotected. If it's a partial
	 * range though, we must unfold, because we can't have a case where
	 * CONT_PTE is set but wrprotect applies to a subset of the PTEs; that
	 * would leave the block's state unpredictable even after the flush.
	 */

	contpte_try_unfold_partial(mm, addr, ptep, nr);
	__wrprotect_ptes(mm, addr, ptep, nr);
}
EXPORT_SYMBOL_GPL(contpte_wrprotect_ptes);

void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    unsigned int nr, cydp_t flags)
{
	/*
	 * We can safely clear access/dirty without needing to unfold from
	 * the architecture's perspective, even when contpte is set. If the
	 * range starts or ends midway through a contpte block, we can just
	 * expand to include the full contpte block. While this is not
	 * exactly what the core-mm asked for, it tracks access/dirty per
	 * folio, not per page. And since we only create a contpte block
	 * when it is covered by a single folio, we can get away with
	 * clearing access/dirty for the whole block.
	 */
	unsigned long start = addr;
	unsigned long end = start + nr * PAGE_SIZE;

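	/*
	 * Expand the range to full block boundaries wherever the first or
	 * last entry is contiguous.
	 */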
	if (pte_cont(__ptep_get(ptep + nr - 1)))
		end = ALIGN(end, CONT_PTE_SIZE);

	if (pte_cont(__ptep_get(ptep))) {
		start = ALIGN_DOWN(start, CONT_PTE_SIZE);
		ptep = contpte_align_down(ptep);
	}

	__clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags);
}
EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);

int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep,
				  pte_t entry, int dirty)
{
	unsigned long start_addr;
	pte_t orig_pte;
	int i;

	/*
	 * Gather the access/dirty bits for the contiguous range. If nothing
	 * has changed, it's a no-op.
	 */
	orig_pte = pte_mknoncont(ptep_get(ptep));
	if (pte_val(orig_pte) == pte_val(entry))
		return 0;

	/*
	 * We can fix up access/dirty bits without having to unfold the contig
	 * range. But if the write bit is changing, we must unfold.
	 */
	if (pte_write(orig_pte) == pte_write(entry)) {
		/*
		 * For HW access management, we technically only need to update
		 * the flag on a single pte in the range. But for SW access
		 * management, we need to update all the ptes to prevent extra
		 * faults. Avoid per-page tlb flush in __ptep_set_access_flags()
		 * and instead flush the whole range at the end.
		 */
		ptep = contpte_align_down(ptep);
		start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);

		/*
		 * We are not advancing entry because __ptep_set_access_flags()
		 * only consumes access flags from entry. And since we have
		 * checked for the whole contpte block and returned early,
		 * pte_same() within __ptep_set_access_flags() is likely false.
		 */
		for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
			__ptep_set_access_flags(vma, addr, ptep, entry, 0);

		if (dirty)
			__flush_tlb_range(vma, start_addr, addr,
					  PAGE_SIZE, true, 3);
	} else {
		__contpte_try_unfold(vma->vm_mm, addr, ptep, orig_pte);
		__ptep_set_access_flags(vma, addr, ptep, entry, dirty);
	}

	return 1;
}
EXPORT_SYMBOL_GPL(contpte_ptep_set_access_flags);