/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
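
/*
 * A minimal sketch of the resulting update sequence, using the generic
 * pgtable helpers (the specific helper names here are illustrative, the
 * ordering is what matters):
 *
 *	old = ptep_get_and_clear(mm, addr, ptep);	step i):  invalidate
 *	set_pte_at(mm, addr, ptep, new_pte);		step ii): store new pte
 *
 * A plain "*ptep = new_pte" on an attached, valid entry would violate
 * the rule quoted above.
 */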

static inline void tlb_flush(struct mmu_gather *tlb);
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
		struct page *page, bool delay_rmap, int page_size);
static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
		struct page *page, unsigned int nr_pages, bool delay_rmap);

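/*
 * These self-referencing defines tell asm-generic/tlb.h that s390
 * provides its own versions of these hooks, so the generic fallbacks
 * guarded by #ifndef in that header are not used.
 */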
#define tlb_flush tlb_flush
#define pte_free_tlb pte_free_tlb
#define pmd_free_tlb pmd_free_tlb
#define p4d_free_tlb p4d_free_tlb
#define pud_free_tlb pud_free_tlb

#include <asm/tlbflush.h>
#include <asm-generic/tlb.h>
#include <asm/gmap.h>

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been flushed, so just call free_folio_and_swap_cache.
 *
 * s390 doesn't delay rmap removal.
 */
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
		struct page *page, bool delay_rmap, int page_size)
{
	VM_WARN_ON_ONCE(delay_rmap);

	free_folio_and_swap_cache(page_folio(page));
	return false;
}

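/*
 * The two-element array below follows the mmu_gather batching convention
 * from the generic code: the first slot is the page pointer tagged with
 * ENCODED_PAGE_BIT_NR_PAGES_NEXT, which tells free_pages_and_swap_cache()
 * that the following slot carries a page count instead of another page.
 */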
static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
		struct page *page, unsigned int nr_pages, bool delay_rmap)
{
	struct encoded_page *encoded_pages[] = {
		encode_page(page, ENCODED_PAGE_BIT_NR_PAGES_NEXT),
		encode_nr_pages(nr_pages),
	};

	VM_WARN_ON_ONCE(delay_rmap);
	VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));

	free_pages_and_swap_cache(encoded_pages, ARRAY_SIZE(encoded_pages));
	return false;
}

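/*
 * tlb_flush() is called at the end of a mmu_gather cycle;
 * __tlb_flush_mm_lazy() only issues a real flush when mm->context.flush_mm
 * has been set, e.g. by the pXd_free_tlb() helpers below, and is a no-op
 * otherwise.
 */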
static inline void tlb_flush(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}

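/*
 * The table freeing helpers below all follow the same pattern: record a
 * minimal PAGE_SIZE range so the mmu_gather is not considered empty, set
 * flush_mm so that tlb_flush() above really flushes, mark freed_tables
 * and the matching cleared_* flag for the generic code, and hand the
 * table page to tlb_remove_ptdesc() so it is only released after the
 * flush.
 */
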
/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_pmds = 1;
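	/*
	 * With extended pgstes (KVM guests) this page table may still be
	 * linked into a guest address space; gmap_unlink() detaches it
	 * from the gmap before the table is queued for freeing.
	 */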
	if (mm_has_pgste(tlb->mm))
		gmap_unlink(tlb->mm, (unsigned long *)pte, address);
	tlb_remove_ptdesc(tlb, virt_to_ptdesc(pte));
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (mm_pmd_folded(tlb->mm))
		return;
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_puds = 1;
	tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}

/*
 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four level page table the single p4d is freed
 * as the pgd. p4d_free_tlb checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long address)
{
	if (mm_p4d_folded(tlb->mm))
		return;
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (mm_pud_folded(tlb->mm))
		return;
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_p4ds = 1;
	tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}

#endif /* _S390_TLB_H */