/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/cpufeature.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/ctlreg.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
extern void paging_init(void);
extern struct ctlreg s390_invalid_asce;

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}
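
/*
 * Illustrative sketch (not a call site from the kernel sources): the
 * direct mapping code would account a newly created 1 MB segment
 * mapping, and a later split of it into 256 4 KB pages, roughly as:
 *
 *	update_page_count(PG_DIRECT_MAP_1M, 1);
 *	...
 *	update_page_count(PG_DIRECT_MAP_1M, -1);
 *	update_page_count(PG_DIRECT_MAP_4K, 256);
 */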

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_range(vmf, vma, addr, ptep, nr)	do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
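
/*
 * Sketch of what the coloring buys (mask value made up for illustration):
 * with zero_page_mask == 0x3000UL there are four consecutive zero pages,
 * and ZERO_PAGE(vaddr) picks the one matching the low bits of the
 * faulting address, e.g. ZERO_PAGE(0x5000) resolves to the page at
 * empty_zero_page + 0x1000.
 */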

/* TODO: s390 cannot support io_remap_pfn_range... */

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. 512GB are reserved for vmalloc by default.
 * At the top of the vmalloc area a 2GB area is reserved where modules
 * will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a
 * 2GB frame is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
extern struct page *vmemmap;
extern unsigned long vmemmap_size;

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

#ifdef CONFIG_KMSAN
#define KMSAN_VMALLOC_SIZE		(VMALLOC_END - VMALLOC_START)
#define KMSAN_VMALLOC_SHADOW_START	VMALLOC_END
#define KMSAN_VMALLOC_SHADOW_END	(KMSAN_VMALLOC_SHADOW_START + KMSAN_VMALLOC_SIZE)
#define KMSAN_VMALLOC_ORIGIN_START	KMSAN_VMALLOC_SHADOW_END
#define KMSAN_VMALLOC_ORIGIN_END	(KMSAN_VMALLOC_ORIGIN_START + KMSAN_VMALLOC_SIZE)
#define KMSAN_MODULES_SHADOW_START	KMSAN_VMALLOC_ORIGIN_END
#define KMSAN_MODULES_SHADOW_END	(KMSAN_MODULES_SHADOW_START + MODULES_LEN)
#define KMSAN_MODULES_ORIGIN_START	KMSAN_MODULES_SHADOW_END
#define KMSAN_MODULES_ORIGIN_END	(KMSAN_MODULES_ORIGIN_START + MODULES_LEN)
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define KASLR_LEN	(1UL << 31)
#else
#define KASLR_LEN	0UL
#endif

void setup_protection_map(void);

/*
 * A 64 bit pagetable entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

#define _PAGE_SW_BITS	0xffUL		/* All SW bits */

#define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE	/* SW pte exclusive swap bit */

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * Mask of bits that must not be changed with RDP. Allow only _PAGE_PROTECT
 * HW bit and all SW bits.
 */
#define _PAGE_RDP_MASK		~(_PAGE_PROTECT | _PAGE_SW_BITS)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
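
/*
 * Worked example for the table above: a read-only, clean, young pte has
 * _PAGE_PROTECT | _PAGE_READ | _PAGE_YOUNG | _PAGE_PRESENT set, i.e. its
 * low 12 bits are 0x215. Then:
 *
 *	(0x215 & 0x001) == 0x001	-> pte_present() is true
 *	(0x215 & 0x201) == 0x201	-> pte_swap() is false
 *	 0x215 != 0x400			-> pte_none() is false
 */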

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH | \
				 _REGION3_ENTRY_PRESENT)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_HARDWARE_BITS		0xfffffffffffff6ffUL
#define _REGION3_ENTRY_HARDWARE_BITS_LARGE	0xffffffff8001073cUL
#define _REGION3_ENTRY_ORIGIN_LARGE	~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_COMM	0x0010	/* Common-Region, marks swap entry */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_WRITE	0x8000	/* SW region write bit */
#define _REGION3_ENTRY_READ	0x4000	/* SW region read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x0002 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	0xfffffffffffff22fUL

/*
 * SW region present bit. For non-leaf region-third-table entries, bits 62-63
 * indicate the TABLE LENGTH and both must be set to 1. But such entries
 * would always be considered as present, so it is safe to use bit 63 as
 * PRESENT bit for PUD.
 */
#define _REGION3_ENTRY_PRESENT	0x0001

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe3fUL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe3cUL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff1073cUL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PRESENT)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */

#define _SEGMENT_ENTRY_COMM	0x0010	/* Common-Segment, marks swap entry */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x8000	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x4000	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0002 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _SEGMENT_ENTRY_PRESENT	0x0001	/* SW segment present bit */

/* Common bits in region and segment table entries, for swap entries */
#define _RST_ENTRY_COMM		0x0010	/* Common-Region/Segment, marks swap entry */
#define _RST_ENTRY_INVALID	0x0020	/* invalid region/segment table entry */

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL << PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define __PAGE_NONE	(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define __PAGE_RO	(_PAGE_PRESENT | _PAGE_READ | \
			 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define __PAGE_RX	(_PAGE_PRESENT | _PAGE_READ | \
			 _PAGE_INVALID | _PAGE_PROTECT)
#define __PAGE_RW	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define __PAGE_RWX	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_INVALID | _PAGE_PROTECT)
#define __PAGE_SHARED	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define __PAGE_KERNEL	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define __PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)

extern unsigned long page_noexec_mask;

#define __pgprot_page_mask(x)	__pgprot((x) & page_noexec_mask)

#define PAGE_NONE	__pgprot_page_mask(__PAGE_NONE)
#define PAGE_RO		__pgprot_page_mask(__PAGE_RO)
#define PAGE_RX		__pgprot_page_mask(__PAGE_RX)
#define PAGE_RW		__pgprot_page_mask(__PAGE_RW)
#define PAGE_RWX	__pgprot_page_mask(__PAGE_RWX)
#define PAGE_SHARED	__pgprot_page_mask(__PAGE_SHARED)
#define PAGE_KERNEL	__pgprot_page_mask(__PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot_page_mask(__PAGE_KERNEL_RO)

/*
 * Segment entry (large page) protection definitions.
 */
#define __SEGMENT_NONE		(_SEGMENT_ENTRY_PRESENT | \
				 _SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define __SEGMENT_RO		(_SEGMENT_ENTRY_PRESENT | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define __SEGMENT_RX		(_SEGMENT_ENTRY_PRESENT | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define __SEGMENT_RW		(_SEGMENT_ENTRY_PRESENT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define __SEGMENT_RWX		(_SEGMENT_ENTRY_PRESENT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define __SEGMENT_KERNEL	(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define __SEGMENT_KERNEL_RO	(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)

extern unsigned long segment_noexec_mask;

#define __pgprot_segment_mask(x)	__pgprot((x) & segment_noexec_mask)

#define SEGMENT_NONE		__pgprot_segment_mask(__SEGMENT_NONE)
#define SEGMENT_RO		__pgprot_segment_mask(__SEGMENT_RO)
#define SEGMENT_RX		__pgprot_segment_mask(__SEGMENT_RX)
#define SEGMENT_RW		__pgprot_segment_mask(__SEGMENT_RW)
#define SEGMENT_RWX		__pgprot_segment_mask(__SEGMENT_RWX)
#define SEGMENT_KERNEL		__pgprot_segment_mask(__SEGMENT_KERNEL)
#define SEGMENT_KERNEL_RO	__pgprot_segment_mask(__SEGMENT_KERNEL_RO)

/*
 * Region3 entry (large page) protection definitions.
 */

#define __REGION3_KERNEL	(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_PRESENT | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define __REGION3_KERNEL_RO	(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_PRESENT | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION_ENTRY_PROTECT | \
				 _REGION_ENTRY_NOEXEC)

extern unsigned long region_noexec_mask;

#define __pgprot_region_mask(x)	__pgprot((x) & region_noexec_mask)

#define REGION3_KERNEL		__pgprot_region_mask(__REGION3_KERNEL)
#define REGION3_KERNEL_RO	__pgprot_region_mask(__REGION3_KERNEL_RO)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)
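
/*
 * Example: with a 4 TB address space (asce_limit == _REGION2_SIZE) the
 * top-level table is a region-third table, so mm_p4d_folded() and
 * mm_pud_folded() are true while mm_pmd_folded() is false; only a 2 GB
 * limit (asce_limit == _REGION3_SIZE) additionally folds the pmd level.
 */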

static inline int mm_is_protected(struct mm_struct *mm)
{
#if IS_ENABLED(CONFIG_KVM)
	if (unlikely(atomic_read(&mm->context.protected_count)))
		return 1;
#endif
	return 0;
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) & ~pgprot_val(prot));
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) | pgprot_val(prot));
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(prot));
}

static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) & ~pgprot_val(prot));
}

static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) | pgprot_val(prot));
}

/*
 * As soon as the guest uses storage keys or enables PV, we deduplicate all
 * mapped shared zeropages and prevent new shared zeropages from getting
 * mapped.
 */
#define mm_forbids_zeropage mm_forbids_zeropage
static inline int mm_forbids_zeropage(struct mm_struct *mm)
{
#if IS_ENABLED(CONFIG_KVM)
	if (!mm->context.allow_cow_sharing)
		return 1;
#endif
	return 0;
}

/**
 * cspg() - Compare and Swap and Purge (CSPG)
 * @ptr: Pointer to the value to be exchanged
 * @old: The expected old value
 * @new: The new value
 *
 * Return: True if compare and swap was successful, otherwise false.
 */
static inline bool cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	cspg	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
	return old == r1.even;
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

/**
 * crdte() - Compare and Replace DAT Table Entry
 * @old:     The expected old value
 * @new:     The new value
 * @table:   Pointer to the value to be exchanged
 * @dtt:     Table type of the table to be exchanged
 * @address: The address mapped by the entry to be replaced
 * @asce:    The ASCE of this entry
 *
 * Return: True if compare and replace was successful, otherwise false.
 */
static inline bool crdte(unsigned long old, unsigned long new,
			 unsigned long *table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	union register_pair r1 = { .even = old, .odd = new, };
	union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };

	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
		     : [r1] "+&d" (r1.pair)
		     : [r2] "d" (r2.pair), [asce] "a" (asce)
		     : "memory", "cc");
	return old == r1.even;
}
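
/*
 * Illustrative use only (invented call site; "pmdp", "addr" and "mm" are
 * assumed locals): invalidating a segment table entry via CRDTE could
 * look like:
 *
 *	unsigned long old = pmd_val(*pmdp);
 *
 *	crdte(old, old | _SEGMENT_ENTRY_INVALID, (unsigned long *)pmdp,
 *	      CRDTE_DTT_SEGMENT, addr, mm->context.asce);
 */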

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION3_ENTRY_PRESENT) != 0;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf pud_leaf
static inline bool pud_leaf(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_present(pud) && (pud_val(pud) & _REGION3_ENTRY_LARGE) != 0);
}

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_PRESENT) != 0;
}

#define pmd_leaf pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
	return (pmd_present(pmd) && (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_leaf(pmd) implies pmd_present(pmd) */
	return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define pmd_swp_soft_dirty(pmd)		pmd_soft_dirty(pmd)
#define pmd_swp_mksoft_dirty(pmd)	pmd_mksoft_dirty(pmd)
#define pmd_swp_clear_soft_dirty(pmd)	pmd_clear_soft_dirty(pmd)
#endif

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * Extract the pgprot value from the given pte while at the same time making it
 * usable for kernel address space mappings where fault driven dirty and
 * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
 * must not be set.
 */
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;

	if (pte_write(pte))
		pte_flags |= pgprot_val(PAGE_KERNEL);
	else
		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
	pte_flags |= pte_val(pte) & mio_wb_bit_mask;

	return __pgprot(pte_flags);
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pgd);
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	WRITE_ONCE(*p4dp, p4d);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, __pte(_PAGE_INVALID));
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
	pte = set_pte_bit(pte, newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}
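
/*
 * Example of the two fixups above: pte_modify(pte, PAGE_RW) on a pte that
 * is both young and dirty keeps the page accessible and writable, because
 * _PAGE_INVALID is cleared again (young + readable) and _PAGE_PROTECT is
 * cleared again (dirty + writable), so no extra fault is needed.
 */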

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
	if (pte_val(pte) & _PAGE_DIRTY)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
	if (pte_val(pte) & _PAGE_WRITE)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	if (pte_val(pte) & _PAGE_READ)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
}
#endif

static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
	asm volatile("sske %[skey],%[addr],1"
		     : [addr] "+a" (addr) : [skey] "d" (skey));
	return addr;
}

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, int local)
{
	unsigned long pto;

	pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
	asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%%r0,%[m4]"
		     : "+m" (*ptep)
		     : [r1] "a" (pto), [r2] "a" (addr & PAGE_MASK),
		       [m4] "i" (local));
}

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = __pa(ptep);

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	ipte	%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	ipte	%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = __pa(ptep);

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	ipte	%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(mm) && pte_present(res))
		WARN_ON_ONCE(uv_convert_from_secure_pte(res));
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		WARN_ON_ONCE(uv_convert_from_secure_pte(res));
	return res;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		set_pte(ptep, __pte(_PAGE_INVALID));
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	/* Nothing to do */
	if (!mm_is_protected(mm) || !pte_present(res))
		return res;
	/*
	 * At this point the reference through the mapping is still present.
	 * The notifier should have destroyed all protected vCPUs at this
	 * point, so the destroy should be successful.
	 */
	if (full && !uv_destroy_pte(res))
		return res;
	/*
	 * If something went wrong and the page could not be destroyed, or
	 * if this is not a mm teardown, the slower export is used as
	 * fallback instead. If even that fails, print a warning and leak
	 * the page, to avoid crashing the whole system.
	 */
	WARN_ON_ONCE(uv_convert_from_secure_pte(res));
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

/*
 * Check if PTEs only differ in _PAGE_PROTECT HW bit, but also allow SW PTE
 * bits in the comparison. Those might change e.g. because of dirty and young
 * tracking.
 */
static inline int pte_allow_rdp(pte_t old, pte_t new)
{
	/*
	 * Only allow changes from RO to RW
	 */
	if (!(pte_val(old) & _PAGE_PROTECT) || pte_val(new) & _PAGE_PROTECT)
		return 0;

	return (pte_val(old) & _PAGE_RDP_MASK) == (pte_val(new) & _PAGE_RDP_MASK);
}

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep)
{
	/*
	 * RDP might not have propagated the PTE protection reset to all CPUs,
	 * so there could be spurious TLB protection faults.
	 * NOTE: This will also be called when a racing pagetable update on
	 * another thread already installed the correct PTE. Both cases cannot
	 * really be distinguished.
	 * Therefore, only do the local TLB flush when RDP can be used, and the
	 * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead.
	 * A local RDP can be used to do the flush.
	 */
	if (cpu_has_rdp() && !(pte_val(*ptep) & _PAGE_PROTECT))
		__ptep_rdp(address, ptep, 1);
}
#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault

void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
			 pte_t new);

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	if (cpu_has_rdp() && pte_allow_rdp(*ptep, entry))
		ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry);
	else
		ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

#define pgprot_writecombine	pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define PFN_PTE_SHIFT		PAGE_SHIFT

/*
 * Set multiple PTEs to consecutive pages with a single call. All PTEs
 * are within the same folio, PMD and VMA.
 */
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t entry, unsigned int nr)
{
	if (pte_present(entry))
		entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
	for (;;) {
		set_pte(ptep, entry);
		if (--nr == 0)
			break;
		ptep++;
		entry = __pte(pte_val(entry) + PAGE_SIZE);
	}
}
#define set_ptes set_ptes
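
/*
 * Sketch: set_ptes(mm, addr, ptep, entry, 4) with pte value V installs
 * V, V + 0x1000, V + 0x2000 and V + 0x3000 in consecutive pte slots;
 * simply adding PAGE_SIZE to the pte value works because the pfn is
 * encoded at PFN_PTE_SHIFT == PAGE_SHIFT.
 */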

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	__pte = __pte(physpage | pgprot_val(pgprot));
	return pte_mkyoung(__pte);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))

static inline unsigned long pmd_deref(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_leaf(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_leaf(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pud_val(pud) & origin_mask);
}

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}
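
/*
 * The shift computation above can be checked against the table types:
 * for a region-first table (_REGION_ENTRY_TYPE_R1 == 0x0c) it yields
 * ((0x0c >> 2) * 11) + 20 == 53 == _REGION1_SHIFT, and for a region-third
 * table (_REGION_ENTRY_TYPE_R3 == 0x04) it yields
 * ((0x04 >> 2) * 11) + 20 == 31 == _REGION3_SHIFT.
 */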
1413
1414 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
1415
p4d_offset_lockless(pgd_t * pgdp,pgd_t pgd,unsigned long address)1416 static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
1417 {
1418 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
1419 return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
1420 return (p4d_t *) pgdp;
1421 }
1422 #define p4d_offset_lockless p4d_offset_lockless
1423
p4d_offset(pgd_t * pgdp,unsigned long address)1424 static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
1425 {
1426 return p4d_offset_lockless(pgdp, *pgdp, address);
1427 }
1428
pud_offset_lockless(p4d_t * p4dp,p4d_t p4d,unsigned long address)1429 static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
1430 {
1431 if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
1432 return (pud_t *) p4d_deref(p4d) + pud_index(address);
1433 return (pud_t *) p4dp;
1434 }
1435 #define pud_offset_lockless pud_offset_lockless
1436
pud_offset(p4d_t * p4dp,unsigned long address)1437 static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
1438 {
1439 return pud_offset_lockless(p4dp, *p4dp, address);
1440 }
1441 #define pud_offset pud_offset
1442
pmd_offset_lockless(pud_t * pudp,pud_t pud,unsigned long address)1443 static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
1444 {
1445 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
1446 return (pmd_t *) pud_deref(pud) + pmd_index(address);
1447 return (pmd_t *) pudp;
1448 }
1449 #define pmd_offset_lockless pmd_offset_lockless
1450
pmd_offset(pud_t * pudp,unsigned long address)1451 static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
1452 {
1453 return pmd_offset_lockless(pudp, *pudp, address);
1454 }
1455 #define pmd_offset pmd_offset
1456
pmd_page_vaddr(pmd_t pmd)1457 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
1458 {
1459 return (unsigned long) pmd_deref(pmd);
1460 }
1461
gup_fast_permitted(unsigned long start,unsigned long end)1462 static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
1463 {
1464 return end <= current->mm->context.asce_limit;
1465 }
1466 #define gup_fast_permitted gup_fast_permitted
1467
1468 #define pfn_pte(pfn, pgprot) mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
1469 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1470 #define pte_page(x) pfn_to_page(pte_pfn(x))
1471
1472 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1473 #define pud_page(pud) pfn_to_page(pud_pfn(pud))
1474 #define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
1475 #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
1476
pmd_wrprotect(pmd_t pmd)1477 static inline pmd_t pmd_wrprotect(pmd_t pmd)
1478 {
1479 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
1480 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1481 }
1482
pmd_mkwrite_novma(pmd_t pmd)1483 static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
1484 {
1485 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
1486 if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
1487 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1488 return pmd;
1489 }
1490
pmd_mkclean(pmd_t pmd)1491 static inline pmd_t pmd_mkclean(pmd_t pmd)
1492 {
1493 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
1494 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1495 }
1496
pmd_mkdirty(pmd_t pmd)1497 static inline pmd_t pmd_mkdirty(pmd_t pmd)
1498 {
1499 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
1500 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1501 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1502 return pmd;
1503 }
1504
pud_wrprotect(pud_t pud)1505 static inline pud_t pud_wrprotect(pud_t pud)
1506 {
1507 pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
1508 return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1509 }
1510
pud_mkwrite(pud_t pud)1511 static inline pud_t pud_mkwrite(pud_t pud)
1512 {
1513 pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
1514 if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
1515 pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1516 return pud;
1517 }
1518
pud_mkclean(pud_t pud)1519 static inline pud_t pud_mkclean(pud_t pud)
1520 {
1521 pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
1522 return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1523 }
1524
pud_mkdirty(pud_t pud)1525 static inline pud_t pud_mkdirty(pud_t pud)
1526 {
1527 pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
1528 if (pud_val(pud) & _REGION3_ENTRY_WRITE)
1529 pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1530 return pud;
1531 }
1532
1533 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
massage_pgprot_pmd(pgprot_t pgprot)1534 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1535 {
1536 /*
1537 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
1538 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
1539 */
1540 if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
1541 return pgprot_val(SEGMENT_NONE);
1542 if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
1543 return pgprot_val(SEGMENT_RO);
1544 if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
1545 return pgprot_val(SEGMENT_RX);
1546 if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
1547 return pgprot_val(SEGMENT_RW);
1548 return pgprot_val(SEGMENT_RWX);
1549 }

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long mask;

	mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	mask |= _SEGMENT_ENTRY_DIRTY;
	mask |= _SEGMENT_ENTRY_YOUNG;
	mask |= _SEGMENT_ENTRY_LARGE;
	mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd = __pmd(pmd_val(pmd) & mask);
	pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
	return pmd;
}
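/*
 * pmd_modify() keeps only the large-page origin and the
 * DIRTY/YOUNG/SOFT_DIRTY/LARGE state, replaces all protection bits
 * with the new ones, and then re-asserts PROTECT for clean and
 * INVALID for old entries, so the software dirty/young tracking
 * described above keeps working after a protection change.
 */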

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	return __pmd(physpage + massage_pgprot_pmd(pgprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

/* Compare and swap and purge: invalidate the pmd and purge the TLBs */
static inline void __pmdp_cspg(pmd_t *pmdp)
{
	cspg((unsigned long *)pmdp, pmd_val(*pmdp),
	     pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	idte	%[r1],0,%[r2],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	idte	%[r1],%[r3],%[r2],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc");
	}
}
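/*
 * The r1 operand of IDTE must designate the segment-table origin, not
 * the entry itself, so __pmdp_idte() backs up over the entry's index:
 * illustrative arithmetic only, with 2048 eight-byte entries per
 * segment table, a pmdp of sto + pmd_index(addr) * 8 yields exactly
 * sto after the subtraction above. IDTE then invalidates the entry
 * selected by the address in r2 and purges matching TLB entries,
 * either on this CPU (IDTE_LOCAL) or machine-wide (IDTE_GLOBAL).
 */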

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	idte	%[r1],0,%[r2],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	idte	%[r1],%[r3],%[r2],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc");
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	set_pmd(pmdp, entry);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		/* The mm is being torn down: skip the costly TLB flush */
		pmd_t pmd = *pmdp;

		set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd;

	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

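/*
 * Transparent huge pages require EDAT1 (enhanced DAT facility 1),
 * which provides hardware support for 1 MB pages at segment level.
 */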
#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return cpu_has_edat1() ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 54 and 63 are used to indicate the page type. Bit 53 marks the pte
 * as invalid.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * |                       offset                       |E11XX|type |S0|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 *
 * Bits 0-51 store the offset.
 * Bit 52 (E) is used to remember PG_anon_exclusive.
 * Bits 57-61 store the type.
 * Bit 62 (S) is used for softdirty tracking.
 * Bits 55 and 56 (X) are unused.
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	unsigned long pteval;

	pteval = _PAGE_INVALID | _PAGE_PROTECT;
	pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return __pte(pteval);
}
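/*
 * Worked example (illustrative, assuming the s390 PTE bit values
 * _PAGE_INVALID == 0x400 and _PAGE_PROTECT == 0x200):
 * mk_swap_pte(3, 0x1234) yields 0x1234000 | 0x600 | 0xc == 0x123460c,
 * i.e. the offset in bits 0-51, the type in bits 57-61, and the swap
 * pattern (pte & 0x201) == 0x200 from the diagram above, because
 * PROTECT is set while the software present bit stays clear.
 */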

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

/*
 * 64 bit swap entry format for REGION3 and SEGMENT table entries (RSTE)
 * Bits 59 and 63 are used to indicate the swap entry. Bit 58 marks the rste
 * as invalid.
 * A swap entry is indicated by bit pattern (rste & 0x011) == 0x010
 * |                       offset                       |Xtype |11TT|S0|
 * |0000000000111111111122222222223333333333444444444455|555555|5566|66|
 * |0123456789012345678901234567890123456789012345678901|234567|8901|23|
 *
 * Bits 0-51 store the offset.
 * Bits 53-57 store the type.
 * Bit 62 (S) is used for softdirty tracking.
 * Bits 60-61 (TT) indicate the table type: 0x01 for REGION3 and 0x00 for SEGMENT.
 * Bit 52 (X) is unused.
 */

#define __SWP_OFFSET_MASK_RSTE	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT_RSTE	12
#define __SWP_TYPE_MASK_RSTE	((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT_RSTE	6

/*
 * TT bits set to 0x00 == SEGMENT. For REGION3 entries, caller must add R3
 * bits 0x01. See also __set_huge_pte_at().
 */
static inline unsigned long mk_swap_rste(unsigned long type, unsigned long offset)
{
	unsigned long rste;

	rste = _RST_ENTRY_INVALID | _RST_ENTRY_COMM;
	rste |= (offset & __SWP_OFFSET_MASK_RSTE) << __SWP_OFFSET_SHIFT_RSTE;
	rste |= (type & __SWP_TYPE_MASK_RSTE) << __SWP_TYPE_SHIFT_RSTE;
	return rste;
}
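/*
 * Illustrative sketch, derived from the diagram above:
 * mk_swap_rste(3, 0x1234) places the offset in bits 0-51
 * (offset << 12) and the type in bits 53-57 (type << 6); TT stays
 * 0x00 (SEGMENT), and the swap pattern (rste & 0x011) == 0x010 holds
 * because _RST_ENTRY_COMM (bit 59) is set while bit 63 stays clear.
 */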

static inline unsigned long __swp_type_rste(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT_RSTE) & __SWP_TYPE_MASK_RSTE;
}

static inline unsigned long __swp_offset_rste(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT_RSTE) & __SWP_OFFSET_MASK_RSTE;
}

#define __rste_to_swp_entry(rste)	((swp_entry_t) { rste })

/*
 * s390 uses different layouts for PTEs and region / segment table entries
 * (RSTEs). This is also true for swap entries, and their swap type and offset
 * encoding. For hugetlbfs PTE_MARKER support, s390 has internal
 * __swp_type_rste() and __swp_offset_rste() helpers to correctly handle RSTE
 * swap entries.
 *
 * But common swap code does not know about this difference, and only uses
 * the __swp_type(), __swp_offset() and __swp_entry() helpers for conversion
 * between the arch-dependent and arch-independent representation of
 * swp_entry_t for all pagetable levels. On s390, those helpers only work for
 * PTE swap entries.
 *
 * Therefore, implement __pmd_to_swp_entry() to build a fake PTE swap entry
 * and return the arch-dependent representation of that. Correspondingly,
 * implement __swp_entry_to_pmd() to convert that into a proper PMD swap
 * entry again. With this, the arch-dependent swp_entry_t representation will
 * always look like a PTE swap entry in common code.
 *
 * This is somewhat similar to the fake PTEs in the hugetlbfs code for s390,
 * but only requires conversion of the swap type and offset, and not all the
 * possible PTE bits.
 */
static inline swp_entry_t __pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;
	pte_t pte;

	arch_entry = __rste_to_swp_entry(pmd_val(pmd));
	pte = mk_swap_pte(__swp_type_rste(arch_entry), __swp_offset_rste(arch_entry));
	return __pte_to_swp_entry(pte);
}

static inline pmd_t __swp_entry_to_pmd(swp_entry_t arch_entry)
{
	pmd_t pmd;

	pmd = __pmd(mk_swap_rste(__swp_type(arch_entry), __swp_offset(arch_entry)));
	return pmd;
}
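/*
 * Round trip (illustrative only): for a swapped-out PMD,
 * __swp_entry_to_pmd(__pmd_to_swp_entry(pmd)) re-encodes the same
 * type and offset, first in PTE swap format for common code, then
 * back in RSTE swap format; only those two fields survive the trip.
 */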

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
extern int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot);
extern void vmem_unmap_4k_page(unsigned long addr);
extern pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pmd_pgtable(pmd) \
	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))

#endif /* _ASM_S390_PGTABLE_H */