/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/mm.h>

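/*
 * A region or segment table (CRST) has 2048 eight-byte entries and thus
 * occupies four pages, hence the order-2 allocation.
 */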
#define CRST_ALLOC_ORDER 2

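/*
 * Table allocation and freeing primitives, implemented in
 * arch/s390/mm/pgalloc.c. crst_table_alloc() returns a region/segment
 * table, page_table_alloc() a pte table; the _pgste variants allocate a
 * page that also carries the page status table extensions (PGSTEs).
 */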
unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_pgste(struct ptdesc *ptdesc);

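/* Fill a freshly allocated CRST table with the invalid entry value @entry. */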
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	memset64((u64 *)crst, entry, _CRST_ENTRIES);
}

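/*
 * Add higher region-table levels to an existing address space so that it
 * can map addresses up to @limit; raises mm->context.asce_limit on success.
 */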
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);

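/*
 * Check whether a new mapping of @len bytes at @addr still fits below the
 * current ASCE limit. If it does not, but still fits below TASK_SIZE, the
 * region tables are upgraded first. Returns @addr on success, or a negative
 * error code cast to unsigned long on failure.
 */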
static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
					     unsigned long len)
{
	int rc;

	if (addr + len > mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}
	return addr;
}

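/*
 * The p4d level is backed by a region-second table: allocate a CRST table
 * and mark all entries invalid before handing it over.
 */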
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _REGION2_ENTRY_EMPTY);
	pagetable_p4d_ctor(virt_to_ptdesc(table));

	return (p4d_t *) table;
}

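/*
 * Nothing to free when this level is folded into the one above it, since no
 * separate table was allocated; the same applies to pud_free() and
 * pmd_free() below.
 */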
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (mm_p4d_folded(mm))
		return;

	pagetable_dtor(virt_to_ptdesc(p4d));
	crst_table_free(mm, (unsigned long *) p4d);
}

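/* The pud level is backed by a region-third table. */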
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _REGION3_ENTRY_EMPTY);
	pagetable_pud_ctor(virt_to_ptdesc(table));

	return (pud_t *) table;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (mm_pud_folded(mm))
		return;

	pagetable_dtor(virt_to_ptdesc(pud));
	crst_table_free(mm, (unsigned long *) pud);
}

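/*
 * The pmd level is backed by a segment table. pagetable_pmd_ctor() can fail,
 * in which case the just allocated table is released again.
 */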
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pagetable_pmd_ctor(mm, virt_to_ptdesc(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (mm_pmd_folded(mm))
		return;
	pagetable_dtor(virt_to_ptdesc(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}

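/*
 * The populate helpers link a lower-level table into an entry of the table
 * above it: the physical address of the child table combined with the
 * region/segment entry bits for that level.
 */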
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	set_pgd(pgd, __pgd(_REGION1_ENTRY | __pa(p4d)));
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	set_p4d(p4d, __p4d(_REGION2_ENTRY | __pa(pud)));
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd)));
}

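/*
 * The top-level (pgd) table is a CRST table as well. Unlike the lower
 * levels, its entries are not initialized here.
 */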
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	pagetable_pgd_ctor(virt_to_ptdesc(table));

	return (pgd_t *) table;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pagetable_dtor(virt_to_ptdesc(pgd));
	crst_table_free(mm, (unsigned long *) pgd);
}

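/* Link a pte table (pgtable_t) into a segment-table entry. */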
static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	set_pmd(pmd, __pmd(_SEGMENT_ENTRY | __pa(pte)));
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

/* The arch-specific pte_free_defer() implementation is in arch/s390/mm/pgalloc.c */
#define pte_free_defer pte_free_defer
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);

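/* Helpers for the kernel address space setup in arch/s390/mm/vmem.c. */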
void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
pte_t *vmem_pte_alloc(void);

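/*
 * Rudimentary address spaces that are not tied to any mm: base_asce_alloc()
 * builds page tables covering @num_pages pages starting at @addr and returns
 * the corresponding ASCE, base_asce_free() releases them again.
 */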
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages);
void base_asce_free(unsigned long asce);

#endif /* _S390_PGALLOC_H */