/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

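/*
 * Check whether the PMD page table page that maps @addr in @vma can be
 * shared with the one already set up for @svma, another mapping of the same
 * file.  Both VMAs must map the same file offset at the same place within
 * the PMD page, with identical flags (ignoring VM_LOCKED), and @svma must
 * fully cover the PUD_SIZE region.  Returns the address in @svma that
 * corresponds to @addr, or 0 if sharing is not possible.
 */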
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;

	/*
	 * Match the virtual addresses, permissions and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}

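/*
 * Report whether @vma is a shared (VM_MAYSHARE) mapping that fully covers
 * the PUD_SIZE-aligned region containing @addr, i.e. whether the PMD page
 * for that region is a candidate for sharing at all.
 */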
static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return 1;
	return 0;
}

/*
 * search for a shareable pmd page for hugetlb.
 */
static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct prio_tree_iter iter;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;

	if (!vma_shareable(vma, addr))
		return;

	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud))
		pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
	else
		put_page(virt_to_page(spte));
	spin_unlock(&mm->page_table_lock);
out:
	mutex_unlock(&mapping->i_mmap_mutex);
}

/*
 * Unmap a huge page backed by a shared pte page.
 *
 * The hugetlb pte page is ref counted at the time of mapping.  If the pte is
 * shared, indicated by page_count > 1, the unmap is achieved by clearing the
 * pud and decrementing the ref count.  If the count == 1, the pte page is
 * not shared and nothing needs to be done here.
 *
 * Called with vma->vm_mm->page_table_lock held.
 *
 * Returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}

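/*
 * Return the page-table slot that will hold the huge pte for @addr,
 * allocating intermediate levels as needed.  For 1GB pages (sz == PUD_SIZE)
 * the pud entry itself is used; for 2MB pages (sz == PMD_SIZE) a pmd is
 * allocated, after first trying to reuse an existing pmd page via
 * huge_pmd_share().
 */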
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (pud_none(*pud))
				huge_pmd_share(mm, addr, pud);
			pte = (pte_t *) pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}

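/*
 * Lookup counterpart of huge_pte_alloc(): walk the page tables for @addr
 * without allocating anything.  Returns the pud entry itself for a 1GB
 * (pud_large) mapping, the pmd entry otherwise, or NULL if the walk ends
 * early because the pgd or pud is not present.
 */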
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud)) {
			if (pud_large(*pud))
				return (pte_t *)pud;
			pmd = pmd_offset(pud, addr);
		}
	}
	return (pte_t *) pmd;
}

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address >> PAGE_SHIFT;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

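/*
 * On x86 huge pages are resolved through the pmd/pud walkers below, not
 * through an address-based lookup, so follow_huge_addr() simply fails.
 */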
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

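/*
 * A pmd or pud entry maps a huge page when its Page Size Extension
 * (_PAGE_PSE) bit is set, making the entry a leaf that maps 2MB or 1GB
 * directly.
 */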
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}

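/*
 * The follow_huge_{pmd,pud}() helpers turn a huge leaf entry into the
 * struct page for @address: pte_page() yields the head page of the huge
 * page and the low bits of the address select the subpage within it.
 */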
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
	return page;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pud);
	if (page)
		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
	return page;
}

#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
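/*
 * Bottom-up search: scan upwards from the cached free-area hint (or
 * TASK_UNMAPPED_BASE) for a huge-page-aligned hole of at least @len bytes,
 * restarting once from the bottom before giving up with -ENOMEM.
 */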
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, huge_page_size(h));

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, huge_page_size(h));
	}
}

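/*
 * Top-down search: start just below mm->mmap_base (or the cached hint) and
 * walk down through the VMAs looking for a huge-page-aligned hole.  If that
 * fails, retry once from the base and finally fall back to the bottom-up
 * search above.
 */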
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & huge_page_mask(h);
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma)
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		prev_vma = vma->vm_prev;
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & huge_page_mask(h);
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

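/*
 * Validate the request, honour MAP_FIXED and an aligned hint address when
 * it fits, and otherwise dispatch to the bottom-up or top-down search to
 * match the mm's normal mmap layout.
 */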
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_X86_64
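/*
 * Parse the "hugepagesz=" kernel command line option and register the
 * requested huge page size: 2MB is always available, 1GB only when the CPU
 * advertises gbpages support.  For example, booting with
 * "hugepagesz=1G hugepages=4" requests four 1GB pages at boot time on
 * capable hardware.
 */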
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif