/* mm/ashmem.c
**
** Anonymous Shared Memory Subsystem, ashmem
**
** Copyright (C) 2008 Google, Inc.
**
** Robert Love <rlove@google.com>
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
	struct list_head unpinned_list;	 /* this area's unpinned ranges */
	struct file *file;		 /* the shmem-based backing file */
	size_t size;			 /* size of the mapping, in bytes */
	unsigned long prot_mask;	 /* allowed prot bits, as vm_flags */
};

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;		/* entry in LRU list */
	struct list_head unpinned;	/* entry in its area's unpinned list */
	struct ashmem_area *asma;	/* associated area */
	size_t pgstart;			/* starting page, inclusive */
	size_t pgend;			/* ending page, inclusive */
	unsigned int purged;	/* ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED */
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
		page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))

#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)
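
/*
 * Worked example for the interval helpers above (illustrative only):
 * given a range covering pages [2, 5],
 *
 *	range_size(range)			== 4
 *	page_in_range(range, 5)			== true
 *	page_range_in_range(range, 4, 9)	== true (overlap at 4..5)
 *	page_range_subsumes_range(range, 1, 9)	== true
 *	range_before_page(range, 6)		== true
 */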

static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
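
/*
 * Example for range_shrink() (illustrative only): pinning pages [3, 6]
 * out of an unpinned LRU range [1, 6] shrinks it to [1, 2]; 'pre' is 6,
 * the new size is 2, so lru_count drops by the 4 pages that are no
 * longer unpinned.
 */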

static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out;

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret < 0)
		goto out;

	/* Update backing file pos, since f_ops->read() doesn't */
	asma->file->f_pos = *pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline unsigned long calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}
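
/*
 * Example (illustrative only): calc_vm_may_flags(PROT_READ | PROT_WRITE)
 * yields VM_MAYREAD | VM_MAYWRITE. The mmap handler below clears the
 * VM_MAY* bits of every protection *not* in asma->prot_mask, so a later
 * mprotect() cannot re-enable a protection the area has dropped.
 */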

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);
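
	/*
	 * Walkthrough of the check above (illustrative only): if
	 * asma->prot_mask is PROT_READ alone, calc_vm_prot_bits() maps it
	 * to VM_READ. A PROT_READ|PROT_WRITE mapping arrives with
	 * VM_READ|VM_WRITE set, so
	 *
	 *	(vm_flags & ~VM_READ) & (VM_READ | VM_WRITE | VM_EXEC)
	 *
	 * leaves VM_WRITE set and the mmap() is rejected with -EPERM.
	 */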

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;
	vma->vm_flags |= VM_CAN_NONLINEAR;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;
	if (!sc->nr_to_scan)
		return lru_count;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		struct inode *inode = range->asma->file->f_dentry->d_inode;
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;

		vmtruncate_range(inode, start, end);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		sc->nr_to_scan -= range_size(range);
		if (sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);

	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.shrink = ashmem_shrink,
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* cannot change an existing mapping's name */
	if (unlikely(asma->file)) {
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
				    name, ASHMEM_NAME_LEN)))
		ret = -EFAULT;
	asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';

out:
	mutex_unlock(&ashmem_mutex);

	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		size_t len;

		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		if (unlikely(copy_to_user(name,
				asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
			ret = -EFAULT;
	} else {
		if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
					  sizeof(ASHMEM_NAME_DEF))))
			ret = -EFAULT;
	}
	mutex_unlock(&ashmem_mutex);

	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
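		/*
		 * Example of case #4 (illustrative only): pinning pages
		 * [3, 4] out of an unpinned range [1, 6] shrinks the
		 * existing range to [1, 2] and allocates a new range
		 * [5, 6] for the remaining tail.
		 */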
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially unpinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}
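
/*
 * Example for ashmem_unpin() (illustrative only): with existing
 * unpinned ranges [1, 2] and [5, 6], unpinning [2, 5] deletes both
 * ranges via the restart loop and inserts a single coalesced range
 * [1, 6], inheriting the purged state of any deleted piece.
 */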

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
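
	/*
	 * Example (illustrative only): with PAGE_SIZE == 4096, offset == 8192
	 * and len == 8192 cover pages [2, 3], so pgstart == 2 and pgend == 3.
	 * The ((__u32) -1) - pin.offset < pin.len test above fires exactly
	 * when pin.offset + pin.len would overflow 32 bits.
	 */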

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = 0,
			};
			ret = ashmem_shrink(&ashmem_shrinker, &sc);
			sc.nr_to_scan = ret;
			ashmem_shrink(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
	.compat_ioctl = ashmem_ioctl,
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};
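
/*
 * Typical userspace usage (an illustrative sketch, not part of this
 * driver; assumes the ioctl definitions from the ashmem.h header):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "my-region");
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * Name and size can only be set before the first mmap(); afterwards
 * ASHMEM_SET_NAME and ASHMEM_SET_SIZE fail with -EINVAL, since the
 * backing shmem file has already been created.
 */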

static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					  sizeof(struct ashmem_area),
					  0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		printk(KERN_ERR "ashmem: failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
					  sizeof(struct ashmem_range),
					  0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		printk(KERN_ERR "ashmem: failed to create slab cache\n");
		return -ENOMEM;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		printk(KERN_ERR "ashmem: failed to register misc device!\n");
		return ret;
	}

	register_shrinker(&ashmem_shrinker);

	printk(KERN_INFO "ashmem: initialized\n");

	return 0;
}

static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		printk(KERN_ERR "ashmem: failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	printk(KERN_INFO "ashmem: unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");