// SPDX-License-Identifier: GPL-2.0
/*
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>

#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
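
/*
 * Editorial note (not in the original file): a region named "foo" via
 * ASHMEM_SET_NAME is stored here as "dev/ashmem/foo" and typically shows
 * up with a leading slash, as /dev/ashmem/foo, in /proc/<pid>/maps. With
 * ASHMEM_NAME_LEN of 256 as defined in the ashmem UAPI header, the full
 * name buffer is 256 + 11 = 267 bytes.
 */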

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of this area's unpinned ranges
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:		The entry in the LRU list
 * @unpinned:		The entry in its area's unpinned list
 * @asma:		The associated anonymous shared memory area
 * @pgstart:		The starting page (inclusive)
 * @pgend:		The ending page (inclusive)
 * @purged:		The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);

/*
 * long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

/*
 * A separate lockdep class for the backing shmem inodes to resolve the lockdep
 * warning about the race between kswapd taking fs_reclaim before inode_lock
 * and the write syscall taking inode_lock and then fs_reclaim.
 * Note that such a race is impossible because ashmem does not support write
 * syscalls operating on the backing shmem.
 */
static struct lock_class_key backing_shmem_inode_class;

static inline unsigned long range_size(struct ashmem_range *range)
{
	return range->pgend - range->pgstart + 1;
}

static inline bool range_on_lru(struct ashmem_range *range)
{
	return range->purged == ASHMEM_NOT_PURGED;
}

static inline bool page_range_subsumes_range(struct ashmem_range *range,
					     size_t start, size_t end)
{
	return (range->pgstart >= start) && (range->pgend <= end);
}

static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
						size_t start, size_t end)
{
	return (range->pgstart <= start) && (range->pgend >= end);
}

static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
	return (range->pgstart <= page) && (range->pgend >= page);
}

static inline bool page_range_in_range(struct ashmem_range *range,
				       size_t start, size_t end)
{
	return page_in_range(range, start) || page_in_range(range, end) ||
		page_range_subsumes_range(range, start, end);
}

static inline bool range_before_page(struct ashmem_range *range, size_t page)
{
	return range->pgend < page;
}
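
/*
 * Editorial worked example (not in the original file) of the interval
 * predicates above, for a range covering the inclusive page interval
 * [3, 7]:
 */
#if 0	/* illustrative sketch only; never compiled */
	struct ashmem_range r = { .pgstart = 3, .pgend = 7 };

	page_range_in_range(&r, 5, 9);		/* true: partial overlap at the tail */
	page_range_subsumes_range(&r, 2, 8);	/* true: [2, 8] covers all of [3, 7] */
	page_range_subsumed_by_range(&r, 4, 6);	/* true: [3, 7] covers all of [4, 6] */
	range_before_page(&r, 9);		/* true: [3, 7] ends before page 9 */
#endif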

#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:	The memory range being added
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count.
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:	The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count.
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	   The associated ashmem_area
 * @prev_range:	   The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	   Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	   The starting page (inclusive)
 * @end:	   The ending page (inclusive)
 * @new_range:	   The placeholder for the preallocated range
 *
 * This function is protected by ashmem_mutex.
 */
static void range_alloc(struct ashmem_area *asma,
			struct ashmem_range *prev_range, unsigned int purged,
			size_t start, size_t end,
			struct ashmem_range **new_range)
{
	struct ashmem_range *range = *new_range;

	*new_range = NULL;
	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	 The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	    The associated ashmem_range being shrunk
 * @start:	    The starting page (inclusive) of the new range
 * @end:	    The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way; it
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
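
/*
 * Editorial worked example (not in the original file): shrinking a range
 * that covers pages [2, 9] (eight pages) down to [4, 9] (six pages)
 * subtracts the two-page difference from lru_count, provided the range is
 * still on the LRU list (i.e. not yet purged).
 */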

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	The backing file's index node (inode)
 * @file:	The backing file
 *
 * Please note that the ashmem_area is not returned by this function; it is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or a negative error code otherwise.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (ret)
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (!asma)
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	The backing file's inode; ignored here
 * @file:	The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct ashmem_area *asma = iocb->ki_filp->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	/*
	 * asma and asma->file are used outside the lock here. We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	mutex_unlock(&ashmem_mutex);
	ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
	mutex_lock(&ashmem_mutex);
	if (ret > 0)
		asma->file->f_pos = iocb->ki_pos;
out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	loff_t ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		mutex_unlock(&ashmem_mutex);
		return -EINVAL;
	}

	if (!asma->file) {
		mutex_unlock(&ashmem_mutex);
		return -EBADF;
	}

	mutex_unlock(&ashmem_mutex);

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		return ret;

	/* Copy f_pos from the backing file, since f_op->llseek() sets it */
	file->f_pos = asma->file->f_pos;
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}
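
/*
 * Editorial worked example (not in the original file):
 * calc_vm_may_flags(PROT_READ | PROT_WRITE) yields
 * VM_MAYREAD | VM_MAYWRITE. Each PROT_* bit is mirrored into its VM_MAY*
 * counterpart, so clearing calc_vm_may_flags(~prot_mask) from a vma (as
 * ashmem_mmap() does below) strips exactly the VM_MAY* bits the area no
 * longer permits.
 */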

static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* do not allow to mmap ashmem backing shmem file directly */
	return -EPERM;
}

static unsigned long
ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	static struct file_operations vmfile_fops;
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (!asma->size) {
		ret = -EINVAL;
		goto out;
	}

	/* requested mapping size larger than object size */
	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
	    calc_vm_prot_bits(PROT_MASK, 0)) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;
		struct inode *inode;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		vmfile->f_mode |= FMODE_LSEEK;
		inode = file_inode(vmfile);
		lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class);
		asma->file = vmfile;
		/*
		 * override mmap operation of the vmfile so that it can't be
		 * remapped which would lead to creation of a new vma with no
		 * asma permission checks. Have to override get_unmapped_area
		 * as well to prevent VM_BUG_ON check for f_ops modification.
		 */
		if (!vmfile_fops.mmap) {
			vmfile_fops = *vmfile->f_op;
			vmfile_fops.mmap = ashmem_vmfile_mmap;
			vmfile_fops.get_unmapped_area =
					ashmem_vmfile_get_unmapped_area;
		}
		vmfile->f_op = &vmfile_fops;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	} else {
		vma_set_anonymous(vma);
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
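
/*
 * Editorial worked example (not in the original file): if userspace first
 * calls ASHMEM_SET_PROT_MASK with PROT_READ, a later
 * mmap(..., PROT_READ | PROT_WRITE, ...) fails above with -EPERM, because
 * VM_WRITE survives masking by calc_vm_prot_bits(asma->prot_mask, 0) yet
 * lies inside calc_vm_prot_bits(PROT_MASK, 0).
 */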

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	while (!list_empty(&ashmem_lru_list)) {
		struct ashmem_range *range =
			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;
		struct file *f = range->asma->file;

		get_file(f);
		atomic_inc(&ashmem_shrink_inflight);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		mutex_unlock(&ashmem_mutex);
		f->f_op->fallocate(f,
				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				   start, end - start);
		fput(f);
		if (atomic_dec_and_test(&ashmem_shrink_inflight))
			wake_up_all(&ashmem_shrink_wait);
		if (!mutex_trylock(&ashmem_mutex))
			goto out;
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
out:
	return freed;
}
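
/*
 * Editorial worked example (not in the original file): with 4 KiB pages,
 * purging a range covering pages [2, 4] punches the byte span
 * [8192, 20480), i.e. start = 2 * 4096 and end - start = 5 * 4096 - 8192
 * = 12288, while FALLOC_FL_KEEP_SIZE leaves the backing file's size
 * untouched.
 */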

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is a count of pages on the lru, not a count
	 * of objects on the list. This means the scan function needs to
	 * return the number of pages freed, not the number of objects
	 * scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need
	 * such significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if ((asma->prot_mask & prot) != prot) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
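
/*
 * Editorial note (not in the original file): if a task's personality has
 * READ_IMPLIES_EXEC set, a request to restrict the mask to PROT_READ is
 * silently widened to PROT_READ | PROT_EXEC above, so a later mmap() with
 * PROT_EXEC still succeeds for such applications.
 */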

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with the lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (asma->file)
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland.
	 */
	if (copy_to_user(name, local_name, len))
		ret = -EFAULT;
	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
		      struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend, new_range);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
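
/*
 * Editorial worked example (not in the original file): with a single
 * unpinned range covering pages [2, 9], pinning...
 *   [1, 10] removes the range entirely (case #1),
 *   [2, 4]  shrinks it to [5, 9] (case #2),
 *   [7, 9]  shrinks it to [2, 6] (case #3), and
 *   [4, 5]  splits it into [2, 3] and [6, 9] (case #4).
 */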

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
			struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min(range->pgstart, pgstart);
			pgend = max(range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	range_alloc(asma, range, purged, pgstart, pgend, new_range);
	return 0;
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;
	struct ashmem_range *range = NULL;

	if (copy_from_user(&pin, p, sizeof(pin)))
		return -EFAULT;

	if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
		range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
		if (!range)
			return -ENOMEM;
	}

	mutex_lock(&ashmem_mutex);
	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));

	if (!asma->file)
		goto out_unlock;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if ((pin.offset | pin.len) & ~PAGE_MASK)
		goto out_unlock;

	if (((__u32)-1) - pin.offset < pin.len)
		goto out_unlock;

	if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
		goto out_unlock;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
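
	/*
	 * Editorial worked example (not in the original file): with 4 KiB
	 * pages, pin.offset == 8192 and pin.len == 12288 give pgstart == 2
	 * and pgend == 4, i.e. the inclusive page interval [2, 4].
	 */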

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

out_unlock:
	mutex_unlock(&ashmem_mutex);
	if (range)
		kmem_cache_free(ashmem_range_cachep, range);

	return ret;
}

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		mutex_lock(&ashmem_mutex);
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		mutex_unlock(&ashmem_mutex);
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};
			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif
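
/*
 * Editorial usage sketch (not part of the original file): how a userspace
 * client typically drives this interface. The ASHMEM_* ioctls and struct
 * ashmem_pin come from the ashmem UAPI header; error handling is
 * deliberately minimal.
 */
#if 0	/* illustrative userspace code; never compiled with this driver */
	/* needs <fcntl.h>, <sys/ioctl.h>, <sys/mman.h> and the ashmem UAPI header */
	int fd = open("/dev/ashmem", O_RDWR);
	size_t size = 4 * 4096;
	char *p;

	ioctl(fd, ASHMEM_SET_NAME, "example");	/* optional; only before mmap */
	ioctl(fd, ASHMEM_SET_SIZE, size);	/* required before mmap */

	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* let the kernel reclaim pages [1, 2] under memory pressure */
	struct ashmem_pin pin = { .offset = 4096, .len = 2 * 4096 };
	ioctl(fd, ASHMEM_UNPIN, &pin);

	/* later: re-pin and learn whether the contents were purged */
	int was_purged = ioctl(fd, ASHMEM_PIN, &pin);	/* ASHMEM_WAS_PURGED? */
#endif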

#ifdef CONFIG_PROC_FS
static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct ashmem_area *asma = file->private_data;

	mutex_lock(&ashmem_mutex);

	if (asma->file)
		seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);

	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
		seq_printf(m, "name:\t%s\n",
			   asma->name + ASHMEM_NAME_PREFIX_LEN);

	mutex_unlock(&ashmem_mutex);
}
#endif

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read_iter = ashmem_read_iter,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = ashmem_show_fdinfo,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (!ashmem_area_cachep) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (!ashmem_range_cachep) {
		pr_err("failed to create slab cache\n");
		goto out_free1;
	}

	ret = misc_register(&ashmem_misc);
	if (ret) {
		pr_err("failed to register misc device!\n");
		goto out_free2;
	}

	ret = register_shrinker(&ashmem_shrinker);
	if (ret) {
		pr_err("failed to register shrinker!\n");
		goto out_demisc;
	}

	pr_info("initialized\n");

	return 0;

out_demisc:
	misc_deregister(&ashmem_misc);
out_free2:
	kmem_cache_destroy(ashmem_range_cachep);
out_free1:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
device_initcall(ashmem_init);