#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
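
/*
 * Illustrative sketch (not part of the original file): the intended
 * kstrdup_const()/kfree_const() pairing. The struct and function names
 * below are hypothetical. When @name is a string literal (and thus in
 * .rodata), no allocation happens and kfree_const() is a no-op.
 */
struct example_obj {
	const char *name;
};

static int __maybe_unused example_set_name(struct example_obj *obj,
					   const char *name)
{
	obj->name = kstrdup_const(name, GFP_KERNEL);
	return obj->name ? 0 : -ENOMEM;
}

static void __maybe_unused example_put_name(struct example_obj *obj)
{
	kfree_const(obj->name);
	obj->name = NULL;
}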

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
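
/*
 * Illustrative sketch (not part of the original file): per the note at
 * kstrndup(), prefer kmemdup_nul() when the length is already known,
 * since it skips the strnlen() scan. The function below is hypothetical.
 */
static char * __maybe_unused example_dup_token(const char *data, size_t known_len)
{
	/* kstrndup(data, known_len, GFP_KERNEL) would rescan the bytes
	 * looking for a NUL; kmemdup_nul() copies exactly known_len
	 * bytes and then terminates the result. */
	return kmemdup_nul(data, known_len, GFP_KERNEL);
}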

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
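
/*
 * Illustrative sketch (not part of the original file): the usual
 * memdup_user() calling pattern, e.g. from an ioctl handler. The
 * function name is hypothetical; the point is that memdup_user()
 * never returns NULL, so callers must use IS_ERR()/PTR_ERR().
 */
static long __maybe_unused example_copy_arg(const void __user *uarg, size_t len)
{
	void *kbuf = memdup_user(uarg, len);

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* ... use kbuf ... */

	kfree(kbuf);
	return 0;
}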

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	return get_user_pages_unlocked(current, mm, start, nr_pages,
				       pages, write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
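
/*
 * Illustrative sketch (not part of the original file): the
 * pin/use/release pattern for get_user_pages_fast(). Callers must drop
 * each page reference with put_page() and cope with a short return.
 * The function name is hypothetical.
 */
static int __maybe_unused example_pin_user_range(unsigned long start,
						 int nr_pages,
						 struct page **pages)
{
	int i, pinned;

	pinned = get_user_pages_fast(start, nr_pages, 1, pages);
	if (pinned < 0)
		return pinned;	/* nothing pinned, -errno */

	/* ... access the pinned pages ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	return pinned;
}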

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
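
/*
 * Illustrative sketch (not part of the original file): vm_mmap() is the
 * in-kernel counterpart of mmap(2), mapping @file into current->mm. The
 * wrapper below is hypothetical; errors come back encoded in the
 * returned address, hence the IS_ERR_VALUE() check.
 */
static unsigned long __maybe_unused example_map_readonly(struct file *file,
							 unsigned long len)
{
	unsigned long addr = vm_mmap(file, 0, len, PROT_READ, MAP_PRIVATE, 0);

	if (IS_ERR_VALUE(addr))
		return 0;	/* mapping failed */
	return addr;
}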

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
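
/*
 * Illustrative sketch (not part of the original file): kvfree() lets a
 * caller free a buffer without remembering which allocator produced it.
 * The kmalloc-then-vmalloc fallback below is a common idiom, not an API
 * defined in this file.
 */
static void * __maybe_unused example_alloc_big(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vmalloc(size);	/* kvfree() handles either case */
	return p;
}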

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}
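
/*
 * Illustrative sketch (not part of the original file): the low bits of
 * page->mapping tag the pointer's type, which is what the helpers above
 * decode. The predicate below is hypothetical; note that KSM pages set
 * an extra mapping bit, so page_anon_vma() returns NULL for them.
 */
static bool __maybe_unused example_page_is_plain_anon(struct page *page)
{
	return page_anon_vma(page) != NULL;
}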

struct address_space *page_mapping(struct page *page)
{
	unsigned long mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = (unsigned long)page->mapping;
	if (mapping & PAGE_MAPPING_FLAGS)
		return NULL;
	return page->mapping;
}

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}
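
/*
 * Illustrative note (not part of the original file): the two handlers
 * above make vm.overcommit_ratio and vm.overcommit_kbytes mutually
 * exclusive; a successful write to one zeroes the other. E.g. from
 * userspace:
 *
 *	# sysctl -w vm.overcommit_kbytes=1048576   (ratio becomes 0)
 *	# sysctl -w vm.overcommit_ratio=50         (kbytes becomes 0)
 */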

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
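
/*
 * Worked example (illustrative, not from the original file): with 4 GiB
 * of RAM (1048576 4 KiB pages), no hugetlb pages, the default
 * sysctl_overcommit_ratio of 50 and 2 GiB of swap (524288 pages):
 *
 *	allowed = 1048576 * 50 / 100 + 524288
 *	        = 524288 + 524288 = 1048576 pages = 4 GiB
 *
 * Setting sysctl_overcommit_kbytes instead bypasses the ratio entirely;
 * kbytes >> (PAGE_SHIFT - 10) converts KiB to pages (here, >> 2).
 */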

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, 0);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
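
/*
 * Illustrative sketch (not part of the original file): a typical
 * get_cmdline() caller. Since the result is not guaranteed to be
 * NUL-terminated, the caller terminates it explicitly. The function
 * name and buffer size are hypothetical.
 */
static void __maybe_unused example_print_cmdline(struct task_struct *task)
{
	char buf[256];
	int len = get_cmdline(task, buf, sizeof(buf) - 1);

	buf[len] = '\0';	/* get_cmdline() does not NUL-terminate */
	pr_info("cmdline: %s\n", buf);
}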