/*
 * Copyright (C) 1998,2000 Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 * Copyright (C) 2010 Google, Inc.
 *	Rewritten by David Rientjes
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory. This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers. It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */
#include <linux/oom.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>
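/*
 * The three knobs below are exposed to userspace as
 * /proc/sys/vm/panic_on_oom, /proc/sys/vm/oom_kill_allocating_task and
 * /proc/sys/vm/oom_dump_tasks (see Documentation/sysctl/vm.txt).
 */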
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
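/* Serializes out_of_memory() invocations and OOM killer disabling */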
DEFINE_MUTEX(oom_lock);
/* Serializes oom_score_adj and oom_score_adj_min updates */
DEFINE_MUTEX(oom_adj_mutex);
#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant. Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */
/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer. Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When mem_cgroup_out_of_memory() and p is not a member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}
/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible. The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have already been oom reaped or are in
	 * the middle of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
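	/*
	 * adj is now measured in pages: one oom_score_adj unit corresponds to
	 * 0.1% of totalpages, so the full +/-1000 range can swing the score
	 * by up to the whole of memory. Fold it into the baseline.
	 */
	points += adj;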
	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * Reach here only when __GFP_NOFAIL is used. So, we should avoid
	 * killing current; we have to do a random task kill in this case.
	 * Hopefully, CONSTRAINT_THISNODE...but no way to handle it, now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect. Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check if this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	unsigned long points;

	if (oom_unkillable_task(task, NULL, oc->nodemask))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory is quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = ULONG_MAX;
		goto select;
	}

	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
	if (!points || points < oc->chosen_points)
		goto next;

	/* Prefer thread group leaders for display purposes */
	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
		goto next;
select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}
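	/*
	 * Normalize the winning score from raw pages to thousandths of
	 * totalpages, the same scale used by oom_score_adj.
	 */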
	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}
/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks. Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's. There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=",
		current->comm, oc->gfp_mask, &oc->gfp_mask);
	if (oc->nodemask)
		pr_cont("%*pbl", nodemask_pr_args(oc->nodemask));
	else
		pr_cont("(null)");
	pr_cont(", order=%d, oom_score_adj=%hd\n",
		oc->order, current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	cpuset_print_current_mems_allowed();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_info(oc->memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc->memcg, oc->nodemask);
}
/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;
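/* Convert a number of pages into kilobytes */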
#define K(x) ((x) << (PAGE_SHIFT-10))
/*
 * task->mm can be NULL if the task is the exited group leader. So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}
#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);
void __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (!can_madv_dontneed_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are the last task to tear this mm down.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_gather tlb;

			tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 NULL);
			tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
		}
	}
}
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 * oom_reap_task_mm		exit_mm
	 *   mmget_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *			      [...]
	 *			      out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		trace_skip_task_reaping(tsk->pid);
		goto unlock_oom;
	}

	/*
	 * If the mm has notifiers then we would need to invalidate them around
	 * unmap_page_range and that is risky because notifiers can sleep and
	 * what they do is basically nondeterministic. So let's have a short
	 * sleep to give the oom victim some more time.
	 * TODO: we really want to get rid of this ugly hack and make sure that
	 * notifiers cannot block for an unbounded amount of time and add
	 * mmu_notifier_invalidate_range_{start,end} around unmap_page_range.
	 */
	if (mm_has_notifiers(mm)) {
		up_read(&mm->mmap_sem);
		schedule_timeout_idle(HZ);
		goto unlock_oom;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_sem for reading because it serializes against the
	 * down_write();up_write() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		up_read(&mm->mmap_sem);
		trace_skip_task_reaping(tsk->pid);
		goto unlock_oom;
	}

	trace_start_task_reaping(tsk->pid);

	__oom_reap_task_mm(mm);

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	trace_finish_task_reaping(tsk->pid);
unlock_oom:
	mutex_unlock(&oom_lock);
	return ret;
}
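/*
 * The reaper retries down_read_trylock(mmap_sem) up to MAX_OOM_REAP_RETRIES
 * times, sleeping HZ/10 between attempts, before it gives up on a victim.
 */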
#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES)
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from the OOM killer because it has been either reaped
	 * or somebody can't call up_write(mmap_sem).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}
static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}
static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	if (IS_ERR(oom_reaper_th)) {
		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
				PTR_ERR(oom_reaper_th));
		oom_reaper_th = NULL;
	}
	return 0;
}
subsys_initcall(oom_init)
#else
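/* Without an MMU there is nothing to unmap, so no oom_reaper is built */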
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */
/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
 * under task_lock or by operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because the OOM killer wouldn't be able to free
	 * any memory and would livelock. freezing_slow_path will tell the
	 * freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}
/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}
/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}
/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be discussed with the MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *p = oc->chosen;
	unsigned int points = oc->chosen_points;
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
	bool can_oom_reap = true;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly.
	 */
	task_lock(p);
	if (task_will_free_mem(p)) {
		mark_oom_victim(p);
		wake_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent. This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);

	/*
	 * The task 'p' might have already exited before reaching here. The
	 * put_task_struct() will free task_struct 'p' while the loop still
	 * tries to access the fields of 'p', so, get an extra reference.
	 */
	get_task_struct(p);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child,
				oc->memcg, oc->nodemask, oc->totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	put_task_struct(p);
	read_unlock(&tasklist_lock);
	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any. They don't get access to memory reserves, though, to avoid
	 * depletion of all memory. This prevents mm->mmap_sem livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No use_mm() user needs to read from the userspace so we are
		 * ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
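/*
 * Callbacks on this chain get a last chance to free memory (e.g. balloon
 * drivers) before a victim is chosen; out_of_memory() bails out early if
 * they report having freed anything in the last second.
 */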
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
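/*
 * Example (illustrative only, not part of this file): a driver that can
 * release memory under pressure could hook the chain like this. The
 * callback's third argument points to the 'freed' page count that
 * out_of_memory() checks; reclaim_my_pool() is a hypothetical helper.
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += reclaim_my_pool();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */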
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it. The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude the 0 mask - all other users should have at
	 * least ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has
	 * to invoke the OOM killer even if it is a GFP_NOFS allocation.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	constraint = constrained_alloc(oc);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
		dump_header(oc, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL) {
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return !!oc->chosen;
}
/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}