// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C) 1998,2000 Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C) 2010 Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */
#include <linux/oom.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
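/*
 * These knobs are exposed to userspace as /proc/sys/vm/panic_on_oom,
 * /proc/sys/vm/oom_kill_allocating_task and /proc/sys/vm/oom_dump_tasks.
 * For example (illustrative only):
 *
 *	# echo 1 > /proc/sys/vm/oom_kill_allocating_task
 *
 * makes the next OOM kill the allocating task directly (when eligible)
 * instead of scanning the task list for the worst-scoring one, see
 * out_of_memory() below.
 */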
/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent overly eager oom killing (e.g. when the oom killer is invoked
 * from different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * and mark_oom_victim().
 */
DEFINE_MUTEX(oom_lock);
/* Serializes oom_score_adj and oom_score_adj_min updates */
DEFINE_MUTEX(oom_adj_mutex);
static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}
#ifdef CONFIG_NUMA
/**
 * oom_cpuset_eligible() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @oc: pointer to struct oom_control
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 *
 * This function assumes oom-killer context and that 'current' has triggered
 * the oom-killer.
 */
static bool oom_cpuset_eligible(struct task_struct *start,
				struct oom_control *oc)
{
	struct task_struct *tsk;
	bool ret = false;
	const nodemask_t *mask = oc->nodemask;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant. Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_in_oom_domain(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
	return true;
}
#endif /* CONFIG_NUMA */
/*
 * The process p may have detached its own ->mm while exiting or through
 * kthread_use_mm(), but one or more of its subthreads may still have a valid
 * pointer. Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();
	return t;
}
/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}
/* return true if the task is not adequate as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	return false;
}
/*
 * Check whether the amount of unreclaimable slab is greater than
 * all user memory (LRU pages).
 * dump_unreclaimable_slab() helps in the case that the oom is due to
 * too much unreclaimable slab used by the kernel.
 */
static bool should_dump_unreclaim_slab(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
}
/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task for which we calculate the badness
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible. The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
long oom_badness(struct task_struct *p, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p))
		return LONG_MIN;

	p = find_lock_task_mm(p);
	if (!p)
		return LONG_MIN;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable, have already been oom reaped, or are in the middle
	 * of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return LONG_MIN;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	return points;
}
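/*
 * Worked example of the score above (illustrative numbers only, not from the
 * original source): with totalpages = 4,000,000 (roughly 16GB of RAM + swap
 * at 4KiB pages), a task whose rss + swap + page tables amount to 1,000,000
 * pages starts at points = 1,000,000.  An oom_score_adj of +100 then adds
 * 100 * (4,000,000 / 1000) = 400,000 points, and -100 subtracts the same,
 * i.e. one oom_score_adj unit is worth about 0.1% of totalpages.
 */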
static const char * const oom_constraint_text[] = {
	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
};
/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages() + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current.  We have to fall back to a random task kill in
	 * this case.  Ideally this would be CONSTRAINT_THISNODE, but there
	 * is no way to handle that for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure was caused by the cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			highest_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
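/*
 * For reference, the decision above boils down to (restating the code, no
 * new behaviour):
 *   memcg oom                               -> CONSTRAINT_MEMCG, totalpages = memcg limit
 *   !NUMA, no zonelist, or __GFP_THISNODE   -> CONSTRAINT_NONE, totalpages = RAM + swap
 *   nodemask smaller than all memory nodes  -> CONSTRAINT_MEMORY_POLICY,
 *                                              totalpages = swap + nodes in the mask
 *   some zone forbidden by current's cpuset -> CONSTRAINT_CPUSET,
 *                                              totalpages = swap + mems_allowed nodes
 *   otherwise                               -> CONSTRAINT_NONE
 */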
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	long points;

	if (oom_unkillable_task(task))
		goto next;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because the chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = LONG_MAX;
		goto select;
	}

	points = oom_badness(task, oc->totalpages);
	if (points == LONG_MIN || points < oc->chosen_points)
		goto next;

select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}
/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. If the scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	oc->chosen_points = LONG_MIN;

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}
}
static int dump_task(struct task_struct *p, void *arg)
{
	struct oom_control *oc = arg;
	struct task_struct *task;

	if (oom_unkillable_task(p))
		return 0;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
		return 0;

	task = find_lock_task_mm(p);
	if (!task) {
		/*
		 * All of p's threads have already detached their mm's. There's
		 * no need to report them; they can't be oom killed anyway.
		 */
		return 0;
	}

	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
		task->pid, from_kuid(&init_user_ns, task_uid(task)),
		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
		mm_pgtables_bytes(task->mm),
		get_mm_counter(task->mm, MM_SWAPENTS),
		task->signal->oom_score_adj, task->comm);
	task_unlock(task);

	return 0;
}
/**
 * dump_tasks - dump current memory state of all system tasks
 * @oc: pointer to struct oom_control
 *
 * Dumps the current memory state of all eligible tasks. Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct oom_control *oc)
{
	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			dump_task(p, oc);
		rcu_read_unlock();
	}
}
static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
{
	/* one line summary of the oom killer context. */
	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
			oom_constraint_text[oc->constraint],
			nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
}
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
			current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
		if (should_dump_unreclaim_slab())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc);
	if (p)
		dump_oom_summary(oc, p);
}
/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))
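/*
 * Example (illustrative, not from the original source): with 4KiB pages
 * PAGE_SHIFT is 12, so K(x) == x << 2 == x * 4, converting a page count
 * into kilobytes for the pr_info()/pr_err() reports below.
 */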
/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}
#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);
bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers are really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!can_madv_lru_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping the mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_notifier_range range;
			struct mmu_gather tlb;

			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
						vma, mm, vma->vm_start,
						vma->vm_end);
			tlb_gather_mmu(&tlb, mm);
			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
				tlb_finish_mmu(&tlb);
				ret = false;
				continue;
			}
			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
			mmu_notifier_invalidate_range_end(&range);
			tlb_finish_mmu(&tlb);
		}
	}

	return ret;
}
/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!mmap_read_trylock(mm)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_lock for reading because it serializes against the
	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* failed to reap part of the address space. Try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	mmap_read_unlock(mm);

	return ret;
}
#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the mmap_read_trylock(mm) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	sched_show_task(tsk);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from the OOM killer because it has either been reaped
	 * or somebody can't call mmap_write_unlock(mm).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by queue_oom_reaper */
	put_task_struct(tsk);
}
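/*
 * Back-of-the-envelope timing (illustrative arithmetic, not from the original
 * source): MAX_OOM_REAP_RETRIES attempts spaced schedule_timeout_idle(HZ/10)
 * apart means the reaper keeps retrying a contended mmap_lock for roughly
 * 10 * 100ms = 1 second before giving up and printing the diagnostics above.
 */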
static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock_irq(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock_irq(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}
static void wake_oom_reaper(struct timer_list *timer)
{
	struct task_struct *tsk = container_of(timer, struct task_struct,
			oom_reaper_timer);
	struct mm_struct *mm = tsk->signal->oom_mm;
	unsigned long flags;

	/* The victim managed to terminate on its own - see exit_mmap */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		put_task_struct(tsk);
		return;
	}

	spin_lock_irqsave(&oom_reaper_lock, flags);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock_irqrestore(&oom_reaper_lock, flags);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}
/*
 * Give the OOM victim time to exit naturally before invoking the oom_reaping.
 * The timer's timeout is arbitrary... the longer it is, the longer the worst
 * case scenario for the OOM can take. If it is too small, the oom_reaper can
 * get in the way and release resources needed by the process exit path.
 * e.g. The futex robust list can sit in Anon|Private memory that gets reaped
 * before the exit path is able to wake the futex waiters.
 */
#define OOM_REAPER_DELAY (2*HZ)
static void queue_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);
	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
	tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
	add_timer(&tsk->oom_reaper_timer);
}
static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void queue_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */
/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
 * (either under task_lock or by operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because the OOM killer wouldn't be able to free
	 * any memory and would livelock. freezing_slow_path will tell the
	 * freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}
/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}
/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}
/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be discussed with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}
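/*
 * Minimal usage sketch (illustrative, loosely modelled on the PM freeze path,
 * not part of this file): a caller that must guarantee no further OOM kills
 * could do
 *
 *	if (!oom_killer_disable(msecs_to_jiffies(20 * MSEC_PER_SEC)))
 *		return -EBUSY;		// victims did not exit in time
 *	... work that must not race with the OOM killer ...
 *	oom_killer_enable();
 *
 * where the timeout only bounds the wait for already-selected victims.
 */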
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in
	 * coredump_task_exit(), so the oom killer cannot assume that
	 * the process will promptly exit and release memory.
	 */
	if (sig->core_state)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}
/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * The caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there is
	 * only a small chance it will free any more memory.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
			message, task_pid_nr(victim), victim->comm);
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
		K(get_mm_counter(mm, MM_ANONPAGES)),
		K(get_mm_counter(mm, MM_FILEPAGES)),
		K(get_mm_counter(mm, MM_SHMEMPAGES)),
		from_kuid(&init_user_ns, task_uid(victim)),
		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_lock livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No kthread_use_mm() user needs to read from the userspace so
		 * we are ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		queue_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
/*
 * Kill the provided task unless it is secured by setting
 * oom_score_adj to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
	    !is_global_init(task)) {
		get_task_struct(task);
		__oom_kill_process(task, message);
	}
	return 0;
}
static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly.
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		queue_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		memcg_memory_event(oom_group, MEMCG_OOM_GROUP_KILL);
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void *)message);
		mem_cgroup_put(oom_group);
	}
}
/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (oc->constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
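/*
 * vm.panic_on_oom values as handled above (summary only, no new behaviour):
 *	0 - never panic, just invoke the OOM killer (default)
 *	1 - panic only for an unconstrained (CONSTRAINT_NONE) oom; cpuset,
 *	    mempolicy and memcg constrained ooms still kill a task instead
 *	2 - always panic on oom
 * sysrq-triggered ooms never panic, regardless of the setting.
 */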
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * or trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0 && !is_sysrq_oom(oc))
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		queue_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to make
	 * sure to exclude the 0 mask - all other users should have at least
	 * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
	 * invoke the OOM killer even if it is a GFP_NOFS allocation.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	oc->constraint = constrained_alloc(oc);
	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current) &&
	    oom_cpuset_eligible(current, oc) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! */
	if (!oc->chosen) {
		dump_header(oc, NULL);
		pr_warn("Out of memory and no killable processes...\n");
		/*
		 * If we got here due to an actual allocation at the
		 * system level, we cannot survive this and will enter
		 * an endless loop in the allocator. Bail out now.
		 */
		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
			panic("System is deadlocked on memory\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL)
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
	return !!oc->chosen;
}
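/*
 * At a glance, out_of_memory() above proceeds as follows (summary of the
 * code, nothing new): bail out if the killer is disabled; give the oom
 * notifiers a chance to free memory; short-circuit if current is already
 * dying; skip !__GFP_FS global ooms; classify the constraint and honour
 * panic_on_oom; optionally kill the allocating task; otherwise pick the
 * worst-scoring task and kill it, panicking only when nothing at all is
 * killable for a genuine system-wide oom.
 */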
/*
 * The pagefault handler calls here because some allocation has failed. We
 * have to take care of the memcg OOM here because this is the only safe
 * context without any locks held, but let the oom killer triggered from the
 * allocation context take care of the global OOM.
 */
void pagefault_out_of_memory(void)
{
	static DEFINE_RATELIMIT_STATE(pfoom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (fatal_signal_pending(current))
		return;

	if (__ratelimit(&pfoom_rs))
		pr_warn("Huh VM_FAULT_OOM leaked out to the #PF handler. Retrying PF\n");
}
SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
{
#ifdef CONFIG_MMU
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	struct task_struct *p;
	unsigned int f_flags;
	bool reap = false;
	long ret = 0;

	if (flags)
		return -EINVAL;

	task = pidfd_get_task(pidfd, &f_flags);
	if (IS_ERR(task))
		return PTR_ERR(task);

	/*
	 * Make sure to choose a thread which still has a reference to mm
	 * during the group exit.
	 */
	p = find_lock_task_mm(task);
	if (!p) {
		ret = -ESRCH;
		goto put_task;
	}

	mm = p->mm;
	mmgrab(mm);

	if (task_will_free_mem(p))
		reap = true;
	else {
		/* Error only if the work has not been done already */
		if (!test_bit(MMF_OOM_SKIP, &mm->flags))
			ret = -EINVAL;
	}
	task_unlock(p);

	if (!reap)
		goto drop_mm;

	if (mmap_read_lock_killable(mm)) {
		ret = -EINTR;
		goto drop_mm;
	}
	/*
	 * Check MMF_OOM_SKIP again under mmap_read_lock protection to ensure
	 * a possible change in exit_mmap is seen.
	 */
	if (!test_bit(MMF_OOM_SKIP, &mm->flags) && !__oom_reap_task_mm(mm))
		ret = -EAGAIN;
	mmap_read_unlock(mm);

drop_mm:
	if (mm)
		mmdrop(mm);
put_task:
	put_task_struct(task);
	return ret;
#else
	return -ENOSYS;
#endif /* CONFIG_MMU */
}
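/*
 * Userspace usage sketch (illustrative only, not part of this file): a
 * userspace OOM daemon would typically kill a victim and then ask the kernel
 * to release its memory proactively:
 *
 *	int pidfd = syscall(SYS_pidfd_open, victim_pid, 0);
 *	syscall(SYS_pidfd_send_signal, pidfd, SIGKILL, NULL, 0);
 *	syscall(SYS_process_mrelease, pidfd, 0);	// flags must be 0
 *	close(pidfd);
 *
 * assuming kernel and libc headers new enough to define the
 * SYS_process_mrelease number.
 */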