github.com/torvalds/linux.git
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--	mm/oom_kill.c	57
1 file changed, 34 insertions(+), 23 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 3291e82d4352..5340f6b91312 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -258,8 +258,6 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
 		unsigned long totalpages, const nodemask_t *nodemask,
 		bool force_kill)
 {
-	if (task->exit_state)
-		return OOM_SCAN_CONTINUE;
 	if (oom_unkillable_task(task, NULL, nodemask))
 		return OOM_SCAN_CONTINUE;
 
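The hunk above drops the early bail-out for tasks with a non-zero exit_state, leaving oom_unkillable_task() as the first filter. For context, the OOM_SCAN_* result is consumed by the victim-selection loop in this same file; the following is an abridged sketch of how a caller along the lines of select_bad_process() reacts to each value (not the verbatim kernel loop; details vary by version):

	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	unsigned long chosen_points = 0;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		switch (oom_scan_process_thread(p, totalpages, nodemask,
						force_kill)) {
		case OOM_SCAN_SELECT:
			/* task should be chosen outright, e.g. oom_task_origin() */
			chosen = p;
			chosen_points = ULONG_MAX;
			/* fall through */
		case OOM_SCAN_CONTINUE:
			continue;	/* unkillable or outside our nodemask */
		case OOM_SCAN_ABORT:
			/* a parallel OOM kill is still in flight: back off */
			rcu_read_unlock();
			return (struct task_struct *)(-1UL);
		case OOM_SCAN_OK:
			break;		/* fall through to badness scoring */
		};
		/* ... compare oom_badness(p, ...) against chosen_points ... */
	}
	rcu_read_unlock();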
@@ -406,6 +404,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 	dump_tasks(memcg, nodemask);
 }
 
+/*
+ * Number of OOM killer invocations (including memcg OOM killer).
+ * Primarily used by PM freezer to check for potential races with
+ * OOM killed frozen task.
+ */
+static atomic_t oom_kills = ATOMIC_INIT(0);
+
+int oom_kills_count(void)
+{
+	return atomic_read(&oom_kills);
+}
+
+void note_oom_kill(void)
+{
+	atomic_inc(&oom_kills);
+}
+
 #define K(x) ((x) << (PAGE_SHIFT-10))
 
 /*
  * Must be called while holding a reference to p, which will be released upon
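The new oom_kills counter has no reader in this file; as the comment says, it is meant for the PM freezer, with note_oom_kill() expected to be called from the page allocator just before it invokes the OOM killer. A minimal sketch of the intended snapshot-and-compare pattern on the freezer side (hypothetical simplification; try_to_freeze_user_and_kernel_tasks() is a made-up placeholder, the real consumer is the freezing loop in kernel/power/process.c):

	int oom_kills_saved = oom_kills_count();	/* snapshot before freezing */

	while (try_to_freeze_user_and_kernel_tasks()) {	/* hypothetical helper */
		if (oom_kills_count() != oom_kills_saved) {
			/*
			 * The OOM killer fired while tasks were freezing. A
			 * frozen victim cannot exit and free its memory, so
			 * abort the freeze and retry rather than suspending
			 * with the kill still pending.
			 */
			return -EBUSY;
		}
	}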
@@ -559,28 +574,25 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
  * if a parallel OOM killing is already taking place that includes a zone in
  * the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
  */
-int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
+bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
 {
 	struct zoneref *z;
 	struct zone *zone;
-	int ret = 1;
+	bool ret = true;
 
 	spin_lock(&zone_scan_lock);
-	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
-		if (zone_is_oom_locked(zone)) {
-			ret = 0;
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
+		if (test_bit(ZONE_OOM_LOCKED, &zone->flags)) {
+			ret = false;
 			goto out;
 		}
-	}
-	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
-		/*
-		 * Lock each zone in the zonelist under zone_scan_lock so a
-		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
-		 * when it shouldn't.
-		 */
-		zone_set_flag(zone, ZONE_OOM_LOCKED);
-	}
+	/*
+	 * Lock each zone in the zonelist under zone_scan_lock so a parallel
+	 * call to oom_zonelist_trylock() doesn't succeed when it shouldn't.
+	 */
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
+		set_bit(ZONE_OOM_LOCKED, &zone->flags);
 out:
 	spin_unlock(&zone_scan_lock);
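The rename from try_set_zonelist_oom() to oom_zonelist_trylock() (and from clear_zonelist_oom() to oom_zonelist_unlock() below) follows the kernel's trylock/unlock naming convention, and the bool return replaces the old 0/1 int. The caller pattern in the page allocator's OOM path looks roughly like this (an abridged sketch in the spirit of __alloc_pages_may_oom(); exact code varies by version):

	struct page *page = NULL;

	/* Serialize OOM kills per zonelist: only one killer at a time. */
	if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
		/* Someone else is OOM killing; sleep and let the allocation retry. */
		schedule_timeout_uninterruptible(1);
		return NULL;
	}

	/* ... retry the allocation once more before killing anything ... */

	out_of_memory(zonelist, gfp_mask, order, nodemask, false);
	oom_zonelist_unlock(zonelist, gfp_mask);
	return page;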
@@ -592,15 +604,14 @@ out:
  * allocation attempts with zonelists containing them may now recall the OOM
  * killer, if necessary.
  */
-void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
+void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
 {
 	struct zoneref *z;
 	struct zone *zone;
 
 	spin_lock(&zone_scan_lock);
-	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
-		zone_clear_flag(zone, ZONE_OOM_LOCKED);
-	}
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
+		clear_bit(ZONE_OOM_LOCKED, &zone->flags);
 	spin_unlock(&zone_scan_lock);
 }
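In both functions, test_bit(), set_bit() and clear_bit() now operate on zone->flags directly. The replaced helpers were one-line wrappers around the same atomic bitops, roughly as follows (recalled from include/linux/mmzone.h of this era, so treat as a sketch rather than the exact header text):

	static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
	{
		set_bit(flag, &zone->flags);
	}

	static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
	{
		clear_bit(flag, &zone->flags);
	}

	static inline int zone_is_oom_locked(const struct zone *zone)
	{
		return test_bit(ZONE_OOM_LOCKED, &zone->flags);
	}

The conversion is therefore behavior-preserving; it only removes a layer of indirection.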
@@ -694,9 +705,9 @@ void pagefault_out_of_memory(void)
 	if (mem_cgroup_oom_synchronize(true))
 		return;
 
-	zonelist = node_zonelist(first_online_node, GFP_KERNEL);
-	if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
+	zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
+	if (oom_zonelist_trylock(zonelist, GFP_KERNEL)) {
 		out_of_memory(NULL, 0, 0, NULL, false);
-		clear_zonelist_oom(zonelist, GFP_KERNEL);
+		oom_zonelist_unlock(zonelist, GFP_KERNEL);
 	}
 }
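Switching from first_online_node to first_memory_node matters on NUMA configurations where the first online node is memoryless: its zonelist would offer no usable zones to the OOM killer. Both macros reduce to lookups in the node state masks, roughly (per include/linux/nodemask.h in the NUMA case; the single-node stubs differ):

#define first_online_node	first_node(node_states[N_ONLINE])
#define first_memory_node	first_node(node_states[N_MEMORY])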