author		Tim Murray <timmurray@google.com>	2016-02-29 10:10:34 -0800
committer	Moyster <oysterized@gmail.com>		2019-05-03 14:43:37 +0200
commit		006808c40aaad8d3814bd78915bf37c307bddcf7 (patch)
tree		fb0470076705ea8f7b4a8c8f8d4d68b7c65a57ab
parent		e0bc10623f326f2919faa00a4b59c57040664b9a (diff)
mm: adjust page migration heuristic
The page allocator's heuristic for deciding when to migrate page blocks to unmovable seems to have been tuned on architectures that do not have kernel drivers making unmovable allocations of several megabytes or greater, i.e., no cameras or shared-memory GPUs. The number of allocations from these drivers may be unbounded and may occupy a significant percentage of overall system memory (>50%). As a result, every Android device has suffered to some extent from increasing fragmentation over time due to unmovable page block migration.

This change adjusts the page migration heuristic to only migrate page blocks for unmovable allocations when the requested allocation is order-5 or greater. This prevents migration caused by GPU and ion allocations, so long as kernel drivers allocate memory at runtime using order-4 or smaller pages.

Experimental results running the Android longevity test suite on a Nexus 5X for 10 hours:

old heuristic: 116 unmovable blocks after boot -> 281 unmovable blocks
new heuristic: 105 unmovable blocks after boot -> 101 unmovable blocks

bug 26916944

Change-Id: I5b7ccbbafa4049a2f47f399df4cb4779689f4c40
(cherry picked from commit f0e444d2ebab56eedc22fdc3d5376e41e66cce6c)
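For context, here is a minimal standalone sketch (not part of the patch) of how allocation order maps to allocation size, assuming the 4 KiB page size typical on these devices. It shows why the order >= 5 threshold corresponds to the "up to 64K" cutoff mentioned in the patch's in-line comment: order 4 is 64 KiB, order 5 is 128 KiB.

/* Illustrative only: order-to-size arithmetic, assuming PAGE_SIZE = 4 KiB. */
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed 4 KiB pages */

int main(void)
{
	for (int order = 0; order <= 6; order++)
		printf("order %d -> %lu KiB%s\n", order,
		       (PAGE_SIZE << order) / 1024,
		       order >= 5 ? " (may steal pageblock if unmovable)" : "");
	return 0;
}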
 mm/page_alloc.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2c12348ac..1135f4a10 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1318,7 +1318,8 @@ static int preferred_mt = MIGRATE_MOVABLE;
  * half of the pageblock, change pageblock's migratetype as well.
  */
 static void try_to_steal_freepages(struct zone *zone, struct page *page,
-				  int start_type, int fallback_type)
+				  int start_type, int fallback_type,
+				  int start_order)
 {
 	int current_order = page_order(page);
@@ -1330,7 +1331,8 @@ static void try_to_steal_freepages(struct zone *zone, struct page *page,
 	if (current_order >= pageblock_order / 2 ||
 	    start_type == MIGRATE_RECLAIMABLE ||
-	    start_type == MIGRATE_UNMOVABLE ||
+	    /* allow unmovable allocs up to 64K without migrating blocks */
+	    (start_type == MIGRATE_UNMOVABLE && start_order >= 5) ||
 	    page_group_by_mobility_disabled) {
 		int pages;
@@ -1408,8 +1410,8 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 		if (!is_migrate_cma(migratetype)) {
 			try_to_steal_freepages(zone, page,
-					       start_migratetype,
-					       migratetype);
+					       start_migratetype,
+					       migratetype, order);
 		} else {
 			/*
 			 * When borrowing from MIGRATE_CMA, we need to
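As an illustrative aside, the patched condition can be restated as a standalone predicate. The names mirror the kernel's (pageblock_order, the MIGRATE_* types), but this is a simplified model with assumed enum values, not the in-tree code:

/*
 * Sketch of the patched heuristic in try_to_steal_freepages().
 * Simplified stand-ins for kernel symbols; illustrative only.
 */
#include <stdbool.h>

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE };

static bool should_steal_pageblock(int current_order, int pageblock_order,
				   enum migratetype start_type, int start_order,
				   bool page_group_by_mobility_disabled)
{
	return current_order >= pageblock_order / 2 ||
	       start_type == MIGRATE_RECLAIMABLE ||
	       /* allow unmovable allocs up to 64K without migrating blocks */
	       (start_type == MIGRATE_UNMOVABLE && start_order >= 5) ||
	       page_group_by_mobility_disabled;
}

With this shape, an order-4 (64 KiB) unmovable allocation falls through to the fallback path without converting the pageblock, while an order-5 (128 KiB) one still steals it, matching the before/after block counts reported in the commit message.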