diff options
| -rw-r--r-- | include/linux/gfp.h | 22 | ||||
| -rw-r--r-- | include/linux/highmem.h | 2 | ||||
| -rw-r--r-- | include/linux/pagemap.h | 2 | ||||
| -rw-r--r-- | mm/memory.c | 6 | ||||
| -rw-r--r-- | mm/page_alloc.c | 20 |
5 files changed, 31 insertions, 21 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index d5e542b38..455a1dec9 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -37,9 +37,7 @@ struct vm_area_struct; #define ___GFP_WRITE 0x1000000u #define ___GFP_SLOWHIGHMEM 0x2000000u #define ___GFP_NOMTKPASR 0x4000000u -#if defined(CONFIG_CMA) && defined(CONFIG_MTK_SVP) -#define ___GFP_NOZONECMA 0x8000000u -#endif +#define ___GFP_CMA 0x8000000u /* If the above are modified, __GFP_BITS_SHIFT may need updating */ /* @@ -99,9 +97,7 @@ struct vm_area_struct; #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ #define __GFP_SLOWHIGHMEM ((__force gfp_t)___GFP_SLOWHIGHMEM) /* use highmem only in slowpath */ #define __GFP_NOMTKPASR ((__force gfp_t)___GFP_NOMTKPASR) /* Memory allocation can't be extended to MTKPASR-imposed range */ -#if defined(CONFIG_CMA) && defined(CONFIG_MTK_SVP) -#define __GFP_NOZONECMA ((__force gfp_t)___GFP_NOZONECMA) /* Memory allocation can't be extended to ZONE CMA */ -#endif +#define __GFP_CMA ((__force gfp_t)___GFP_CMA) /* ZONE_MOVABLE needs this flag to allocate in */ /* * This may seem redundant, but it's a way of annotating false positives vs. 
@@ -109,11 +105,7 @@ struct vm_area_struct; */ #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) -#if !defined(CONFIG_CMA) || !defined(CONFIG_MTK_SVP) -#define __GFP_BITS_SHIFT 27 /* Room for N __GFP_FOO bits */ -#else #define __GFP_BITS_SHIFT 28 /* Room for N __GFP_FOO bits */ -#endif #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /* This equals 0, but use constants in case they ever change */ @@ -298,10 +290,12 @@ static inline enum zone_type gfp_zone(gfp_t flags) ((1 << ZONES_SHIFT) - 1); VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1); -#if defined(CONFIG_CMA) && defined(CONFIG_MTK_SVP) - if ((flags & __GFP_NOZONECMA) && is_zone_cma_idx(z)) - z = ZONE_MOVABLE; -#endif + /* used for limit - only the flags with __GFP_CMA can go into ZONE_MOVABLE */ + + /* do not allocate at ZONE_MOVABLE without __GFP_CMA */ + if (IS_ENABLED(CONFIG_ZONE_MOVABLE_CMA)) + if (z == ZONE_MOVABLE && !(flags & __GFP_CMA)) + z -= 1; return z; } diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 41a50714f..db8c6cddf 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -180,7 +180,7 @@ static inline struct page * alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, unsigned long vaddr) { - return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); + return __alloc_zeroed_user_highpage(__GFP_MOVABLE | __GFP_CMA, vma, vaddr); } static inline void clear_highpage(struct page *page) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index f9b069d66..862619968 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -238,7 +238,7 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x) static inline struct page *page_cache_alloc_readahead(struct address_space *x) { return __page_cache_alloc(mapping_gfp_mask(x) | - __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN); + __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN | __GFP_CMA); } typedef int filler_t(void *, struct page *); diff --git a/mm/memory.c 
b/mm/memory.c index a59ca8b28..12d825704 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2823,11 +2823,7 @@ gotten: if (!new_page) goto oom; } else { -#if !defined(CONFIG_CMA) || !defined(CONFIG_MTK_SVP) - new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); -#else - new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_NOZONECMA, vma, address); -#endif + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_CMA, vma, address); if (!new_page) goto oom; cow_user_page(new_page, old_page, address, vma); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1139bde1d..7c8b32ab5 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3090,6 +3090,26 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) return NULL; + /* + * add special case when gfp_mask only has __GFP_HIGHMEM + __GFP_CMA, + * reassign high_zoneidx to select zone_movable as first choice + */ + if ((gfp_mask & (GFP_ZONEMASK|__GFP_CMA)) == (__GFP_HIGHMEM | __GFP_CMA)) { + gfp_mask |= __GFP_MOVABLE; + high_zoneidx = gfp_zone(gfp_mask); + migratetype = allocflags_to_migratetype(gfp_mask); + } + + if (IS_ENABLED(CONFIG_CMA) && migratetype == MIGRATE_MOVABLE) { + if (gfp_mask & __GFP_CMA) { + /* Assign high watermark for __GFP_CMA page allocation */ + + alloc_flags &= ~ALLOC_WMARK_MASK; + alloc_flags |= ALLOC_WMARK_HIGH; + } + alloc_flags |= ALLOC_CMA; + } + retry_cpuset: cpuset_mems_cookie = get_mems_allowed(); |
