author    Levin Calado <levincalado@gmail.com>    2015-06-14 23:24:15 +0800
committer Moyster <oysterized@gmail.com>          2016-08-26 20:36:21 +0200
commit    3a34dc17318f9bf09dbd7d10ac087a8a75f5f629 (patch)
tree      8ea4d532a0a486879f369d7cd61d2d03edeb6618 /mm/memory.c
parent    5cff97db281b250545152284249a258a4f5c5ccf (diff)
add uksm 0.1.2.3 for v3.10 .ge.46.patch
Conflicts:
        fs/exec.c

Signed-off-by: Stefan Guendhoer <stefan@guendhoer.com>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--    mm/memory.c    45
1 file changed, 40 insertions(+), 5 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 1b5057d4f..abc052b4e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -123,6 +123,27 @@ __setup("norandmaps", disable_randmaps);
unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
+#ifdef CONFIG_UKSM
+unsigned long uksm_zero_pfn __read_mostly;
+struct page *empty_uksm_zero_page;
+
+static int __init setup_uksm_zero_page(void)
+{
+ unsigned long addr;
+ addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
+ if (!addr)
+ panic("Oh boy, that early out of memory?");
+
+ empty_uksm_zero_page = virt_to_page((void *) addr);
+ SetPageReserved(empty_uksm_zero_page);
+
+ uksm_zero_pfn = page_to_pfn(empty_uksm_zero_page);
+
+ return 0;
+}
+core_initcall(setup_uksm_zero_page);
+#endif
+
/*
* CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
*/
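
Note: the hooks added further down recognize the shared UKSM zero page by comparing a PTE's pfn against the uksm_zero_pfn recorded above; SetPageReserved() keeps the page from ever being freed or migrated, so that pfn stays valid for the lifetime of the system. A minimal illustrative helper (hypothetical name; the patch's own header is not part of this diff):

static inline int is_uksm_zero_pfn(unsigned long pfn)
{
        /* Hypothetical helper, not in the patch: true when a pfn
         * refers to the UKSM zero page set up above. */
        return pfn == uksm_zero_pfn;
}
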
@@ -134,6 +155,7 @@ static int __init init_zero_pfn(void)
core_initcall(init_zero_pfn);
+
#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm)
@@ -900,6 +922,11 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
rss[MM_ANONPAGES]++;
else
rss[MM_FILEPAGES]++;
+
+ /* Should return NULL in vm_normal_page() */
+ uksm_bugon_zeropage(pte);
+ } else {
+ uksm_map_zero_page(pte);
}
out_set_pte:
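
Note: uksm_map_zero_page() and uksm_bugon_zeropage() are defined elsewhere in the UKSM patch, outside this diff. A sketch of the behavior their call sites imply; the counter name and bookkeeping are assumptions, not UKSM's actual implementation:

/* Sketch only: counter and bookkeeping assumed from the call sites. */
static atomic_long_t uksm_zero_mappings = ATOMIC_LONG_INIT(0);

static inline void uksm_map_zero_page(pte_t pte)
{
        /* fork duplicated a PTE with no normal page behind it; if it
         * points at the UKSM zero page, account for the new mapping. */
        if (pte_pfn(pte) == uksm_zero_pfn)
                atomic_long_inc(&uksm_zero_mappings);
}

static inline void uksm_bugon_zeropage(pte_t pte)
{
        /* vm_normal_page() returned a page, so this PTE must not
         * reference the UKSM zero page; trap accounting bugs early. */
        BUG_ON(pte_pfn(pte) == uksm_zero_pfn);
}
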
@@ -1142,8 +1169,10 @@ again:
ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);
- if (unlikely(!page))
+ if (unlikely(!page)) {
+ uksm_unmap_zero_page(ptent);
continue;
+ }
if (unlikely(details) && details->nonlinear_vma
&& linear_page_index(details->nonlinear_vma,
addr) != page->index)
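
Note: the teardown side mirrors the sketch above. When zap_pte_range() clears a PTE for which vm_normal_page() found no page, the hook can release the zero-page accounting (again an assumption drawn from the call site, not UKSM's code):

static inline void uksm_unmap_zero_page(pte_t ptent)
{
        /* Sketch only: balances uksm_map_zero_page() above. */
        if (pte_pfn(ptent) == uksm_zero_pfn)
                atomic_long_dec(&uksm_zero_mappings);
}
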
@@ -1719,7 +1748,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
- /*
+ /*
* Require read or write permissions.
* If FOLL_FORCE is set, we only require the "MAY" flags.
*/
@@ -1779,7 +1808,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
page = vm_normal_page(vma, start, *pte);
if (!page) {
if (!(gup_flags & FOLL_DUMP) &&
- is_zero_pfn(pte_pfn(*pte)))
+ (is_zero_pfn(pte_pfn(*pte))))
page = pte_page(*pte);
else {
pte_unmap(pte);
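
Note: this hunk only parenthesizes the zero-pfn test, which reads like preparation for an extra disjunct. Purely as an assumption (the widened condition is not shown in this diff), a UKSM tree could let GUP resolve its zero page the same way the kernel's own is handled:

        /* Assumption, not in this hunk: also accept the UKSM zero pfn
         * so GUP without FOLL_DUMP can return that page as well. */
        if (!(gup_flags & FOLL_DUMP) &&
            (is_zero_pfn(pte_pfn(*pte)) ||
             pte_pfn(*pte) == uksm_zero_pfn))
                page = pte_page(*pte);
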
@@ -2619,8 +2648,10 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
clear_page(kaddr);
kunmap_atomic(kaddr);
flush_dcache_page(dst);
- } else
+ } else {
copy_user_highpage(dst, src, va, vma);
+ uksm_cow_page(vma, src);
+ }
}
/*
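
Note: uksm_cow_page() runs only after copy_user_highpage() has actually copied the source page, i.e. when copy-on-write breaks sharing on a real page. A sketch of what the call site implies, with the statistic name assumed:

/* Sketch only: statistic name is an assumption. Breaking COW on a
 * KSM/UKSM-merged page ends the dedup win for this mapping. */
static atomic_long_t uksm_cow_broken = ATOMIC_LONG_INIT(0);

static inline void uksm_cow_page(struct vm_area_struct *vma,
                                 struct page *page)
{
        if (PageKsm(page))
                atomic_long_inc(&uksm_cow_broken);
}
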
@@ -2823,6 +2854,7 @@ gotten:
#endif
if (!new_page)
goto oom;
+ uksm_cow_pte(vma, orig_pte);
} else {
#if !defined(CONFIG_CMA) || !defined(CONFIG_MTK_SVP)
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
@@ -2852,8 +2884,11 @@ gotten:
dec_mm_counter_fast(mm, MM_FILEPAGES);
inc_mm_counter_fast(mm, MM_ANONPAGES);
}
- } else
+ uksm_bugon_zeropage(orig_pte);
+ } else {
+ uksm_unmap_zero_page(orig_pte);
inc_mm_counter_fast(mm, MM_ANONPAGES);
+ }
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
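
Note: taken together, the write-protect fault path keeps the accounting balanced: an old PTE backed by a normal page must never have been the UKSM zero page, while an old PTE with no normal page may have been one and is released before the new anonymous page goes in. Condensed into one illustrative helper using the assumed counter from the sketches above:

static inline void uksm_wp_account(pte_t orig_pte, bool had_normal_page)
{
        if (had_normal_page)
                /* a normal page is never the UKSM zero page */
                BUG_ON(pte_pfn(orig_pte) == uksm_zero_pfn);
        else if (pte_pfn(orig_pte) == uksm_zero_pfn)
                /* zero-page mapping is being replaced by a new page */
                atomic_long_dec(&uksm_zero_mappings);
}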