aboutsummaryrefslogtreecommitdiff
path: root/mm/memory.c
diff options
context:
space:
mode:
authorMister Oyster <oysterized@gmail.com>2017-07-03 12:14:46 +0200
committerMister Oyster <oysterized@gmail.com>2017-07-04 12:11:29 +0200
commitaf79a782f868c860793e76ed5ec1ae5bb4af4187 (patch)
treee945bb42c4077c26bd12f589f9f0f37d39207b03 /mm/memory.c
parentaefc1dfa10a78d3bc1c6bb40df9c44d9ce5c0823 (diff)
uksm: remove Mtk aksm & uksm (because it's fugly)
Revert "KSM: mediatek: implement Adaptive KSM" Revert "mm: uksm: fix maybe-uninitialized warning" Revert "UKSM: Add Governors for Higher CPU usage (HighCPU) for more merging, and low cpu usage (Battery) for less battery drain" Revert "uksm: use deferrable timer" Revert "mm: limit UKSM sleep time instead of failing" Revert "uksm: Fix warning" Revert "uksm: clean up and remove some (no)inlines" Revert "uksm: modify ema logic and tidy up" Revert "uksm: enhancements and cleanups" Revert "uksm: squashed fixups" Revert "UKSM: cast variable as const" Revert "UKSM: remove U64_MAX definition" Revert "add uksm 0.1.2.3 for v3.10 .ge.46.patch"
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c45
1 files changed, 5 insertions, 40 deletions
diff --git a/mm/memory.c b/mm/memory.c
index aa331bd4d..f194520a3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -123,27 +123,6 @@ __setup("norandmaps", disable_randmaps);
unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
-#ifdef CONFIG_UKSM
-unsigned long uksm_zero_pfn __read_mostly;
-struct page *empty_uksm_zero_page;
-
-static int __init setup_uksm_zero_page(void)
-{
- unsigned long addr;
- addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
- if (!addr)
- panic("Oh boy, that early out of memory?");
-
- empty_uksm_zero_page = virt_to_page((void *) addr);
- SetPageReserved(empty_uksm_zero_page);
-
- uksm_zero_pfn = page_to_pfn(empty_uksm_zero_page);
-
- return 0;
-}
-core_initcall(setup_uksm_zero_page);
-#endif
-
/*
* CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
*/
@@ -155,7 +134,6 @@ static int __init init_zero_pfn(void)
core_initcall(init_zero_pfn);
-
#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm)
@@ -922,11 +900,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
rss[MM_ANONPAGES]++;
else
rss[MM_FILEPAGES]++;
-
- /* Should return NULL in vm_normal_page() */
- uksm_bugon_zeropage(pte);
- } else {
- uksm_map_zero_page(pte);
}
out_set_pte:
@@ -1169,10 +1142,8 @@ again:
ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);
- if (unlikely(!page)) {
- uksm_unmap_zero_page(ptent);
+ if (unlikely(!page))
continue;
- }
if (unlikely(details) && details->nonlinear_vma
&& linear_page_index(details->nonlinear_vma,
addr) != page->index)
@@ -1758,7 +1729,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
- /*
+ /*
* Require read or write permissions.
* If FOLL_FORCE is set, we only require the "MAY" flags.
*/
@@ -1818,7 +1789,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
page = vm_normal_page(vma, start, *pte);
if (!page) {
if (!(gup_flags & FOLL_DUMP) &&
- (is_zero_pfn(pte_pfn(*pte))))
+ is_zero_pfn(pte_pfn(*pte)))
page = pte_page(*pte);
else {
pte_unmap(pte);
@@ -2658,10 +2629,8 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
clear_page(kaddr);
kunmap_atomic(kaddr);
flush_dcache_page(dst);
- } else {
+ } else
copy_user_highpage(dst, src, va, vma);
- uksm_cow_page(vma, src);
- }
}
/*
@@ -2864,7 +2833,6 @@ gotten:
#endif
if (!new_page)
goto oom;
- uksm_cow_pte(vma, orig_pte);
} else {
#if !defined(CONFIG_CMA) || !defined(CONFIG_MTK_SVP)
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
@@ -2894,11 +2862,8 @@ gotten:
dec_mm_counter_fast(mm, MM_FILEPAGES);
inc_mm_counter_fast(mm, MM_ANONPAGES);
}
- uksm_bugon_zeropage(orig_pte);
- } else {
- uksm_unmap_zero_page(orig_pte);
+ } else
inc_mm_counter_fast(mm, MM_ANONPAGES);
- }
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);