about summary refs log tree commit diff
diff options
context:
space:
mode:
authorDerTeufel <dominik-kassel@gmx.de>2015-10-18 19:20:25 +0200
committerMister Oyster <oysterized@gmail.com>2016-12-11 12:51:27 +0100
commit19128b370cd8188e548c25659e2f7c1eec5d7153 (patch)
tree5eb95f403e917667d6ea7dd769e0fe8985b718fb
parent3d9f3ece074ad2d49551c011b05ee0d439921439 (diff)
use zram implementation from 3.10.y
who knows where mtk took their driver from ....
-rw-r--r--drivers/staging/zram/Kconfig6
-rw-r--r--drivers/staging/zram/zram_drv.c658
-rw-r--r--drivers/staging/zram/zram_drv.h33
-rw-r--r--drivers/staging/zram/zram_sysfs.c51
4 files changed, 16 insertions, 732 deletions
diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig
index bf7b4d913..983314c41 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/staging/zram/Kconfig
@@ -23,9 +23,3 @@ config ZRAM_DEBUG
help
This option adds additional debugging code to the compressed
RAM block device driver.
-config ZSM
- bool "Finding and merging same pages support"
- depends on ZRAM
- default n
- help
- This option enhance ZRAM by finding and merging same pages in ZRAM.
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index a7ba43419..a333d44d0 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -32,379 +32,12 @@
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#ifdef CONFIG_ZSM
-#include <linux/rbtree.h>
-#include <linux/time.h>
-#endif
#include "zram_drv.h"
-#ifdef CONFIG_MT_ENG_BUILD
-#define GUIDE_BYTES_LENGTH 64
-#define GUIDE_BYTES_HALFLEN 32
-#define GUIDE_BYTES (0x0)
-#endif
-
/* Globals */
static int zram_major;
-struct zram *zram_devices = NULL;
-
-/* Compression/Decompression hooks */
-static comp_hook zram_compress = NULL;
-static decomp_hook zram_decompress = NULL;
-static const char *zram_comp = NULL;
-#ifdef CONFIG_ZSM
-static struct rb_root root_zram_tree = RB_ROOT;
-static struct rb_root root_zram_tree_4k = RB_ROOT;
-spinlock_t zram_node_mutex;
-spinlock_t zram_node4k_mutex;
-
-static int zram_test_flag(struct zram_meta *meta, u32 index,
- enum zram_pageflags flag);
-
-static void zram_set_flag(struct zram_meta *meta, u32 index,
- enum zram_pageflags flag);
-
-static void zram_clear_flag(struct zram_meta *meta, u32 index,
- enum zram_pageflags flag);
-static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc);
-static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec);
-static void zram_stat64_inc(struct zram *zram, u64 *v);
-static int zsm_test_flag(struct zram_meta *meta, struct table *node,
- enum zram_pageflags flag)
-{
- return node->flags & BIT(flag);
-}
-
-static void zsm_set_flag(struct zram_meta *meta, struct table *node,
- enum zram_pageflags flag)
-{
- node->flags |= BIT(flag);
-}
-
-static struct table * search_node_in_zram_list(struct zram *zram,struct zram_meta *meta,struct table *input_node,struct table *found_node,unsigned char *match_content)
-{
- struct list_head *list_node = NULL;
- struct table *current_node = NULL;
- unsigned char *cmem;
- int one_node_in_list = 0;
- int compare_count = 0;
- int ret;
-
- list_node = found_node->head.next;
- if(list_node == &(found_node->head))
- one_node_in_list = 1;
- while((list_node != &(found_node->head))||one_node_in_list)
- {
- one_node_in_list = 0;
- current_node = list_entry(list_node, struct table, head);
- if((input_node->size != current_node->size)||!zsm_test_flag(meta, current_node, ZRAM_FIRST_NODE))
- {
- list_node = list_node->next;
- }
- else
- {
- cmem = zs_map_object(meta->mem_pool, current_node->handle, ZS_MM_RO);
-#ifdef CONFIG_MT_ENG_BUILD
- /* Move to the start of bitstream */
- if(current_node->size != PAGE_SIZE)
- cmem += GUIDE_BYTES_HALFLEN;
-#endif
-
- ret = memcmp(cmem,match_content,input_node->size);
- compare_count++;
- if(ret == 0)
- {
- zs_unmap_object(meta->mem_pool, current_node->handle);
- return current_node;
- }
- else
- {
- list_node = list_node->next;
- }
- zs_unmap_object(meta->mem_pool, current_node->handle);
- }
- }
- return NULL;
-}
-static struct table *search_node_in_zram_tree(struct table *input_node,struct rb_node **parent_node,struct rb_node ***new_node, unsigned char *match_content,struct rb_root *local_root_zram_tree)
-{
- struct rb_node **new = &(local_root_zram_tree->rb_node);
- struct table *current_node = NULL;
- struct rb_node *parent = NULL;
-
- current_node = rb_entry(*new,struct table, node);
- if(input_node == NULL)
- {
- printk("[zram][search_node_in_zram_tree] input_node is NULL\n");
- return NULL;
- }
- if(current_node == NULL)
- {
- *new_node = new;
- *parent_node = NULL;
- return NULL;
- }
-
- while(*new)
- {
- current_node = rb_entry(*new,struct table, node);
- parent = *new;
- if(input_node->checksum > current_node->checksum)
- {
- new = &parent->rb_right;
- }
- else if(input_node->checksum < current_node->checksum)
- {
- new = &parent->rb_left;
- }
- else
- {
- if(input_node->size > current_node->size)
- {
- new = &parent->rb_right;
- }
- else if(input_node->size < current_node->size)
- {
- new = &parent->rb_left;
- }
- else
- {
- return current_node;
- }
- }
- }
- *parent_node = parent;
- *new_node = new;
- return NULL;
-}
-static u32 insert_node_to_zram_tree(struct zram *zram,struct zram_meta *meta,u32 index, unsigned char *match_content,struct rb_root *local_root_zram_tree)
-{
- struct table *current_node = NULL;
- struct table *node_in_list = NULL;
- struct rb_node *parent = NULL;
- struct rb_node **new = NULL;
- struct table *input_node;
- static int node_count = 0;
-
- input_node = &(meta->table[index]);
- current_node = search_node_in_zram_tree(input_node,&parent,&new,match_content,local_root_zram_tree);
- node_count++;
-
- //found node in zram_tree
- if(NULL != current_node)
- {
- if(!zsm_test_flag(meta,current_node,ZRAM_RB_NODE))
- {
- printk("[ZRAM]ERROR !!found wrong rb node 0x%p\n",(void *)current_node);
- BUG_ON(1);
- }
-
- //check if there is any other node in this position.
- node_in_list = search_node_in_zram_list(zram,meta,input_node,current_node,match_content);
-
-
- //found the same node in list
- if(NULL != node_in_list)
- {
- //insert node after the found node
- if(!zsm_test_flag(meta,current_node,ZRAM_FIRST_NODE))
- {
- printk("[ZRAM]ERROR !!found wrong first node 0x%p\n",(void *)node_in_list);
- BUG_ON(1);
- }
- input_node->next_index = node_in_list->next_index;
- node_in_list->next_index = index;
- input_node->copy_index = node_in_list->copy_index;
-
- //found the same node and add ref count
- node_in_list->copy_count++;
- if (unlikely(input_node->size > max_zpage_size))
- {
- zram_stat64_add(zram,&zram->stats.zsm_saved4k, (u64)input_node->size);
- }
- else
- {
- zram_stat64_add(zram,&zram->stats.zsm_saved, (u64)input_node->size);
- }
- input_node->handle = node_in_list->handle;
- list_add(&input_node->head,&node_in_list->head);
- return 1;
- }
- else //can't found node in list
- {
- zram_set_flag(meta, index, ZRAM_FIRST_NODE);
- list_add(&input_node->head,&current_node->head);
- }
- }
- else
- {
- //insert node into rb tree
- zram_set_flag(meta, index, ZRAM_FIRST_NODE);
- zram_set_flag(meta, index, ZRAM_RB_NODE);
- rb_link_node(&(meta->table[index].node),parent,new);
- rb_insert_color(&(meta->table[index].node),local_root_zram_tree);
- }
- return 0;
-}
-static int remove_node_from_zram_list(struct zram *zram,struct zram_meta *meta,u32 index)
-{
- u32 next_index = 0xffffffff;
- u32 pre_index = 0xffffffff;
- u32 current_index = 0xffffffff;
- u32 copy_index = 0xffffffff;
- u32 i = 0;
-
- next_index = meta->table[index].next_index;
- list_del(&(meta->table[index].head));
-
- //check if there is the same content in list
- if(index != next_index) //found the same page content
- {
- if(zram_test_flag(meta, index, ZRAM_FIRST_NODE))//delete the fist node of content
- {
- if(meta->table[index].copy_count <= 0)
- {
- printk("[ZRAM]ERROR !!count < 0\n ");
- BUG_ON(1);
- return 1;
- }
- current_index = meta->table[next_index].next_index;
- meta->table[next_index].copy_index = next_index;
- pre_index = next_index;
- while(current_index != index)
- {
- i++;
- if(i>= 4096 && (i%1000 == 0))
- {
- printk("[ZRAM]ERROR !!can't find meta->table[%d].size %d chunksum %x in list\n",index,meta->table[index].size,meta->table[index].checksum);
- if(i > meta->table[index].copy_count)
- {
- BUG_ON(1);
- break;
- }
- }
- meta->table[current_index].copy_index = next_index;
- pre_index = current_index;
- current_index = meta->table[current_index].next_index;
- }
- meta->table[pre_index].next_index = meta->table[index].next_index;
- meta->table[next_index].copy_count = meta->table[index].copy_count - 1;
- zram_clear_flag(meta, index, ZRAM_FIRST_NODE);
- zram_set_flag(meta, next_index, ZRAM_FIRST_NODE);
- }
- else
- {
- current_index = meta->table[index].copy_index;
- pre_index = current_index;
- current_index = meta->table[current_index].next_index;
- while(index != current_index)
- {
- i++;
- if(i>= 4096 && (i%1000 == 0))
- {
- u32 tmp_index = 0;
- printk("[ZRAM]ERROR !!can't find2 meta->table[%d].size %d chunksum %d in list\n",index,meta->table[index].size,meta->table[index].checksum);
- tmp_index = meta->table[current_index].copy_index;
- if(i > meta->table[tmp_index].copy_count)
- {
- BUG_ON(1);
- break;
- }
- }
- pre_index = current_index;
- current_index = meta->table[current_index].next_index;
- }
- meta->table[pre_index].next_index = meta->table[index].next_index;
- copy_index = meta->table[index].copy_index;
- meta->table[copy_index].copy_count = meta->table[copy_index].copy_count - 1;
- }
- if (unlikely(meta->table[index].size > max_zpage_size))
- {
- zram_stat64_sub(zram,&zram->stats.zsm_saved4k,(u64)meta->table[index].size);
- }
- else
- {
- zram_stat64_sub(zram,&zram->stats.zsm_saved,(u64)meta->table[index].size);
- }
- return 1;
- }
- else//can't found the same page content
- {
- if(zram_test_flag(meta, index, ZRAM_FIRST_NODE))
- {
- zram_clear_flag(meta, index, ZRAM_FIRST_NODE);
- }
- else
- {
- printk("[ZRAM]ERROR !!index != next_index and flag != ZRAM_FIRST_NODE index %x\n ",index);
- }
- if(meta->table[index].copy_count != 0)
- {
- printk("[ZRAM]ERROR !!index != next_index and count != 0 index %x\n ",index);
- }
- }
- return 0;
-}
-static int remove_node_from_zram_tree(struct zram *zram,struct zram_meta *meta,u32 index,struct rb_root *local_root_zram_tree)
-{
- int ret;
-
- //if it is rb node, choose other node from list and replace original node.
- if(zram_test_flag(meta, index, ZRAM_RB_NODE))
- {
- zram_clear_flag(meta, index, ZRAM_RB_NODE);
-
- //found next node in list
- if(&(meta->table[index].head) != meta->table[index].head.next)
- {
- struct table *next_table;
- next_table = list_entry(meta->table[index].head.next,struct table, head);
- rb_replace_node(&(meta->table[index].node),&(next_table->node),local_root_zram_tree);
- zsm_set_flag(meta,next_table, ZRAM_RB_NODE);
- ret = remove_node_from_zram_list(zram,meta,index);
- return ret;
- }
- else //if no other node can be found in list just remove node from rb tree and free handle
- {
- if(zram_test_flag(meta, index, ZRAM_FIRST_NODE))
- {
- zram_clear_flag(meta, index, ZRAM_FIRST_NODE);
- }
- else
- {
- printk("[ZRAM]ERROR !!ZRAM_RB_NODR's flag != ZRAM_FIRST_NODE index %x\n ",index);
- }
- rb_erase(&(meta->table[index].node),local_root_zram_tree);
- return 0;
- }
- }
- else
- {
- ret = remove_node_from_zram_list(zram,meta,index);
- return ret;
- }
-}
-#endif
-/* Set above hooks */
-void zram_set_hooks(void *compress_func, void *decompress_func, const char *name)
-{
-#ifdef CONFIG_ZSM
- printk(KERN_ALERT "\nZSM only supports LZO1X now.\n\n"); /* TODO: Add LZ4K or other algorithms. */
-#else
- if (name != NULL) {
- printk(KERN_ALERT "[%s] Compress[%p] Decompress[%p]\n",name, compress_func, decompress_func);
- zram_comp = name;
- } else
- printk(KERN_ALERT "[UNKNOWN] Compress[%p] Decompress[%p]\n", compress_func, decompress_func);
- zram_compress = (comp_hook)compress_func;
- zram_decompress = (decomp_hook)decompress_func;
- printk(KERN_ALERT "[%s][%d] ZCompress[%p] ZDecompress[%p]\n", __FUNCTION__, __LINE__, zram_compress, zram_decompress);
-#endif
-}
-EXPORT_SYMBOL(zram_set_hooks);
+struct zram *zram_devices;
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
@@ -466,9 +99,7 @@ static void zram_free_page(struct zram *zram, size_t index)
struct zram_meta *meta = zram->meta;
unsigned long handle = meta->table[index].handle;
u16 size = meta->table[index].size;
-#ifdef CONFIG_ZSM
- int ret = 0;
-#endif
+
if (unlikely(!handle)) {
/*
* No memory is allocated for zero filled pages.
@@ -483,30 +114,9 @@ static void zram_free_page(struct zram *zram, size_t index)
if (unlikely(size > max_zpage_size))
zram->stats.bad_compress--;
-#ifdef CONFIG_ZSM
- if(!zram_test_flag(meta, index, ZRAM_ZERO))
- {
- if(meta->table[index].size == PAGE_SIZE)
- {
- spin_lock(&zram_node4k_mutex);
- ret = remove_node_from_zram_tree(zram,meta,index,&root_zram_tree_4k);
- spin_unlock(&zram_node4k_mutex);
- }
- else
- {
- spin_lock(&zram_node_mutex);
- ret = remove_node_from_zram_tree(zram,meta,index,&root_zram_tree);
- spin_unlock(&zram_node_mutex);
- }
- }
- if(ret == 0)
- {
- zs_free(meta->mem_pool, handle);
- }
-#else
zs_free(meta->mem_pool, handle);
-#endif
+
if (size <= PAGE_SIZE / 2)
zram->stats.good_compress--;
@@ -535,29 +145,6 @@ static inline int is_partial_io(struct bio_vec *bvec)
return bvec->bv_len != PAGE_SIZE;
}
-#ifdef CONFIG_MT_ENG_BUILD
-static void zram_check_guidebytes(unsigned char *cmem, bool is_header)
-{
- int idx;
- for (idx = 0; idx < GUIDE_BYTES_HALFLEN; idx++) {
- if (*cmem != (unsigned char)GUIDE_BYTES) {
- if (is_header)
- printk(KERN_ERR "<<HEADER>>\n");
- else
- printk(KERN_ERR "<<TAIL>>\n");
-
- cmem -= idx;
- for (idx = 0; idx < GUIDE_BYTES_HALFLEN; idx++) {
- printk(KERN_ERR "%x ",(int)*cmem++);
- }
- printk(KERN_ERR "\n<<END>>\n");
- /* Just return */
- return;
- }
- cmem++;
- }
-}
-#endif
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
int ret = LZO_E_OK;
@@ -575,47 +162,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
if (meta->table[index].size == PAGE_SIZE)
memcpy(mem, cmem, PAGE_SIZE);
else
-#ifdef CONFIG_MT_ENG_BUILD
- {
- /* Check header */
- zram_check_guidebytes(cmem, true);
-
- /* Move to the start of bitstream */
- cmem += GUIDE_BYTES_HALFLEN;
-#endif
- ret = zram_decompress(cmem, meta->table[index].size,
+ ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
mem, &clen);
-#ifdef CONFIG_MT_ENG_BUILD
- /* Check tail */
- zram_check_guidebytes(cmem + meta->table[index].size, false);
- }
-#endif
-
zs_unmap_object(meta->mem_pool, handle);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
zram_stat64_inc(zram, &zram->stats.failed_reads);
-#ifdef CONFIG_MT_ENG_BUILD
- {
- int idx;
- size_t tlen;
- printk(KERN_ALERT "\n@@@@@@@@@@\n");
- tlen = meta->table[index].size + GUIDE_BYTES_LENGTH;
- cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
- /* Head guide bytes */
- for (idx = 0; idx < GUIDE_BYTES_HALFLEN; idx++) {
- printk(KERN_ALERT "%x ",(int)*cmem++);
- }
- printk(KERN_ALERT "\n=========\n");
- for (;idx < tlen; idx++) {
- printk(KERN_ALERT "%x ",(int)*cmem++);
- }
- zs_unmap_object(meta->mem_pool, handle);
- printk(KERN_ALERT "\n!!!!!!!!!\n");
- }
-#endif
return ret;
}
@@ -673,9 +227,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset)
{
int ret = 0;
-#ifdef CONFIG_ZSM
- int checksum = 0;
-#endif
size_t clen;
unsigned long handle;
struct page *page;
@@ -698,23 +249,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_decompress_page(zram, uncmem, index);
if (ret)
goto out;
-#ifdef CONFIG_ZSM
- if(!zram_test_flag(meta, index, ZRAM_ZERO))
- {
- if(meta->table[index].size == PAGE_SIZE)
- {
- spin_lock(&zram_node4k_mutex);
- ret = remove_node_from_zram_tree(zram,meta,index,&root_zram_tree_4k);
- spin_unlock(&zram_node4k_mutex);
- }
- else
- {
- spin_lock(&zram_node_mutex);
- ret = remove_node_from_zram_tree(zram,meta,index,&root_zram_tree);
- spin_unlock(&zram_node_mutex);
- }
- }
-#endif
}
/*
@@ -737,21 +271,16 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
}
if (page_zero_filled(uncmem)) {
- if (!is_partial_io(bvec))
- kunmap_atomic(user_mem);
+ kunmap_atomic(user_mem);
zram->stats.pages_zero++;
zram_set_flag(meta, index, ZRAM_ZERO);
ret = 0;
goto out;
}
-#ifdef CONFIG_ZSM
- ret = zram_compress(uncmem, PAGE_SIZE, src, &clen,
- meta->compress_workmem,&checksum);
-#else
- ret = zram_compress(uncmem, PAGE_SIZE, src, &clen,
+ ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
meta->compress_workmem);
-#endif
+
if (!is_partial_io(bvec)) {
kunmap_atomic(user_mem);
user_mem = NULL;
@@ -769,66 +298,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
src = NULL;
if (is_partial_io(bvec))
src = uncmem;
-#ifdef CONFIG_ZSM
- {
- int search_ret = 0;
-
- meta->table[index].checksum = checksum;
- meta->table[index].size = clen;
- meta->table[index].next_index = index;
- meta->table[index].copy_index = index;
- meta->table[index].copy_count = 0;
- INIT_LIST_HEAD(&(meta->table[index].head));
- if(src != NULL)
- {
- spin_lock(&zram_node4k_mutex);
- search_ret = insert_node_to_zram_tree(zram,meta,index,src,&root_zram_tree_4k);
- spin_unlock(&zram_node4k_mutex);
- }
- else
- {
- src = kmap_atomic(page);
- spin_lock(&zram_node4k_mutex);
- search_ret = insert_node_to_zram_tree(zram,meta,index,src,&root_zram_tree_4k);
- spin_unlock(&zram_node4k_mutex);
- kunmap_atomic(src);
- }
-
- if(search_ret)
- {
- ret = 0;
- goto out;
- }
- }
-#endif
}
-#ifdef CONFIG_ZSM
- else
- {
- int search_ret = 0;
-
- meta->table[index].checksum = checksum;
- meta->table[index].size = clen;
- meta->table[index].next_index = index;
- meta->table[index].copy_index = index;
- meta->table[index].copy_count = 0;
-
- INIT_LIST_HEAD(&(meta->table[index].head));
- spin_lock(&zram_node_mutex);
- search_ret = insert_node_to_zram_tree(zram,meta,index,src,&root_zram_tree);
- spin_unlock(&zram_node_mutex);
- if(search_ret)
- {
- ret = 0;
- goto out;
- }
- }
-#endif
-
-#ifdef CONFIG_MT_ENG_BUILD
- if (clen != PAGE_SIZE)
- clen += GUIDE_BYTES_LENGTH;
-#endif
handle = zs_malloc(meta->mem_pool, clen);
if (!handle) {
@@ -841,33 +311,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
src = kmap_atomic(page);
-
-#ifdef CONFIG_MT_ENG_BUILD
- /* Head guide bytes */
- if (clen != PAGE_SIZE) {
- int idx;
- for (idx = 0; idx < GUIDE_BYTES_HALFLEN; idx++) {
- *cmem = GUIDE_BYTES;
- cmem++;
- }
- clen -= GUIDE_BYTES_LENGTH;
- }
-#endif
-
memcpy(cmem, src, clen);
-
-#ifdef CONFIG_MT_ENG_BUILD
- /* Tail guide bytes */
- if (clen != PAGE_SIZE) {
- int idx;
- cmem += clen;
- for (idx = 0; idx < GUIDE_BYTES_HALFLEN; idx++) {
- *cmem = GUIDE_BYTES;
- cmem++;
- }
- }
-#endif
-
if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
kunmap_atomic(src);
@@ -1073,11 +517,7 @@ struct zram_meta *zram_meta_alloc(u64 disksize)
if (!meta)
goto out;
-#if defined(CONFIG_64BIT) && defined(CONFIG_LZ4K)
- meta->compress_workmem = kzalloc((LZO1X_MEM_COMPRESS << 1), GFP_KERNEL);
-#else
meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
-#endif
if (!meta->compress_workmem)
goto free_meta;
@@ -1095,7 +535,7 @@ struct zram_meta *zram_meta_alloc(u64 disksize)
goto free_buffer;
}
- meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM | __GFP_NOMTKPASR);
+ meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
goto free_table;
@@ -1145,12 +585,12 @@ static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
+
zram = bdev->bd_disk->private_data;
- /* down_write(&zram->lock); */
+ down_write(&zram->lock);
zram_free_page(zram, index);
- /* up_write(&zram->lock); */
+ up_write(&zram->lock);
zram_stat64_inc(zram, &zram->stats.notify_free);
-
}
static const struct block_device_operations zram_devops = {
@@ -1165,10 +605,7 @@ static int create_device(struct zram *zram, int device_id)
init_rwsem(&zram->lock);
init_rwsem(&zram->init_lock);
spin_lock_init(&zram->stat64_lock);
-#ifdef CONFIG_ZSM
- spin_lock_init(&zram_node_mutex);
- spin_lock_init(&zram_node4k_mutex);
-#endif
+
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
@@ -1247,64 +684,6 @@ unsigned int zram_get_num_devices(void)
return num_devices;
}
-static int zraminfo_proc_show(struct seq_file *m, void *v)
-{
- if (zram_devices->init_done)
- {
-#define P2K(x) (((unsigned long)x) << (PAGE_SHIFT - 10))
-#define B2K(x) (((unsigned long)x) >> (10))
- seq_printf(m,
- "DiskSize: %8lu kB\n"
- "OrigSize: %8lu kB\n"
- "ComprSize: %8lu kB\n"
- "MemUsed: %8lu kB\n"
- "GoodCompr: %8lu kB\n"
- "BadCompr: %8lu kB\n"
- "ZeroPage: %8lu kB\n"
- "NotifyFree: %8lu kB\n"
- "NumReads: %8lu kB\n"
- "NumWrites: %8lu kB\n"
-#ifdef CONFIG_ZSM
- "ZSM saved: %8lu kB\n"
- "ZSM4k saved: %8lu kB\n"
-#endif
- "InvalidIO: %8lu kB\n"
- ,
- B2K(zram_devices->disksize),
- P2K(zram_devices->stats.pages_stored),
- B2K(zram_devices->stats.compr_size),
- B2K(zs_get_total_size_bytes(zram_devices->meta->mem_pool)),
- P2K(zram_devices->stats.good_compress),
- P2K(zram_devices->stats.bad_compress),
- P2K(zram_devices->stats.pages_zero),
- P2K(zram_devices->stats.notify_free),
- P2K(zram_devices->stats.num_reads),
- P2K(zram_devices->stats.num_writes),
-#ifdef CONFIG_ZSM
- B2K(zram_devices->stats.zsm_saved),
- B2K(zram_devices->stats.zsm_saved4k),
-#endif
- P2K(zram_devices->stats.invalid_io)
- );
-#undef P2K
-#undef B2K
- seq_printf(m, "Algorithm: [%s]\n", (zram_comp != NULL)? zram_comp : "LZO");
- }
- return 0;
-}
-
-static int zraminfo_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, zraminfo_proc_show, NULL);
-}
-
-static const struct file_operations zraminfo_proc_fops = {
- .open = zraminfo_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static int __init zram_init(void)
{
int ret, dev_id;
@@ -1336,17 +715,6 @@ static int __init zram_init(void)
goto free_devices;
}
- /* Set compression/decompression hooks - Use LZO1X by default */
- if (!zram_compress || !zram_decompress) {
-#ifdef CONFIG_ZSM
- zram_compress = &lzo1x_1_compress_zram;
-#else
- zram_compress = &lzo1x_1_compress;
-#endif
- zram_decompress = &lzo1x_decompress_safe;
- }
- printk(KERN_ALERT "[%s][%d] ZCompress[%p] ZDecompress[%p]\n", __FUNCTION__, __LINE__, zram_compress, zram_decompress);
- proc_create("zraminfo", 0, NULL, &zraminfo_proc_fops);
pr_info("Created %u device(s) ...\n", num_devices);
return 0;
@@ -1355,7 +723,6 @@ free_devices:
while (dev_id)
destroy_device(&zram_devices[--dev_id]);
kfree(zram_devices);
- zram_devices = NULL;
unregister:
unregister_blkdev(zram_major, "zram");
out:
@@ -1379,7 +746,6 @@ static void __exit zram_exit(void)
unregister_blkdev(zram_major, "zram");
kfree(zram_devices);
- zram_devices = NULL;
pr_debug("Cleanup done!\n");
}
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index 5fcaeafbb..d542eee81 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -28,15 +28,6 @@ static const unsigned max_num_devices = 32;
/*-- Configurable parameters */
-/* Default zram disk size: 50% of total RAM */
-static const unsigned default_disksize_perc_ram = 50; /* 25 */
-/* Let disk size be DISKSIZE_ALIGNMENT */
-#define DISKSIZE_ALIGNMENT 0x800000 /* 8MB */
-/* Is totalram_pages less than SUPPOSED_TOTALRAM? */
-#define SUPPOSED_TOTALRAM 0x20000 /* 512MB */
-/* Allowable max size */
-#define MAX_DISKSIZE 0x20000000 /* 512MB */
-
/*
* Pages that compress to size greater than this are stored
* uncompressed in memory.
@@ -66,10 +57,6 @@ enum zram_pageflags {
ZRAM_ZERO,
__NR_ZRAM_PAGEFLAGS,
-#ifdef CONFIG_ZSM
- ZRAM_FIRST_NODE ,
- ZRAM_RB_NODE
-#endif
};
/*-- Data structures */
@@ -80,14 +67,6 @@ struct table {
u16 size; /* object size (excluding header) */
u8 count; /* object ref count (not yet used) */
u8 flags;
-#ifdef CONFIG_ZSM
- struct rb_node node;
- struct list_head head;
- u32 copy_count;
- u32 next_index;
- u32 copy_index;
- u32 checksum;
-#endif
} __aligned(4);
struct zram_stats {
@@ -98,10 +77,6 @@ struct zram_stats {
u64 failed_writes; /* can happen when memory is too low */
u64 invalid_io; /* non-page-aligned I/O requests */
u64 notify_free; /* no. of swap slot free notifications */
-#ifdef CONFIG_ZSM
- u64 zsm_saved; /* saved physical size*/
- u64 zsm_saved4k;
-#endif
u32 pages_zero; /* no. of zero filled pages */
u32 pages_stored; /* no. of pages currently stored */
u32 good_compress; /* % of pages with compression ratio<=50% */
@@ -146,12 +121,4 @@ extern struct zram_meta *zram_meta_alloc(u64 disksize);
extern void zram_meta_free(struct zram_meta *meta);
extern void zram_init_device(struct zram *zram, struct zram_meta *meta);
-/* Type for zram compression/decompression hooks */
-#ifdef CONFIG_ZSM
-typedef int (*comp_hook) (const unsigned char *, size_t , unsigned char *, size_t *, void *, int *);
-#else
-typedef int (*comp_hook) (const unsigned char *, size_t , unsigned char *, size_t *, void *);
-#endif
-typedef int (*decomp_hook) (const unsigned char *, size_t , unsigned char *, size_t *);
-
#endif
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index 27b3d9fed..dc76a3dba 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -55,39 +55,16 @@ static ssize_t disksize_show(struct device *dev,
static ssize_t disksize_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- int ret;
u64 disksize;
struct zram_meta *meta;
struct zram *zram = dev_to_zram(dev);
-
- /* Get disksize from user */
- ret = kstrtoull(buf, 10, &disksize);
- if (ret)
- return ret;
-
- /* If disksize is 0, then we give it a default setting. */
- if (disksize == 0) {
- /* Fix disksize */
- disksize = default_disksize_perc_ram * ((totalram_pages << PAGE_SHIFT) / 100);
- /* Expand its disksize if we have little system ram! */
- if (totalram_pages < SUPPOSED_TOTALRAM) {
- disksize += (disksize >> 1) ;
- }
- /* Don't exceed MAX_DISKSIZE here! */
- if (disksize > MAX_DISKSIZE)
- disksize = MAX_DISKSIZE;
- }
- /* Align it! */
- disksize = round_up(disksize, DISKSIZE_ALIGNMENT);
- /* disksize = PAGE_ALIGN(disksize); */
+ disksize = memparse(buf, NULL);
+ if (!disksize)
+ return -EINVAL;
+ disksize = PAGE_ALIGN(disksize);
meta = zram_meta_alloc(disksize);
- /* Check whether meta is null */
- if (!meta) {
- printk(KERN_ALERT"Failed to allocate memory for meta!\n");
- return len;
- }
down_write(&zram->init_lock);
if (zram->init_done) {
up_write(&zram->init_lock);
@@ -186,22 +163,6 @@ static ssize_t zero_pages_show(struct device *dev,
return sprintf(buf, "%u\n", zram->stats.pages_zero);
}
-static ssize_t good_compr_pages_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%u\n", zram->stats.good_compress);
-}
-
-static ssize_t bad_compr_pages_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%u\n", zram->stats.bad_compress);
-}
-
static ssize_t orig_data_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -244,8 +205,6 @@ static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
-static DEVICE_ATTR(good_compr_pages, S_IRUGO, good_compr_pages_show, NULL);
-static DEVICE_ATTR(bad_compr_pages, S_IRUGO, bad_compr_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
@@ -259,8 +218,6 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_invalid_io.attr,
&dev_attr_notify_free.attr,
&dev_attr_zero_pages.attr,
- &dev_attr_good_compr_pages.attr,
- &dev_attr_bad_compr_pages.attr,
&dev_attr_orig_data_size.attr,
&dev_attr_compr_data_size.attr,
&dev_attr_mem_used_total.attr,