Staging: ramzswap: Remove redundant check for zero page

ramzswap_free_page() already handles the case of zero-filled
pages, so remove the redundant logic for it in ramzswap_write().

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
This commit is contained in:
Authored by Nitin Gupta on 2010-01-28 21:13:41 +05:30; committed by Greg Kroah-Hartman
parent de1a21a0f1
commit 2e88228174

View File

@ -593,6 +593,10 @@ static void ramzswap_free_page(struct ramzswap *rzs, size_t index)
u32 offset = rzs->table[index].offset;
if (unlikely(!page)) {
/*
* No memory is allocated for zero filled pages.
* Simply clear zero page flag.
*/
if (rzs_test_flag(rzs, index, RZS_ZERO)) {
rzs_clear_flag(rzs, index, RZS_ZERO);
rzs_stat_dec(&rzs->stats.pages_zero);
@ -789,18 +793,9 @@ static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
* is no longer referenced by any process. So, its now safe
* to free the memory that was allocated for this page.
*/
if (rzs->table[index].page)
if (rzs->table[index].page || rzs_test_flag(rzs, index, RZS_ZERO))
ramzswap_free_page(rzs, index);
/*
* No memory is allocated for zero filled pages.
* Simply clear zero page flag.
*/
if (rzs_test_flag(rzs, index, RZS_ZERO)) {
rzs_stat_dec(&rzs->stats.pages_zero);
rzs_clear_flag(rzs, index, RZS_ZERO);
}
mutex_lock(&rzs->lock);
user_mem = kmap_atomic(page, KM_USER0);