btrfs: remove all unused functions

Remove static and global declarations and/or definitions. Reduces size
of btrfs.ko by ~3.4kB.

  text    data     bss     dec     hex filename
402081    7464     200  409745   64091 btrfs.ko.base
398620    7144     200  405964   631cc btrfs.ko.remove-all
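
(The table above is standard size(1) output; assuming the modules built
without and with this patch are kept side by side, it can be regenerated
with:

  size btrfs.ko.base btrfs.ko.remove-all

where the ~3.4kB saving corresponds to the shrunk .text section,
402081 - 398620 = 3461 bytes.)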

Signed-off-by: David Sterba <dsterba@suse.cz>

commit f2a97a9dbd (parent 621496f4fd)
Author: David Sterba
Date:   2011-05-05 12:44:41 +02:00

19 changed files with 1 addition and 817 deletions


@@ -941,13 +941,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 			      NULL, mask);
 }
 
-static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
-			    gfp_t mask)
-{
-	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
-				NULL, mask);
-}
-
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 			struct extent_state **cached_state, gfp_t mask)
 {
@@ -963,11 +956,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
 				cached_state, mask);
 }
 
-int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
-}
-
 /*
  * either insert or lock state struct between start and end use mask to tell
  * us if waiting is desired.
@@ -1027,25 +1015,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 				mask);
 }
 
-/*
- * helper function to set pages and extents in the tree dirty
- */
-int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
-	struct page *page;
-
-	while (index <= end_index) {
-		page = find_get_page(tree->mapping, index);
-		BUG_ON(!page);
-		__set_page_dirty_nobuffers(page);
-		page_cache_release(page);
-		index++;
-	}
-	return 0;
-}
-
 /*
  * helper function to set both pages and extents in the tree writeback
  */
@@ -1819,46 +1788,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 	bio_put(bio);
 }
 
-/*
- * IO done from prepare_write is pretty simple, we just unlock
- * the structs in the extent tree when done, and set the uptodate bits
- * as appropriate.
- */
-static void end_bio_extent_preparewrite(struct bio *bio, int err)
-{
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct extent_io_tree *tree;
-	u64 start;
-	u64 end;
-
-	do {
-		struct page *page = bvec->bv_page;
-		struct extent_state *cached = NULL;
-		tree = &BTRFS_I(page->mapping->host)->io_tree;
-
-		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
-			bvec->bv_offset;
-		end = start + bvec->bv_len - 1;
-
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
-		if (uptodate) {
-			set_extent_uptodate(tree, start, end, &cached,
-					    GFP_ATOMIC);
-		} else {
-			ClearPageUptodate(page);
-			SetPageError(page);
-		}
-
-		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
-
-	} while (bvec >= bio->bi_io_vec);
-
-	bio_put(bio);
-}
-
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
 		gfp_t gfp_flags)
@@ -2719,128 +2648,6 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 	return 0;
 }
 
-/*
- * simple commit_write call, set_range_dirty is used to mark both
- * the pages and the extent records as dirty
- */
-int extent_commit_write(struct extent_io_tree *tree,
-			struct inode *inode, struct page *page,
-			unsigned from, unsigned to)
-{
-	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-
-	set_page_extent_mapped(page);
-	set_page_dirty(page);
-
-	if (pos > inode->i_size) {
-		i_size_write(inode, pos);
-		mark_inode_dirty(inode);
-	}
-	return 0;
-}
-
-int extent_prepare_write(struct extent_io_tree *tree,
-			 struct inode *inode, struct page *page,
-			 unsigned from, unsigned to, get_extent_t *get_extent)
-{
-	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
-	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
-	u64 block_start;
-	u64 orig_block_start;
-	u64 block_end;
-	u64 cur_end;
-	struct extent_map *em;
-	unsigned blocksize = 1 << inode->i_blkbits;
-	size_t pg_offset = 0;
-	size_t block_off_start;
-	size_t block_off_end;
-	int err = 0;
-	int iocount = 0;
-	int ret = 0;
-	int isnew;
-
-	set_page_extent_mapped(page);
-
-	block_start = (page_start + from) & ~((u64)blocksize - 1);
-	block_end = (page_start + to - 1) | (blocksize - 1);
-	orig_block_start = block_start;
-
-	lock_extent(tree, page_start, page_end, GFP_NOFS);
-	while (block_start <= block_end) {
-		em = get_extent(inode, page, pg_offset, block_start,
-				block_end - block_start + 1, 1);
-		if (IS_ERR_OR_NULL(em))
-			goto err;
-
-		cur_end = min(block_end, extent_map_end(em) - 1);
-		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
-		block_off_end = block_off_start + blocksize;
-		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
-
-		if (!PageUptodate(page) && isnew &&
-		    (block_off_end > to || block_off_start < from)) {
-			void *kaddr;
-
-			kaddr = kmap_atomic(page, KM_USER0);
-			if (block_off_end > to)
-				memset(kaddr + to, 0, block_off_end - to);
-			if (block_off_start < from)
-				memset(kaddr + block_off_start, 0,
-				       from - block_off_start);
-			flush_dcache_page(page);
-			kunmap_atomic(kaddr, KM_USER0);
-		}
-		if ((em->block_start != EXTENT_MAP_HOLE &&
-		     em->block_start != EXTENT_MAP_INLINE) &&
-		    !isnew && !PageUptodate(page) &&
-		    (block_off_end > to || block_off_start < from) &&
-		    !test_range_bit(tree, block_start, cur_end,
-				    EXTENT_UPTODATE, 1, NULL)) {
-			u64 sector;
-			u64 extent_offset = block_start - em->start;
-			size_t iosize;
-			sector = (em->block_start + extent_offset) >> 9;
-			iosize = (cur_end - block_start + blocksize) &
-				~((u64)blocksize - 1);
-			/*
-			 * we've already got the extent locked, but we
-			 * need to split the state such that our end_bio
-			 * handler can clear the lock.
-			 */
-			set_extent_bit(tree, block_start,
-				       block_start + iosize - 1,
-				       EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
-			ret = submit_extent_page(READ, tree, page,
-					 sector, iosize, pg_offset, em->bdev,
-					 NULL, 1,
-					 end_bio_extent_preparewrite, 0,
-					 0, 0);
-			if (ret && !err)
-				err = ret;
-			iocount++;
-			block_start = block_start + iosize;
-		} else {
-			struct extent_state *cached = NULL;
-
-			set_extent_uptodate(tree, block_start, cur_end, &cached,
-					    GFP_NOFS);
-			unlock_extent_cached(tree, block_start, cur_end,
-					     &cached, GFP_NOFS);
-			block_start = cur_end + 1;
-		}
-		pg_offset = block_start & (PAGE_CACHE_SIZE - 1);
-		free_extent_map(em);
-	}
-	if (iocount) {
-		wait_extent_bit(tree, orig_block_start,
-				block_end, EXTENT_LOCKED);
-	}
-	check_page_uptodate(tree, page);
-err:
-	/* FIXME, zero out newly allocated blocks on error */
-	return err;
-}
-
 /*
  * a helper for releasepage, this tests for areas of the page that
  * are locked or under IO and drops the related state bits if it is safe
@@ -2927,33 +2734,6 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 	return try_release_extent_state(map, tree, page, mask);
 }
 
-sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
-		get_extent_t *get_extent)
-{
-	struct inode *inode = mapping->host;
-	struct extent_state *cached_state = NULL;
-	u64 start = iblock << inode->i_blkbits;
-	sector_t sector = 0;
-	size_t blksize = (1 << inode->i_blkbits);
-	struct extent_map *em;
-
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
-			 0, &cached_state, GFP_NOFS);
-	em = get_extent(inode, NULL, 0, start, blksize, 0);
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
-			     start + blksize - 1, &cached_state, GFP_NOFS);
-	if (IS_ERR_OR_NULL(em))
-		return 0;
-
-	if (em->block_start > EXTENT_MAP_LAST_BYTE)
-		goto out;
-
-	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
-out:
-	free_extent_map(em);
-	return sector;
-}
-
 /*
  * helper function for fiemap, which doesn't want to see any holes.
  * This maps until we find something past 'last'
@@ -3437,13 +3217,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 	return 0;
 }
 
-int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
-				    struct extent_buffer *eb)
-{
-	return wait_on_extent_writeback(tree, eb->start,
-					eb->start + eb->len - 1);
-}
-
 int set_extent_buffer_dirty(struct extent_io_tree *tree,
 			    struct extent_buffer *eb)
 {