[XFS] cluster rewrites

We can cluster mapped pages as well. This improves performance on
rewrites, since we can reduce the number of allocator calls.

SGI-PV: 947118
SGI-Modid: xfs-linux-melb:xfs-kern:203829a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>
commit 6c4fe19f66
parent 7336cea8c2
committed by Nathan Scott
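Note: below is a minimal userspace sketch of the probe logic this change generalises. Before the patch, xfs_probe_unmapped_page() counted only unmapped buffers; afterwards, xfs_probe_page() counts contiguous uptodate buffers whose mapped state matches the caller's request, so already-mapped pages can be clustered on rewrite too. The struct buf type, probe_page() helper, and main() driver here are illustrative stand-ins for the kernel's buffer_head walk, not code from the patch.

/*
 * Userspace model of the generalised probe: count bytes of contiguous
 * uptodate buffers whose mapped state matches 'mapped', stopping at
 * the first mismatch or once pg_offset bytes are covered.
 */
#include <stdio.h>

struct buf {
	int		uptodate;
	int		mapped;
	unsigned int	size;
	struct buf	*next;	/* circular, like bh->b_this_page */
};

static unsigned int probe_page(struct buf *head, unsigned int pg_offset,
			       int mapped)
{
	struct buf *bh = head;
	unsigned int ret = 0;

	do {
		if (!bh->uptodate)
			break;
		if (mapped != bh->mapped)
			break;
		ret += bh->size;
		if (ret >= pg_offset)
			break;
	} while ((bh = bh->next) != head);

	return ret;
}

int main(void)
{
	/* Four 1k buffers: first two mapped, last two still unmapped. */
	struct buf b[4] = {
		{ 1, 1, 1024 }, { 1, 1, 1024 }, { 1, 0, 1024 }, { 1, 0, 1024 },
	};
	for (int i = 0; i < 4; i++)
		b[i].next = &b[(i + 1) % 4];

	printf("mapped probe:   %u bytes\n", probe_page(&b[0], 4096, 1));
	printf("unmapped probe: %u bytes\n", probe_page(&b[0], 4096, 0));
	return 0;
}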
@@ -470,13 +470,13 @@ xfs_map_at_offset(
 }
 
 /*
- * Look for a page at index which is unlocked and not mapped
- * yet - clustering for mmap write case.
+ * Look for a page at index that is suitable for clustering.
  */
 STATIC unsigned int
-xfs_probe_unmapped_page(
+xfs_probe_page(
 	struct page		*page,
-	unsigned int		pg_offset)
+	unsigned int		pg_offset,
+	int			mapped)
 {
 	int			ret = 0;
 
@@ -489,25 +489,28 @@ xfs_probe_unmapped_page(
 
 			bh = head = page_buffers(page);
 			do {
-				if (buffer_mapped(bh) || !buffer_uptodate(bh))
+				if (!buffer_uptodate(bh))
+					break;
+				if (mapped != buffer_mapped(bh))
 					break;
 				ret += bh->b_size;
 				if (ret >= pg_offset)
 					break;
 			} while ((bh = bh->b_this_page) != head);
 		} else
-			ret = PAGE_CACHE_SIZE;
+			ret = mapped ? 0 : PAGE_CACHE_SIZE;
 	}
 
 	return ret;
 }
 
 STATIC size_t
-xfs_probe_unmapped_cluster(
+xfs_probe_cluster(
 	struct inode		*inode,
 	struct page		*startpage,
 	struct buffer_head	*bh,
-	struct buffer_head	*head)
+	struct buffer_head	*head,
+	int			mapped)
 {
 	struct pagevec		pvec;
 	pgoff_t			tindex, tlast, tloff;
@@ -516,7 +519,7 @@ xfs_probe_unmapped_cluster(
 
 	/* First sum forwards in this page */
 	do {
-		if (buffer_mapped(bh))
+		if (mapped != buffer_mapped(bh))
 			return total;
 		total += bh->b_size;
 	} while ((bh = bh->b_this_page) != head);
@@ -550,7 +553,7 @@ xfs_probe_unmapped_cluster(
 				pg_offset = PAGE_CACHE_SIZE;
 
 			if (page->index == tindex && !TestSetPageLocked(page)) {
-				len = xfs_probe_unmapped_page(page, pg_offset);
+				len = xfs_probe_page(page, pg_offset, mapped);
 				unlock_page(page);
 			}
 
@@ -592,6 +595,8 @@ xfs_is_delayed_page(
 				acceptable = (type == IOMAP_UNWRITTEN);
 			else if (buffer_delay(bh))
 				acceptable = (type == IOMAP_DELAY);
+			else if (buffer_mapped(bh))
+				acceptable = (type == 0);
 			else
 				break;
 		} while ((bh = bh->b_this_page) != head);
@@ -804,6 +809,7 @@ xfs_page_state_convert(
 	ssize_t			size, len;
 	int			flags, err, iomap_valid = 0, uptodate = 1;
 	int			page_dirty, count = 0, trylock_flag = 0;
+	int			all_bh = unmapped;
 
 	/* wait for other IO threads? */
 	if (startio && wbc->sync_mode != WB_SYNC_NONE)
@@ -845,6 +851,8 @@ xfs_page_state_convert(
 
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
+	flags = -1;
+	type = 0;
 
 	/* TODO: cleanup count and page_dirty */
 
@@ -878,6 +886,12 @@ xfs_page_state_convert(
 		if (buffer_unwritten(bh) || buffer_delay(bh) ||
 		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
 		     !buffer_mapped(bh) && (unmapped || startio))) {
+			/*
+			 * Make sure we don't use a read-only iomap
+			 */
+			if (flags == BMAPI_READ)
+				iomap_valid = 0;
+
 			if (buffer_unwritten(bh)) {
 				type = IOMAP_UNWRITTEN;
 				flags = BMAPI_WRITE|BMAPI_IGNSTATE;
@@ -887,14 +901,14 @@ xfs_page_state_convert(
 				if (!startio)
 					flags |= trylock_flag;
 			} else {
-				type = 0;
+				type = IOMAP_NEW;
 				flags = BMAPI_WRITE|BMAPI_MMAP;
 			}
 
 			if (!iomap_valid) {
-				if (type == 0) {
-					size = xfs_probe_unmapped_cluster(inode,
-							page, bh, head);
+				if (type == IOMAP_NEW) {
+					size = xfs_probe_cluster(inode,
+							page, bh, head, 0);
 				} else {
 					size = len;
 				}
@@ -921,10 +935,27 @@ xfs_page_state_convert(
 					count++;
 				}
 			} else if (buffer_uptodate(bh) && startio) {
-			type = 0;
+			/*
+			 * we got here because the buffer is already mapped.
+			 * That means it must already have extents allocated
+			 * underneath it. Map the extent by reading it.
+			 */
+			if (!iomap_valid || type != 0) {
+				flags = BMAPI_READ;
+				size = xfs_probe_cluster(inode, page, bh,
+								head, 1);
+				err = xfs_map_blocks(inode, offset, size,
+						&iomap, flags);
+				if (err)
+					goto error;
+				iomap_valid = xfs_iomap_valid(&iomap, offset);
+			}
+
+			type = 0;
 			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
 				ASSERT(buffer_mapped(bh));
+				if (iomap_valid)
+					all_bh = 1;
 				xfs_add_to_ioend(inode, bh, offset, type,
 						&ioend, !iomap_valid);
 				page_dirty--;
@@ -953,7 +984,7 @@ xfs_page_state_convert(
 					PAGE_CACHE_SHIFT;
 		tlast = min_t(pgoff_t, offset, last_index);
 		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
-					wbc, startio, unmapped, tlast);
+					wbc, startio, all_bh, tlast);
 	}
 
 	if (iohead)