Merge branch 'for-2.6.39/core' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.39/core' of git://git.kernel.dk/linux-2.6-block: (65 commits)
  Documentation/iostats.txt: bit-size reference etc.
  cfq-iosched: removing unnecessary think time checking
  cfq-iosched: Don't clear queue stats when preempt.
  blk-throttle: Reset group slice when limits are changed
  blk-cgroup: Only give unaccounted_time under debug
  cfq-iosched: Don't set active queue in preempt
  block: fix non-atomic access to genhd inflight structures
  block: attempt to merge with existing requests on plug flush
  block: NULL dereference on error path in __blkdev_get()
  cfq-iosched: Don't update group weights when on service tree
  fs: assign sb->s_bdi to default_backing_dev_info if the bdi is going away
  block: Require subsystems to explicitly allocate bio_set integrity mempool
  jbd2: finish conversion from WRITE_SYNC_PLUG to WRITE_SYNC and explicit plugging
  jbd: finish conversion from WRITE_SYNC_PLUG to WRITE_SYNC and explicit plugging
  fs: make fsync_buffers_list() plug
  mm: make generic_writepages() use plugging
  blk-cgroup: Add unaccounted time to timeslice_used.
  block: fixup plugging stubs for !CONFIG_BLOCK
  block: remove obsolete comments for blkdev_issue_zeroout.
  blktrace: Use rq->cmd_flags directly in blk_add_trace_rq.
  ...

Fix up conflicts in fs/{aio.c,super.c}
 mm/filemap.c | 74 +++++-----------------------------------------------------
 1 file changed, 13 insertions(+), 61 deletions(-)
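The heart of this series is the switch from implicit, per-queue plugging (driven through ->sync_page() unplug callbacks) to explicit, on-stack plugging, visible in the diff below as blk_start_plug()/blk_finish_plug() pairs. A minimal sketch of the new pattern; the helper and its bio array are hypothetical, only the plug API comes from this merge:

#include <linux/blkdev.h>

/* Hypothetical helper: submit a batch of prepared bios under one plug. */
static void example_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;           /* plug state lives on the stack */
        int i;

        blk_start_plug(&plug);          /* attach plug to the current task */
        for (i = 0; i < nr; i++)
                submit_bio(READ, bios[i]);      /* queued, not yet dispatched */
        blk_finish_plug(&plug);         /* dispatch the whole batch */
}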
@@ -164,45 +164,15 @@ void delete_from_page_cache(struct page *page)
 }
 EXPORT_SYMBOL(delete_from_page_cache);
 
-static int sync_page(void *word)
+static int sleep_on_page(void *word)
 {
-        struct address_space *mapping;
-        struct page *page;
-
-        page = container_of((unsigned long *)word, struct page, flags);
-
-        /*
-         * page_mapping() is being called without PG_locked held.
-         * Some knowledge of the state and use of the page is used to
-         * reduce the requirements down to a memory barrier.
-         * The danger here is of a stale page_mapping() return value
-         * indicating a struct address_space different from the one it's
-         * associated with when it is associated with one.
-         * After smp_mb(), it's either the correct page_mapping() for
-         * the page, or an old page_mapping() and the page's own
-         * page_mapping() has gone NULL.
-         * The ->sync_page() address_space operation must tolerate
-         * page_mapping() going NULL. By an amazing coincidence,
-         * this comes about because none of the users of the page
-         * in the ->sync_page() methods make essential use of the
-         * page_mapping(), merely passing the page down to the backing
-         * device's unplug functions when it's non-NULL, which in turn
-         * ignore it for all cases but swap, where only page_private(page) is
-         * of interest. When page_mapping() does go NULL, the entire
-         * call stack gracefully ignores the page and returns.
-         * -- wli
-         */
-        smp_mb();
-        mapping = page_mapping(page);
-        if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
-                mapping->a_ops->sync_page(page);
         io_schedule();
         return 0;
 }
 
-static int sync_page_killable(void *word)
+static int sleep_on_page_killable(void *word)
 {
-        sync_page(word);
+        sleep_on_page(word);
         return fatal_signal_pending(current) ? -EINTR : 0;
 }
 
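This simplification works because sleeping itself now unplugs: io_schedule() flushes the sleeping task's plug list, so the wait callback no longer needs to reach through page_mapping() to poke the backing device. For context, a hedged sketch of the bit-wait contract the callbacks satisfy; the caller and flag word are hypothetical:

#include <linux/sched.h>
#include <linux/wait.h>

/* Hypothetical caller: block until `bit` clears in *flags. The action
 * function (sleep_on_page above) sleeps once and returns 0 to keep
 * waiting; __wait_on_bit() re-checks the bit around each call. */
static void example_wait_for_bit(unsigned long *flags, int bit)
{
        DEFINE_WAIT_BIT(wait, flags, bit);

        __wait_on_bit(bit_waitqueue(flags, bit), &wait, sleep_on_page,
                        TASK_UNINTERRUPTIBLE);
}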
@@ -558,12 +528,6 @@ struct page *__page_cache_alloc(gfp_t gfp)
 EXPORT_SYMBOL(__page_cache_alloc);
 #endif
 
-static int __sleep_on_page_lock(void *word)
-{
-        io_schedule();
-        return 0;
-}
-
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
@@ -591,7 +555,7 @@ void wait_on_page_bit(struct page *page, int bit_nr)
         DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
         if (test_bit(bit_nr, &page->flags))
-                __wait_on_bit(page_waitqueue(page), &wait, sync_page,
+                __wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
                                                         TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
@@ -655,17 +619,12 @@ EXPORT_SYMBOL(end_page_writeback);
 /**
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
- *
- * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
- * random driver's requestfn sets TASK_RUNNING, we could busywait. However
- * chances are that on the second loop, the block layer's plug list is empty,
- * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
 void __lock_page(struct page *page)
 {
         DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
-        __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
+        __wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
                                                         TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
@@ -675,24 +634,10 @@ int __lock_page_killable(struct page *page)
         DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
         return __wait_on_bit_lock(page_waitqueue(page), &wait,
-                                        sync_page_killable, TASK_KILLABLE);
+                                        sleep_on_page_killable, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
-/**
- * __lock_page_nosync - get a lock on the page, without calling sync_page()
- * @page: the page to lock
- *
- * Variant of lock_page that does not require the caller to hold a reference
- * on the page's mapping.
- */
-void __lock_page_nosync(struct page *page)
-{
-        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
-        __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
-                                                        TASK_UNINTERRUPTIBLE);
-}
-
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                          unsigned int flags)
 {
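For context on why __lock_page() is documented as the "we need to sleep" case: the lock_page() wrapper in include/linux/pagemap.h takes an atomic trylock first and only falls into this slow path on contention. Roughly, paraphrasing the in-tree inline:

#include <linux/pagemap.h>

/* Paraphrase of lock_page(): fast path when PG_locked is clear,
 * sleep in __lock_page() otherwise. */
static inline void example_lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}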
@@ -1407,12 +1352,15 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
         unsigned long seg = 0;
         size_t count;
         loff_t *ppos = &iocb->ki_pos;
+        struct blk_plug plug;
 
         count = 0;
         retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
         if (retval)
                 return retval;
 
+        blk_start_plug(&plug);
+
         /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
         if (filp->f_flags & O_DIRECT) {
                 loff_t size;
@@ -1485,6 +1433,7 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                 break;
         }
 out:
+        blk_finish_plug(&plug);
         return retval;
 }
 EXPORT_SYMBOL(generic_file_aio_read);
@@ -2596,11 +2545,13 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 {
         struct file *file = iocb->ki_filp;
         struct inode *inode = file->f_mapping->host;
+        struct blk_plug plug;
         ssize_t ret;
 
         BUG_ON(iocb->ki_pos != pos);
 
         mutex_lock(&inode->i_mutex);
+        blk_start_plug(&plug);
         ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
         mutex_unlock(&inode->i_mutex);
 
@@ -2611,6 +2562,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                 if (err < 0 && ret > 0)
                         ret = err;
         }
+        blk_finish_plug(&plug);
         return ret;
 }
 EXPORT_SYMBOL(generic_file_aio_write);
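One property worth noting, hedged since it rests on block-layer internals rather than anything shown in this diff: plugs are tracked per task and nest, so generic_file_aio_write() can plug even when its caller already holds a plug of its own. A sketch under that assumption:

#include <linux/blkdev.h>

/* Assumes 2.6.39 semantics: blk_start_plug() installs the plug on
 * current only if no plug is already active, so the inner plug is
 * inert and all queued I/O drains when the outermost plug finishes. */
static void example_nested_plugs(struct bio *a, struct bio *b)
{
        struct blk_plug outer, inner;

        blk_start_plug(&outer);
        submit_bio(WRITE, a);

        blk_start_plug(&inner);         /* nested: no effect on batching */
        submit_bio(WRITE, b);
        blk_finish_plug(&inner);

        blk_finish_plug(&outer);        /* both bios dispatched here */
}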