Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs
Pull ext3 & udf fixes from Jan Kara:
 "Shortlog pretty much says it all.  The interesting bits are UDF
  support for direct IO and ext3 fix for a long standing oops in
  data=journal mode."

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  jbd: Fix assertion failure in commit code due to lacking transaction credits
  UDF: Add support for O_DIRECT
  ext3: Replace 0 with NULL for pointer in super.c file
  udf: add writepages support for udf
  ext3: don't clear orphan list on ro mount with errors
  reiserfs: Make reiserfs_xattr_handlers static
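Illustrative note (not part of the patch set): with the .direct_IO hook added in the UDF hunks below, a file on a UDF mount can be read with O_DIRECT, bypassing the page cache. A minimal userspace sketch follows; it assumes a 4096-byte logical block size (O_DIRECT needs the buffer, offset and length aligned to the device's logical block size), and the file path argument is whatever file you point it at on the UDF mount.

/* sketch.c - read one aligned block with O_DIRECT (illustration only) */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const size_t len = 4096;        /* assumed to match the logical block size */
        void *buf;
        int fd;
        ssize_t n;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <file-on-udf-mount>\n", argv[0]);
                return 1;
        }
        if (posix_memalign(&buf, 4096, len)) {  /* O_DIRECT wants an aligned buffer */
                perror("posix_memalign");
                return 1;
        }
        fd = open(argv[1], O_RDONLY | O_DIRECT);
        if (fd < 0) {
                perror("open(O_DIRECT)");
                free(buf);
                return 1;
        }
        n = pread(fd, buf, len, 0);             /* offset 0 is trivially aligned */
        if (n < 0)
                perror("pread");
        else
                printf("read %zd bytes via direct I/O\n", n);
        close(fd);
        free(buf);
        return 0;
}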
@@ -980,7 +980,7 @@ static int parse_options (char *options, struct super_block *sb,
                  * Initialize args struct so we know whether arg was
                  * found; some options take optional arguments.
                  */
-                args[0].to = args[0].from = 0;
+                args[0].to = args[0].from = NULL;
                 token = match_token(p, tokens, args);
                 switch (token) {
                 case Opt_bsd_df:
@@ -1484,10 +1484,12 @@ static void ext3_orphan_cleanup (struct super_block * sb,
         }
 
         if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
-                if (es->s_last_orphan)
+                /* don't clear list on RO mount w/ errors */
+                if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
                         jbd_debug(1, "Errors on filesystem, "
                                   "clearing orphan list.\n");
-                es->s_last_orphan = 0;
+                        es->s_last_orphan = 0;
+                }
                 jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
                 return;
         }
@@ -86,7 +86,12 @@ nope:
 static void release_data_buffer(struct buffer_head *bh)
 {
         if (buffer_freed(bh)) {
+                WARN_ON_ONCE(buffer_dirty(bh));
                 clear_buffer_freed(bh);
+                clear_buffer_mapped(bh);
+                clear_buffer_new(bh);
+                clear_buffer_req(bh);
+                bh->b_bdev = NULL;
                 release_buffer_page(bh);
         } else
                 put_bh(bh);
@@ -866,17 +871,35 @@ restart_loop:
                  * there's no point in keeping a checkpoint record for
                  * it. */
 
-                /* A buffer which has been freed while still being
-                 * journaled by a previous transaction may end up still
-                 * being dirty here, but we want to avoid writing back
-                 * that buffer in the future after the "add to orphan"
-                 * operation been committed, That's not only a performance
-                 * gain, it also stops aliasing problems if the buffer is
-                 * left behind for writeback and gets reallocated for another
-                 * use in a different page. */
-                if (buffer_freed(bh) && !jh->b_next_transaction) {
-                        clear_buffer_freed(bh);
-                        clear_buffer_jbddirty(bh);
+                /*
+                 * A buffer which has been freed while still being journaled by
+                 * a previous transaction.
+                 */
+                if (buffer_freed(bh)) {
+                        /*
+                         * If the running transaction is the one containing
+                         * "add to orphan" operation (b_next_transaction !=
+                         * NULL), we have to wait for that transaction to
+                         * commit before we can really get rid of the buffer.
+                         * So just clear b_modified to not confuse transaction
+                         * credit accounting and refile the buffer to
+                         * BJ_Forget of the running transaction. If the just
+                         * committed transaction contains "add to orphan"
+                         * operation, we can completely invalidate the buffer
+                         * now. We are rather throughout in that since the
+                         * buffer may be still accessible when blocksize <
+                         * pagesize and it is attached to the last partial
+                         * page.
+                         */
+                        jh->b_modified = 0;
+                        if (!jh->b_next_transaction) {
+                                clear_buffer_freed(bh);
+                                clear_buffer_jbddirty(bh);
+                                clear_buffer_mapped(bh);
+                                clear_buffer_new(bh);
+                                clear_buffer_req(bh);
+                                bh->b_bdev = NULL;
+                        }
                 }
 
                 if (buffer_jbddirty(bh)) {
@@ -1843,15 +1843,16 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
  * We're outside-transaction here. Either or both of j_running_transaction
  * and j_committing_transaction may be NULL.
  */
-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
+                                int partial_page)
 {
         transaction_t *transaction;
         struct journal_head *jh;
         int may_free = 1;
-        int ret;
 
         BUFFER_TRACE(bh, "entry");
 
+retry:
         /*
          * It is safe to proceed here without the j_list_lock because the
          * buffers cannot be stolen by try_to_free_buffers as long as we are
@@ -1879,10 +1880,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
          * clear the buffer dirty bit at latest at the moment when the
          * transaction marking the buffer as freed in the filesystem
          * structures is committed because from that moment on the
-         * buffer can be reallocated and used by a different page.
+         * block can be reallocated and used by a different page.
          * Since the block hasn't been freed yet but the inode has
          * already been added to orphan list, it is safe for us to add
          * the buffer to BJ_Forget list of the newest transaction.
+         *
+         * Also we have to clear buffer_mapped flag of a truncated buffer
+         * because the buffer_head may be attached to the page straddling
+         * i_size (can happen only when blocksize < pagesize) and thus the
+         * buffer_head can be reused when the file is extended again. So we end
+         * up keeping around invalidated buffers attached to transactions'
+         * BJ_Forget list just to stop checkpointing code from cleaning up
+         * the transaction this buffer was modified in.
          */
         transaction = jh->b_transaction;
         if (transaction == NULL) {
@@ -1909,13 +1918,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
                          * committed, the buffer won't be needed any
                          * longer. */
                         JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
-                        ret = __dispose_buffer(jh,
+                        may_free = __dispose_buffer(jh,
                                         journal->j_running_transaction);
-                        journal_put_journal_head(jh);
-                        spin_unlock(&journal->j_list_lock);
-                        jbd_unlock_bh_state(bh);
-                        spin_unlock(&journal->j_state_lock);
-                        return ret;
+                        goto zap_buffer;
                 } else {
                         /* There is no currently-running transaction. So the
                          * orphan record which we wrote for this file must have
@@ -1923,13 +1928,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
                  * the committing transaction, if it exists. */
                 if (journal->j_committing_transaction) {
                         JBUFFER_TRACE(jh, "give to committing trans");
-                        ret = __dispose_buffer(jh,
+                        may_free = __dispose_buffer(jh,
                                         journal->j_committing_transaction);
-                        journal_put_journal_head(jh);
-                        spin_unlock(&journal->j_list_lock);
-                        jbd_unlock_bh_state(bh);
-                        spin_unlock(&journal->j_state_lock);
-                        return ret;
+                        goto zap_buffer;
                 } else {
                         /* The orphan record's transaction has
                          * committed. We can cleanse this buffer */
@@ -1950,10 +1951,24 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
                 }
                 /*
                  * The buffer is committing, we simply cannot touch
-                 * it. So we just set j_next_transaction to the
-                 * running transaction (if there is one) and mark
-                 * buffer as freed so that commit code knows it should
-                 * clear dirty bits when it is done with the buffer.
+                 * it. If the page is straddling i_size we have to wait
+                 * for commit and try again.
+                 */
+                if (partial_page) {
+                        tid_t tid = journal->j_committing_transaction->t_tid;
+
+                        journal_put_journal_head(jh);
+                        spin_unlock(&journal->j_list_lock);
+                        jbd_unlock_bh_state(bh);
+                        spin_unlock(&journal->j_state_lock);
+                        log_wait_commit(journal, tid);
+                        goto retry;
+                }
+                /*
+                 * OK, buffer won't be reachable after truncate. We just set
+                 * j_next_transaction to the running transaction (if there is
+                 * one) and mark buffer as freed so that commit code knows it
+                 * should clear dirty bits when it is done with the buffer.
                  */
                 set_buffer_freed(bh);
                 if (journal->j_running_transaction && buffer_jbddirty(bh))
@@ -1976,6 +1991,14 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
         }
 
 zap_buffer:
+        /*
+         * This is tricky. Although the buffer is truncated, it may be reused
+         * if blocksize < pagesize and it is attached to the page straddling
+         * EOF. Since the buffer might have been added to BJ_Forget list of the
+         * running transaction, journal_get_write_access() won't clear
+         * b_modified and credit accounting gets confused. So clear b_modified
+         * here. */
+        jh->b_modified = 0;
         journal_put_journal_head(jh);
 zap_buffer_no_jh:
         spin_unlock(&journal->j_list_lock);
@@ -2024,7 +2047,8 @@ void journal_invalidatepage(journal_t *journal,
                 if (offset <= curr_off) {
                         /* This block is wholly outside the truncation point */
                         lock_buffer(bh);
-                        may_free &= journal_unmap_buffer(journal, bh);
+                        may_free &= journal_unmap_buffer(journal, bh,
+                                                         offset > 0);
                         unlock_buffer(bh);
                 }
                 curr_off = next_off;
@@ -896,7 +896,7 @@ static int create_privroot(struct dentry *dentry) { return 0; }
 #endif
 
 /* Actual operations that are exported to VFS-land */
-const struct xattr_handler *reiserfs_xattr_handlers[] = {
+static const struct xattr_handler *reiserfs_xattr_handlers[] = {
 #ifdef CONFIG_REISERFS_FS_XATTR
         &reiserfs_xattr_user_handler,
         &reiserfs_xattr_trusted_handler,
@@ -118,11 +118,20 @@ static int udf_adinicb_write_end(struct file *file,
         return simple_write_end(file, mapping, pos, len, copied, page, fsdata);
 }
 
+static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
+                                     const struct iovec *iov,
+                                     loff_t offset, unsigned long nr_segs)
+{
+        /* Fallback to buffered I/O. */
+        return 0;
+}
+
 const struct address_space_operations udf_adinicb_aops = {
         .readpage    = udf_adinicb_readpage,
         .writepage   = udf_adinicb_writepage,
         .write_begin = udf_adinicb_write_begin,
         .write_end   = udf_adinicb_write_end,
+        .direct_IO   = udf_adinicb_direct_IO,
 };
 
 static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
@@ -95,11 +95,33 @@ void udf_evict_inode(struct inode *inode)
         }
 }
 
+static void udf_write_failed(struct address_space *mapping, loff_t to)
+{
+        struct inode *inode = mapping->host;
+        struct udf_inode_info *iinfo = UDF_I(inode);
+        loff_t isize = inode->i_size;
+
+        if (to > isize) {
+                truncate_pagecache(inode, to, isize);
+                if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+                        down_write(&iinfo->i_data_sem);
+                        udf_truncate_extents(inode);
+                        up_write(&iinfo->i_data_sem);
+                }
+        }
+}
+
 static int udf_writepage(struct page *page, struct writeback_control *wbc)
 {
         return block_write_full_page(page, udf_get_block, wbc);
 }
 
+static int udf_writepages(struct address_space *mapping,
+                          struct writeback_control *wbc)
+{
+        return mpage_writepages(mapping, wbc, udf_get_block);
+}
+
 static int udf_readpage(struct file *file, struct page *page)
 {
         return mpage_readpage(page, udf_get_block);
@@ -118,21 +140,24 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
         int ret;
 
         ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
-        if (unlikely(ret)) {
-                struct inode *inode = mapping->host;
-                struct udf_inode_info *iinfo = UDF_I(inode);
-                loff_t isize = inode->i_size;
-
-                if (pos + len > isize) {
-                        truncate_pagecache(inode, pos + len, isize);
-                        if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
-                                down_write(&iinfo->i_data_sem);
-                                udf_truncate_extents(inode);
-                                up_write(&iinfo->i_data_sem);
-                        }
-                }
-        }
+        if (unlikely(ret))
+                udf_write_failed(mapping, pos + len);
+        return ret;
+}
+
+static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
+                             const struct iovec *iov,
+                             loff_t offset, unsigned long nr_segs)
+{
+        struct file *file = iocb->ki_filp;
+        struct address_space *mapping = file->f_mapping;
+        struct inode *inode = mapping->host;
+        ssize_t ret;
 
+        ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+                                 udf_get_block);
+        if (unlikely(ret < 0 && (rw & WRITE)))
+                udf_write_failed(mapping, offset + iov_length(iov, nr_segs));
         return ret;
 }
 
@@ -145,8 +170,10 @@ const struct address_space_operations udf_aops = {
         .readpage    = udf_readpage,
         .readpages   = udf_readpages,
         .writepage   = udf_writepage,
-        .write_begin = udf_write_begin,
-        .write_end   = generic_write_end,
+        .writepages  = udf_writepages,
+        .write_begin = udf_write_begin,
+        .write_end   = generic_write_end,
+        .direct_IO   = udf_direct_IO,
         .bmap        = udf_bmap,
 };
 
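For readability, here is how the new UDF direct I/O path reads once the hunks above are applied. It is assembled from the '+' lines; the short comments are editorial summaries, not part of the patch.

/* Assembled from the added lines above, with explanatory comments added. */
static void udf_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;
        struct udf_inode_info *iinfo = UDF_I(inode);
        loff_t isize = inode->i_size;

        /* A write that failed past i_size: drop the stale page cache and,
         * unless the data lives in the ICB, trim extents allocated beyond
         * the old size. */
        if (to > isize) {
                truncate_pagecache(inode, to, isize);
                if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
                        down_write(&iinfo->i_data_sem);
                        udf_truncate_extents(inode);
                        up_write(&iinfo->i_data_sem);
                }
        }
}

static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
                             const struct iovec *iov,
                             loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;

        /* Plain block-device direct I/O using udf_get_block; if a direct
         * write fails, undo any blocks instantiated past the old EOF. */
        ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
                                 udf_get_block);
        if (unlikely(ret < 0 && (rw & WRITE)))
                udf_write_failed(mapping, offset + iov_length(iov, nr_segs));
        return ret;
}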