vfs: Move syncing code from super.c to sync.c (version 4)
Move sync_filesystems(), __fsync_super(), fsync_super() from super.c
to sync.c where it fits better.

[build fixes folded]

Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
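The helpers being moved implement a start-then-wait idiom: __fsync_super() is called once with wait == 0 so writeback for everything is queued and the block layer can batch the IO, then once with wait == 1 to wait on it. Restated as a minimal sketch (illustrative only; this is just the fsync_super() visible in the diff below):

	/*
	 * Two-pass writeback: pass 1 starts IO for all dirty data without
	 * blocking; pass 2 waits for it and picks up stragglers. A single
	 * waiting pass would effectively write one block at a time.
	 */
	static int sync_two_pass(struct super_block *sb)
	{
		int ret = __fsync_super(sb, 0);	/* start IO, don't wait */

		if (ret < 0)
			return ret;
		return __fsync_super(sb, 1);	/* wait for completion */
	}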
--- a/fs/super.c
+++ b/fs/super.c
@@ -283,42 +283,6 @@ void unlock_super(struct super_block * sb)
 EXPORT_SYMBOL(lock_super);
 EXPORT_SYMBOL(unlock_super);
 
-/*
- * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0)
- * just dirties buffers with inodes so we have to submit IO for these buffers
- * via __sync_blockdev(). This also speeds up the wait == 1 case since in that
- * case write_inode() functions do sync_dirty_buffer() and thus effectively
- * write one block at a time.
- */
-static int __fsync_super(struct super_block *sb, int wait)
-{
-	vfs_dq_sync(sb);
-	sync_inodes_sb(sb, wait);
-	lock_super(sb);
-	if (sb->s_dirt && sb->s_op->write_super)
-		sb->s_op->write_super(sb);
-	unlock_super(sb);
-	if (sb->s_op->sync_fs)
-		sb->s_op->sync_fs(sb, wait);
-	return __sync_blockdev(sb->s_bdev, wait);
-}
-
-/*
- * Write out and wait upon all dirty data associated with this
- * superblock. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
-{
-	int ret;
-
-	ret = __fsync_super(sb, 0);
-	if (ret < 0)
-		return ret;
-	return __fsync_super(sb, 1);
-}
-EXPORT_SYMBOL_GPL(fsync_super);
-
 /**
  * generic_shutdown_super - common helper for ->kill_sb()
  * @sb: superblock to kill
@@ -473,55 +437,6 @@ restart:
 	spin_unlock(&sb_lock);
 }
 
-/*
- * Sync all the data for all the filesystems (called by sys_sync() and
- * emergency sync)
- *
- * This operation is careful to avoid the livelock which could easily happen
- * if two or more filesystems are being continuously dirtied. s_need_sync
- * is used only here. We set it against all filesystems and then clear it as
- * we sync them. So redirtied filesystems are skipped.
- *
- * But if process A is currently running sync_filesystems and then process B
- * calls sync_filesystems as well, process B will set all the s_need_sync
- * flags again, which will cause process A to resync everything. Fix that with
- * a local mutex.
- */
-void sync_filesystems(int wait)
-{
-	struct super_block *sb;
-	static DEFINE_MUTEX(mutex);
-
-	mutex_lock(&mutex);		/* Could be down_interruptible */
-	spin_lock(&sb_lock);
-	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (sb->s_flags & MS_RDONLY)
-			continue;
-		sb->s_need_sync = 1;
-	}
-
-restart:
-	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (!sb->s_need_sync)
-			continue;
-		sb->s_need_sync = 0;
-		if (sb->s_flags & MS_RDONLY)
-			continue;	/* hm. Was remounted r/o meanwhile */
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-		down_read(&sb->s_umount);
-		if (sb->s_root)
-			__fsync_super(sb, wait);
-		up_read(&sb->s_umount);
-		/* restart only when sb is no longer on the list */
-		spin_lock(&sb_lock);
-		if (__put_super_and_need_restart(sb))
-			goto restart;
-	}
-	spin_unlock(&sb_lock);
-	mutex_unlock(&mutex);
-}
-
 /**
  * get_super - get the superblock of a device
  * @bdev: device to get the superblock for
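fsync_super() keeps its EXPORT_SYMBOL_GPL and its declaration in fs.h (see the include/linux/fs.h hunks below) because block-device code still calls it. A sketch of such a caller, modeled on the fsync_bdev() of this kernel generation; treat the body as an assumption, it is not part of this patch:

	/* Hedged sketch of a fsync_super() consumer, fsync_bdev()-style:
	 * sync the filesystem mounted on the device if there is one,
	 * otherwise just flush the block device itself. */
	int fsync_bdev_sketch(struct block_device *bdev)
	{
		struct super_block *sb = get_super(bdev);

		if (sb) {
			int res = fsync_super(sb);
			drop_super(sb);
			return res;
		}
		return sync_blockdev(bdev);
	}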
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -18,6 +18,91 @@
 #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
 			SYNC_FILE_RANGE_WAIT_AFTER)
 
+/*
+ * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0)
+ * just dirties buffers with inodes so we have to submit IO for these buffers
+ * via __sync_blockdev(). This also speeds up the wait == 1 case since in that
+ * case write_inode() functions do sync_dirty_buffer() and thus effectively
+ * write one block at a time.
+ */
+static int __fsync_super(struct super_block *sb, int wait)
+{
+	vfs_dq_sync(sb);
+	sync_inodes_sb(sb, wait);
+	lock_super(sb);
+	if (sb->s_dirt && sb->s_op->write_super)
+		sb->s_op->write_super(sb);
+	unlock_super(sb);
+	if (sb->s_op->sync_fs)
+		sb->s_op->sync_fs(sb, wait);
+	return __sync_blockdev(sb->s_bdev, wait);
+}
+
+/*
+ * Write out and wait upon all dirty data associated with this
+ * superblock. Filesystem data as well as the underlying block
+ * device. Takes the superblock lock.
+ */
+int fsync_super(struct super_block *sb)
+{
+	int ret;
+
+	ret = __fsync_super(sb, 0);
+	if (ret < 0)
+		return ret;
+	return __fsync_super(sb, 1);
+}
+EXPORT_SYMBOL_GPL(fsync_super);
+
+/*
+ * Sync all the data for all the filesystems (called by sys_sync() and
+ * emergency sync)
+ *
+ * This operation is careful to avoid the livelock which could easily happen
+ * if two or more filesystems are being continuously dirtied. s_need_sync
+ * is used only here. We set it against all filesystems and then clear it as
+ * we sync them. So redirtied filesystems are skipped.
+ *
+ * But if process A is currently running sync_filesystems and then process B
+ * calls sync_filesystems as well, process B will set all the s_need_sync
+ * flags again, which will cause process A to resync everything. Fix that with
+ * a local mutex.
+ */
+static void sync_filesystems(int wait)
+{
+	struct super_block *sb;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);		/* Could be down_interruptible */
+	spin_lock(&sb_lock);
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (sb->s_flags & MS_RDONLY)
+			continue;
+		sb->s_need_sync = 1;
+	}
+
+restart:
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (!sb->s_need_sync)
+			continue;
+		sb->s_need_sync = 0;
+		if (sb->s_flags & MS_RDONLY)
+			continue;	/* hm. Was remounted r/o meanwhile */
+		sb->s_count++;
+		spin_unlock(&sb_lock);
+		down_read(&sb->s_umount);
+		if (sb->s_root)
+			__fsync_super(sb, wait);
+		up_read(&sb->s_umount);
+		/* restart only when sb is no longer on the list */
+		spin_lock(&sb_lock);
+		if (__put_super_and_need_restart(sb))
+			goto restart;
+	}
+	spin_unlock(&sb_lock);
+	mutex_unlock(&mutex);
+}
+
 SYSCALL_DEFINE0(sync)
 {
 	sync_filesystems(0);
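With the move, sync_filesystems() can become static: after the extern declaration is dropped from fs.h below, its remaining callers, such as sys_sync() in the trailing context above, all live in fs/sync.c. From userspace nothing changes; the sync(2) wrapper still lands in SYSCALL_DEFINE0(sync):

	/* Userspace view (not from the patch): sync(2) enters the kernel
	 * at SYSCALL_DEFINE0(sync) in fs/sync.c, which now drives the
	 * sync_filesystems() shown above. */
	#include <unistd.h>

	int main(void)
	{
		sync();
		return 0;
	}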
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1942,7 +1942,6 @@ extern struct super_block *freeze_bdev(struct block_device *);
 extern void emergency_thaw_all(void);
 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
 extern int fsync_bdev(struct block_device *);
-extern int fsync_super(struct super_block *);
 extern int fsync_no_super(struct block_device *);
 #else
 static inline void bd_forget(struct inode *inode) {}
@@ -1959,6 +1958,7 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
 	return 0;
 }
 #endif
+extern int fsync_super(struct super_block *);
 extern const struct file_operations def_blk_fops;
 extern const struct file_operations def_chr_fops;
 extern const struct file_operations bad_sock_fops;
@@ -2077,7 +2077,6 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
 
 extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync);
 extern void sync_supers(void);
-extern void sync_filesystems(int wait);
 extern void emergency_sync(void);
 extern void emergency_remount(void);
 extern int do_remount_sb(struct super_block *sb, int flags,
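__fsync_super() drives two optional super_operations hooks: ->write_super() when sb->s_dirt is set, then ->sync_fs(sb, wait). A hypothetical filesystem wiring them up, for illustration only (the "foo" names are made up):

	/* Hypothetical "foo" filesystem: the hooks __fsync_super() invokes. */
	static void foo_write_super(struct super_block *sb)
	{
		/* write the in-core superblock back; clear the dirty flag */
		sb->s_dirt = 0;
	}

	static int foo_sync_fs(struct super_block *sb, int wait)
	{
		/* flush filesystem-wide metadata; block on it if wait != 0 */
		return 0;
	}

	static const struct super_operations foo_super_ops = {
		.write_super	= foo_write_super,
		.sync_fs	= foo_sync_fs,
	};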