New locking/refcounting for fs_struct
New locking/refcounting rules for fs_struct:

* All changes of current->fs are done under task_lock and write_lock of the old fs->lock.
* The refcount is no longer atomic (it is protected by the same locks).
* Decrements happen when removing the reference from current; at the same time we decide whether to free the structure.
* put_fs_struct() is gone.
* New field ->in_exec: set by check_unsafe_exec() if we are trying to do execve() and only subthreads share the fs_struct; cleared when exec finishes (success and failure alike). While set, CLONE_FS fails with -EAGAIN.
* check_unsafe_exec() may fail with -EAGAIN if another execve() from a subthread is in progress.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
@@ -72,25 +72,27 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
|
||||
path_put(old_root);
|
||||
}
|
||||
|
||||
void put_fs_struct(struct fs_struct *fs)
|
||||
void free_fs_struct(struct fs_struct *fs)
|
||||
{
|
||||
/* No need to hold fs->lock if we are killing it */
|
||||
if (atomic_dec_and_test(&fs->count)) {
|
||||
path_put(&fs->root);
|
||||
path_put(&fs->pwd);
|
||||
kmem_cache_free(fs_cachep, fs);
|
||||
}
|
||||
path_put(&fs->root);
|
||||
path_put(&fs->pwd);
|
||||
kmem_cache_free(fs_cachep, fs);
|
||||
}
|
||||
|
||||
void exit_fs(struct task_struct *tsk)
|
||||
{
|
||||
struct fs_struct * fs = tsk->fs;
|
||||
struct fs_struct *fs = tsk->fs;
|
||||
|
||||
if (fs) {
|
||||
int kill;
|
||||
task_lock(tsk);
|
||||
write_lock(&fs->lock);
|
||||
tsk->fs = NULL;
|
||||
kill = !--fs->users;
|
||||
write_unlock(&fs->lock);
|
||||
task_unlock(tsk);
|
||||
put_fs_struct(fs);
|
||||
if (kill)
|
||||
free_fs_struct(fs);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -99,7 +101,8 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
|
||||
struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
|
||||
/* We don't need to lock fs - think why ;-) */
|
||||
if (fs) {
|
||||
atomic_set(&fs->count, 1);
|
||||
fs->users = 1;
|
||||
fs->in_exec = 0;
|
||||
rwlock_init(&fs->lock);
|
||||
fs->umask = old->umask;
|
||||
read_lock(&old->lock);
|
||||
@@ -114,28 +117,54 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
|
||||
|
||||
int unshare_fs_struct(void)
|
||||
{
|
||||
struct fs_struct *fsp = copy_fs_struct(current->fs);
|
||||
if (!fsp)
|
||||
struct fs_struct *fs = current->fs;
|
||||
struct fs_struct *new_fs = copy_fs_struct(fs);
|
||||
int kill;
|
||||
|
||||
if (!new_fs)
|
||||
return -ENOMEM;
|
||||
exit_fs(current);
|
||||
current->fs = fsp;
|
||||
|
||||
task_lock(current);
|
||||
write_lock(&fs->lock);
|
||||
kill = !--fs->users;
|
||||
current->fs = new_fs;
|
||||
write_unlock(&fs->lock);
|
||||
task_unlock(current);
|
||||
|
||||
if (kill)
|
||||
free_fs_struct(fs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(unshare_fs_struct);
|
||||
|
||||
/* to be mentioned only in INIT_TASK */
|
||||
struct fs_struct init_fs = {
|
||||
.count = ATOMIC_INIT(1),
|
||||
.users = 1,
|
||||
.lock = __RW_LOCK_UNLOCKED(init_fs.lock),
|
||||
.umask = 0022,
|
||||
};
|
||||
|
||||
void daemonize_fs_struct(void)
|
||||
{
|
||||
struct fs_struct *fs;
|
||||
struct fs_struct *fs = current->fs;
|
||||
|
||||
exit_fs(current); /* current->fs->count--; */
|
||||
fs = &init_fs;
|
||||
current->fs = fs;
|
||||
atomic_inc(&fs->count);
|
||||
if (fs) {
|
||||
int kill;
|
||||
|
||||
task_lock(current);
|
||||
|
||||
write_lock(&init_fs.lock);
|
||||
init_fs.users++;
|
||||
write_unlock(&init_fs.lock);
|
||||
|
||||
write_lock(&fs->lock);
|
||||
current->fs = &init_fs;
|
||||
kill = !--fs->users;
|
||||
write_unlock(&fs->lock);
|
||||
|
||||
task_unlock(current);
|
||||
if (kill)
|
||||
free_fs_struct(fs);
|
||||
}
|
||||
}
|
||||
|
Reference in New Issue
Block a user