Merge git://git.linux-nfs.org/pub/linux/nfs-2.6
* git://git.linux-nfs.org/pub/linux/nfs-2.6:
  SUNRPC: Replace flush_workqueue() with cancel_work_sync() and friends
  NFS: Replace flush_scheduled_work with cancel_work_sync() and friends
  SUNRPC: Don't call gss_delete_sec_context() from an rcu context
  NFSv4: Don't call put_rpccred() from an rcu callback
  NFS: Fix NFSv4 open stateid regressions
  NFSv4: Fix a locking regression in nfs4_set_mode_locked()
  NFS: Fix put_nfs_open_context
  SUNRPC: Fix a race in rpciod_down()
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -20,10 +20,8 @@
 #include "delegation.h"
 #include "internal.h"
 
-static void nfs_free_delegation(struct nfs_delegation *delegation)
+static void nfs_do_free_delegation(struct nfs_delegation *delegation)
 {
-        if (delegation->cred)
-                put_rpccred(delegation->cred);
         kfree(delegation);
 }
 
@@ -31,7 +29,18 @@ static void nfs_free_delegation_callback(struct rcu_head *head)
 {
         struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);
 
-        nfs_free_delegation(delegation);
+        nfs_do_free_delegation(delegation);
+}
+
+static void nfs_free_delegation(struct nfs_delegation *delegation)
+{
+        struct rpc_cred *cred;
+
+        cred = rcu_dereference(delegation->cred);
+        rcu_assign_pointer(delegation->cred, NULL);
+        call_rcu(&delegation->rcu, nfs_free_delegation_callback);
+        if (cred)
+                put_rpccred(cred);
 }
 
 static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
@@ -166,7 +175,7 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
         int res = 0;
 
         res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
-        call_rcu(&delegation->rcu, nfs_free_delegation_callback);
+        nfs_free_delegation(delegation);
         return res;
 }
 
@@ -448,7 +457,7 @@ restart:
                 spin_unlock(&clp->cl_lock);
                 rcu_read_unlock();
                 if (delegation != NULL)
-                        call_rcu(&delegation->rcu, nfs_free_delegation_callback);
+                        nfs_free_delegation(delegation);
                 goto restart;
         }
         rcu_read_unlock();
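Note on the delegation.c hunks: an RCU callback runs in softirq context, and put_rpccred() may end up calling functions that sleep, so it cannot run there. The rewritten nfs_free_delegation() strips the credential out of the structure in process context, defers only the kfree() to the grace period via call_rcu(), and drops the credential reference directly. A condensed sketch of that ordering, using hypothetical stand-in names (obj, cred_like, put_cred_like) and assuming kernel context, so it is not buildable standalone:

        /* Hypothetical stand-ins for nfs_delegation / rpc_cred. */
        struct cred_like;
        void put_cred_like(struct cred_like *cred);     /* may sleep */

        struct obj {
                struct cred_like *cred; /* readers use rcu_dereference() */
                struct rcu_head rcu;
        };

        static void obj_free_rcu(struct rcu_head *head)
        {
                /* softirq context: nothing here may sleep */
                kfree(container_of(head, struct obj, rcu));
        }

        static void obj_free(struct obj *obj)
        {
                struct cred_like *cred = rcu_dereference(obj->cred);

                rcu_assign_pointer(obj->cred, NULL);    /* readers now see NULL */
                call_rcu(&obj->rcu, obj_free_rcu);      /* kfree after grace period */
                if (cred)
                        put_cred_like(cred);            /* process context: may sleep */
        }

The same pattern is applied to gss_free_ctx() in the auth_gss.c hunk further down.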
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -468,7 +468,7 @@ static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, str
                 ctx->lockowner = current->files;
                 ctx->error = 0;
                 ctx->dir_cookie = 0;
-                kref_init(&ctx->kref);
+                atomic_set(&ctx->count, 1);
         }
         return ctx;
 }
@@ -476,21 +476,18 @@ static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, str
 struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
 {
         if (ctx != NULL)
-                kref_get(&ctx->kref);
+                atomic_inc(&ctx->count);
         return ctx;
 }
 
-static void nfs_free_open_context(struct kref *kref)
+void put_nfs_open_context(struct nfs_open_context *ctx)
 {
-        struct nfs_open_context *ctx = container_of(kref,
-                        struct nfs_open_context, kref);
+        struct inode *inode = ctx->path.dentry->d_inode;
 
-        if (!list_empty(&ctx->list)) {
-                struct inode *inode = ctx->path.dentry->d_inode;
-                spin_lock(&inode->i_lock);
-                list_del(&ctx->list);
-                spin_unlock(&inode->i_lock);
-        }
+        if (!atomic_dec_and_lock(&ctx->count, &inode->i_lock))
+                return;
+        list_del(&ctx->list);
+        spin_unlock(&inode->i_lock);
         if (ctx->state != NULL)
                 nfs4_close_state(&ctx->path, ctx->state, ctx->mode);
         if (ctx->cred != NULL)
@@ -500,11 +497,6 @@ static void nfs_free_open_context(struct kref *kref)
         kfree(ctx);
 }
 
-void put_nfs_open_context(struct nfs_open_context *ctx)
-{
-        kref_put(&ctx->kref, nfs_free_open_context);
-}
-
 /*
  * Ensure that mmap has a recent RPC credential for use when writing out
  * shared pages
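Note on the put_nfs_open_context() rewrite: replacing the kref with a bare atomic_t lets the final reference drop and the removal from the inode's context list happen as one step. atomic_dec_and_lock() returns true, with the spinlock already held, only when the counter actually reaches zero, so a lookup that walks the list under inode->i_lock can no longer race with the last put. A minimal userspace model of those semantics, assuming C11 atomics and pthreads (dec_and_lock is an illustrative name, not the kernel function):

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdbool.h>

        /* Model of atomic_dec_and_lock(): decrement the counter and return
         * true with *lk held iff the count dropped to zero. */
        static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lk)
        {
                int old = atomic_load(cnt);

                /* Fast path: while other references remain, decrement locklessly. */
                while (old > 1) {
                        if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                                return false;
                }
                /* Possibly the last reference: take the lock first, so list
                 * walkers holding it see either a live object or no object. */
                pthread_mutex_lock(lk);
                if (atomic_fetch_sub(cnt, 1) == 1)
                        return true;    /* caller unlinks, then unlocks */
                pthread_mutex_unlock(lk);
                return false;
        }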
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -175,10 +175,8 @@ static void nfs_expire_automounts(struct work_struct *work)
 
 void nfs_release_automount_timer(void)
 {
-        if (list_empty(&nfs_automount_list)) {
-                cancel_delayed_work(&nfs_automount_task);
-                flush_scheduled_work();
-        }
+        if (list_empty(&nfs_automount_list))
+                cancel_delayed_work_sync(&nfs_automount_task);
 }
 
 /*
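Note: this hunk and the nfs4renewd.c, cache.c and rpc_pipe.c hunks below are all the same conversion. cancel_delayed_work() only removes an item that has not started running, so the old code followed it with flush_scheduled_work(), which waits for every item on the shared workqueue and is deadlock-prone when the caller holds locks that some queued work item might also take. cancel_delayed_work_sync() cancels the item and, if it is already running, waits for that item alone. Sketch of the before/after, assuming kernel context:

        /* Before: cancel, then drain the entire shared queue. */
        cancel_delayed_work(&task);     /* no-op if the work already started */
        flush_scheduled_work();         /* waits on *every* scheduled item */

        /* After: cancel this item and wait for it alone. */
        cancel_delayed_work_sync(&task);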
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -332,11 +332,9 @@ static int can_open_cached(struct nfs4_state *state, int mode)
         switch (mode & (FMODE_READ|FMODE_WRITE|O_EXCL)) {
                 case FMODE_READ:
                         ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0;
-                        ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
                         break;
                 case FMODE_WRITE:
                         ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0;
-                        ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
                         break;
                 case FMODE_READ|FMODE_WRITE:
                         ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
@@ -1260,7 +1258,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
         nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid);
         switch (task->tk_status) {
                 case 0:
-                        nfs_set_open_stateid(state, &calldata->res.stateid, calldata->arg.open_flags);
+                        nfs_set_open_stateid(state, &calldata->res.stateid, 0);
                         renew_lease(server, calldata->timestamp);
                         break;
                 case -NFS4ERR_STALE_STATEID:
@@ -1286,23 +1284,19 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                 .rpc_cred = state->owner->so_cred,
         };
         int clear_rd, clear_wr, clear_rdwr;
-        int mode;
 
         if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
                 return;
 
-        mode = FMODE_READ|FMODE_WRITE;
         clear_rd = clear_wr = clear_rdwr = 0;
         spin_lock(&state->owner->so_lock);
         /* Calculate the change in open mode */
         if (state->n_rdwr == 0) {
                 if (state->n_rdonly == 0) {
-                        mode &= ~FMODE_READ;
                         clear_rd |= test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags);
                         clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags);
                 }
                 if (state->n_wronly == 0) {
-                        mode &= ~FMODE_WRITE;
                         clear_wr |= test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags);
                         clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags);
                 }
@@ -1314,9 +1308,13 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                 return;
         }
         nfs_fattr_init(calldata->res.fattr);
-        if (mode != 0)
+        if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0) {
                 msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
-        calldata->arg.open_flags = mode;
+                calldata->arg.open_flags = FMODE_READ;
+        } else if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0) {
+                msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
+                calldata->arg.open_flags = FMODE_WRITE;
+        }
         calldata->timestamp = jiffies;
         rpc_call_setup(task, &msg, 0);
 }
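Note on nfs4_close_prepare(): instead of carrying a separately computed mode variable (which could go stale against the state bits cleared above it), the new code derives the CLOSE versus OPEN_DOWNGRADE decision directly from whichever NFS_O_*_STATE bits survived the test_and_clear pass. A tiny runnable model of the resulting decision chain (names are illustrative, not the kernel's):

        #include <stdio.h>

        enum close_op { OP_CLOSE, OP_DOWNGRADE_READ, OP_DOWNGRADE_WRITE };

        /* Mirrors the rewritten if/else-if chain: a surviving read-only open
         * downgrades to FMODE_READ, a surviving write-only open to
         * FMODE_WRITE, and otherwise the stateid is fully CLOSEd. */
        static enum close_op pick_close_op(int rdonly_left, int wronly_left)
        {
                if (rdonly_left)
                        return OP_DOWNGRADE_READ;
                if (wronly_left)
                        return OP_DOWNGRADE_WRITE;
                return OP_CLOSE;
        }

        int main(void)
        {
                printf("%d %d %d\n",
                       pick_close_op(1, 0),     /* downgrade to read */
                       pick_close_op(0, 1),     /* downgrade to write */
                       pick_close_op(0, 0));    /* full close */
                return 0;
        }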
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -127,16 +127,15 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
 void
 nfs4_renewd_prepare_shutdown(struct nfs_server *server)
 {
-        flush_scheduled_work();
+        cancel_delayed_work(&server->nfs_client->cl_renewd);
 }
 
 void
 nfs4_kill_renewd(struct nfs_client *clp)
 {
         down_read(&clp->cl_sem);
-        cancel_delayed_work(&clp->cl_renewd);
+        cancel_delayed_work_sync(&clp->cl_renewd);
         up_read(&clp->cl_sem);
-        flush_scheduled_work();
 }
 
 /*
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -341,8 +341,6 @@ nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
                 else
                         list_move_tail(&state->open_states, &state->owner->so_states);
         }
-        if (mode == 0)
-                list_del_init(&state->inode_states);
         state->state = mode;
 }
 
@@ -415,8 +413,7 @@ void nfs4_put_open_state(struct nfs4_state *state)
         if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
                 return;
         spin_lock(&inode->i_lock);
-        if (!list_empty(&state->inode_states))
-                list_del(&state->inode_states);
+        list_del(&state->inode_states);
         list_del(&state->open_states);
         spin_unlock(&inode->i_lock);
         spin_unlock(&owner->so_lock);
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -71,7 +71,7 @@ struct nfs_access_entry {
 
 struct nfs4_state;
 struct nfs_open_context {
-        struct kref kref;
+        atomic_t count;
         struct path path;
         struct rpc_cred *cred;
         struct nfs4_state *state;
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -736,9 +736,6 @@ gss_do_free_ctx(struct gss_cl_ctx *ctx)
 {
         dprintk("RPC: gss_free_ctx\n");
 
-        if (ctx->gc_gss_ctx)
-                gss_delete_sec_context(&ctx->gc_gss_ctx);
-
         kfree(ctx->gc_wire_ctx.data);
         kfree(ctx);
 }
@@ -753,7 +750,13 @@ gss_free_ctx_callback(struct rcu_head *head)
 static void
 gss_free_ctx(struct gss_cl_ctx *ctx)
 {
+        struct gss_ctx *gc_gss_ctx;
+
+        gc_gss_ctx = rcu_dereference(ctx->gc_gss_ctx);
+        rcu_assign_pointer(ctx->gc_gss_ctx, NULL);
         call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
+        if (gc_gss_ctx)
+                gss_delete_sec_context(&gc_gss_ctx);
 }
 
 static void
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -371,8 +371,7 @@ int cache_unregister(struct cache_detail *cd)
         }
         if (list_empty(&cache_list)) {
                 /* module must be being unloaded so its safe to kill the worker */
-                cancel_delayed_work(&cache_cleaner);
-                flush_scheduled_work();
+                cancel_delayed_work_sync(&cache_cleaner);
         }
         return 0;
 }
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -132,8 +132,7 @@ rpc_close_pipes(struct inode *inode)
                 rpci->nwriters = 0;
                 if (ops->release_pipe)
                         ops->release_pipe(inode);
-                cancel_delayed_work(&rpci->queue_timeout);
-                flush_workqueue(rpciod_workqueue);
+                cancel_delayed_work_sync(&rpci->queue_timeout);
         }
         rpc_inode_setowner(inode, NULL);
         mutex_unlock(&inode->i_mutex);
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -50,8 +50,6 @@ static RPC_WAITQ(delay_queue, "delayq");
 /*
  * rpciod-related stuff
  */
-static DEFINE_MUTEX(rpciod_mutex);
-static atomic_t rpciod_users = ATOMIC_INIT(0);
 struct workqueue_struct *rpciod_workqueue;
 
 /*
@@ -961,60 +959,49 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
         spin_unlock(&clnt->cl_lock);
 }
 
+int rpciod_up(void)
+{
+        return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
+}
+
+void rpciod_down(void)
+{
+        module_put(THIS_MODULE);
+}
+
 /*
- * Start up the rpciod process if it's not already running.
+ * Start up the rpciod workqueue.
  */
-int
-rpciod_up(void)
+static int rpciod_start(void)
 {
         struct workqueue_struct *wq;
-        int error = 0;
 
-        if (atomic_inc_not_zero(&rpciod_users))
-                return 0;
-
-        mutex_lock(&rpciod_mutex);
-
-        /* Guard against races with rpciod_down() */
-        if (rpciod_workqueue != NULL)
-                goto out_ok;
         /*
          * Create the rpciod thread and wait for it to start.
          */
         dprintk("RPC: creating workqueue rpciod\n");
-        error = -ENOMEM;
         wq = create_workqueue("rpciod");
-        if (wq == NULL)
-                goto out;
-
         rpciod_workqueue = wq;
-        error = 0;
-out_ok:
-        atomic_inc(&rpciod_users);
-out:
-        mutex_unlock(&rpciod_mutex);
-        return error;
+        return rpciod_workqueue != NULL;
 }
 
-void
-rpciod_down(void)
+static void rpciod_stop(void)
 {
-        if (!atomic_dec_and_test(&rpciod_users))
-                return;
+        struct workqueue_struct *wq = NULL;
 
-        mutex_lock(&rpciod_mutex);
+        if (rpciod_workqueue == NULL)
+                return;
         dprintk("RPC: destroying workqueue rpciod\n");
 
-        if (atomic_read(&rpciod_users) == 0 && rpciod_workqueue != NULL) {
-                destroy_workqueue(rpciod_workqueue);
-                rpciod_workqueue = NULL;
-        }
-        mutex_unlock(&rpciod_mutex);
+        wq = rpciod_workqueue;
+        rpciod_workqueue = NULL;
+        destroy_workqueue(wq);
 }
 
 void
 rpc_destroy_mempool(void)
 {
+        rpciod_stop();
         if (rpc_buffer_mempool)
                 mempool_destroy(rpc_buffer_mempool);
         if (rpc_task_mempool)
@@ -1048,6 +1035,8 @@ rpc_init_mempool(void)
                         rpc_buffer_slabp);
         if (!rpc_buffer_mempool)
                 goto err_nomem;
+        if (!rpciod_start())
+                goto err_nomem;
         return 0;
 err_nomem:
         rpc_destroy_mempool();
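Note on the sched.c rewrite: the rpciod_users counter and rpciod_mutex are gone. The workqueue is now created once in rpc_init_mempool() and destroyed in rpc_destroy_mempool(), so it lives exactly as long as the sunrpc module, and rpciod_up()/rpciod_down() reduce to pinning the module so it cannot be unloaded (and the queue destroyed) while an RPC client still holds a reference. A small userspace model of that pinning rule, with illustrative names and C11 atomics (the real try_module_get() keys off the module's unload state rather than a bare counter):

        #include <stdatomic.h>
        #include <stdbool.h>

        static atomic_int module_refs = 1;      /* 1 == loaded */

        /* Model of try_module_get(): pinning fails once teardown has
         * dropped the count to zero, i.e. once unload is under way. */
        static bool module_get(void)
        {
                int old = atomic_load(&module_refs);

                while (old > 0) {
                        if (atomic_compare_exchange_weak(&module_refs, &old, old + 1))
                                return true;
                }
                return false;
        }

        static void module_put_model(void)
        {
                atomic_fetch_sub(&module_refs, 1);
        }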