RDMA/cxgb4: Serialize calls to CQ's comp_handler
Commit 01e7da6ba5
("RDMA/cxgb4: Make sure flush CQ entries are
collected on connection close") introduced a potential problem where a
CQ's comp_handler can get called simultaneously from different places
in the iw_cxgb4 driver. This does not comply with
Documentation/infiniband/core_locking.txt, which states that at any
given point of time, only one callback per CQ should be
active.
This problem was reported by Parav Pandit <Parav.Pandit@Emulex.Com>.
Based on discussion between Parav Pandit and Steve Wise, this patch
fixes the above problem by serializing the calls to a CQ's
comp_handler using a spin_lock.
Reported-by: Parav Pandit <Parav.Pandit@Emulex.Com>
Signed-off-by: Kumar Sanghvi <kumaras@chelsio.com>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
This commit is contained in:
committed by
Roland Dreier
parent
e14d62c05c
commit
581bbe2cd0
@@ -818,6 +818,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
|
|||||||
chp->cq.size--; /* status page */
|
chp->cq.size--; /* status page */
|
||||||
chp->ibcq.cqe = entries - 2;
|
chp->ibcq.cqe = entries - 2;
|
||||||
spin_lock_init(&chp->lock);
|
spin_lock_init(&chp->lock);
|
||||||
|
spin_lock_init(&chp->comp_handler_lock);
|
||||||
atomic_set(&chp->refcnt, 1);
|
atomic_set(&chp->refcnt, 1);
|
||||||
init_waitqueue_head(&chp->wait);
|
init_waitqueue_head(&chp->wait);
|
||||||
ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
|
ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
|
||||||
|
@@ -42,6 +42,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
|
|||||||
{
|
{
|
||||||
struct ib_event event;
|
struct ib_event event;
|
||||||
struct c4iw_qp_attributes attrs;
|
struct c4iw_qp_attributes attrs;
|
||||||
|
unsigned long flag;
|
||||||
|
|
||||||
if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
|
if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
|
||||||
(qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
|
(qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
|
||||||
@@ -72,7 +73,9 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
|
|||||||
if (qhp->ibqp.event_handler)
|
if (qhp->ibqp.event_handler)
|
||||||
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
|
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
|
||||||
|
|
||||||
|
spin_lock_irqsave(&chp->comp_handler_lock, flag);
|
||||||
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
|
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
|
||||||
|
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
|
||||||
}
|
}
|
||||||
|
|
||||||
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
|
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
|
||||||
@@ -183,11 +186,14 @@ out:
|
|||||||
int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
|
int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
|
||||||
{
|
{
|
||||||
struct c4iw_cq *chp;
|
struct c4iw_cq *chp;
|
||||||
|
unsigned long flag;
|
||||||
|
|
||||||
chp = get_chp(dev, qid);
|
chp = get_chp(dev, qid);
|
||||||
if (chp)
|
if (chp) {
|
||||||
|
spin_lock_irqsave(&chp->comp_handler_lock, flag);
|
||||||
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
|
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
|
||||||
else
|
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
|
||||||
|
} else
|
||||||
PDBG("%s unknown cqid 0x%x\n", __func__, qid);
|
PDBG("%s unknown cqid 0x%x\n", __func__, qid);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@@ -309,6 +309,7 @@ struct c4iw_cq {
|
|||||||
struct c4iw_dev *rhp;
|
struct c4iw_dev *rhp;
|
||||||
struct t4_cq cq;
|
struct t4_cq cq;
|
||||||
spinlock_t lock;
|
spinlock_t lock;
|
||||||
|
spinlock_t comp_handler_lock;
|
||||||
atomic_t refcnt;
|
atomic_t refcnt;
|
||||||
wait_queue_head_t wait;
|
wait_queue_head_t wait;
|
||||||
};
|
};
|
||||||
|
@@ -941,8 +941,11 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
|
|||||||
flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
|
flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
|
||||||
spin_unlock(&qhp->lock);
|
spin_unlock(&qhp->lock);
|
||||||
spin_unlock_irqrestore(&rchp->lock, flag);
|
spin_unlock_irqrestore(&rchp->lock, flag);
|
||||||
if (flushed)
|
if (flushed) {
|
||||||
|
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
|
||||||
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
|
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
|
||||||
|
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
|
||||||
|
}
|
||||||
|
|
||||||
/* locking hierarchy: cq lock first, then qp lock. */
|
/* locking hierarchy: cq lock first, then qp lock. */
|
||||||
spin_lock_irqsave(&schp->lock, flag);
|
spin_lock_irqsave(&schp->lock, flag);
|
||||||
@@ -952,13 +955,17 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
|
|||||||
flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
|
flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
|
||||||
spin_unlock(&qhp->lock);
|
spin_unlock(&qhp->lock);
|
||||||
spin_unlock_irqrestore(&schp->lock, flag);
|
spin_unlock_irqrestore(&schp->lock, flag);
|
||||||
if (flushed)
|
if (flushed) {
|
||||||
|
spin_lock_irqsave(&schp->comp_handler_lock, flag);
|
||||||
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
|
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
|
||||||
|
spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void flush_qp(struct c4iw_qp *qhp)
|
static void flush_qp(struct c4iw_qp *qhp)
|
||||||
{
|
{
|
||||||
struct c4iw_cq *rchp, *schp;
|
struct c4iw_cq *rchp, *schp;
|
||||||
|
unsigned long flag;
|
||||||
|
|
||||||
rchp = get_chp(qhp->rhp, qhp->attr.rcq);
|
rchp = get_chp(qhp->rhp, qhp->attr.rcq);
|
||||||
schp = get_chp(qhp->rhp, qhp->attr.scq);
|
schp = get_chp(qhp->rhp, qhp->attr.scq);
|
||||||
@@ -966,11 +973,15 @@ static void flush_qp(struct c4iw_qp *qhp)
|
|||||||
if (qhp->ibqp.uobject) {
|
if (qhp->ibqp.uobject) {
|
||||||
t4_set_wq_in_error(&qhp->wq);
|
t4_set_wq_in_error(&qhp->wq);
|
||||||
t4_set_cq_in_error(&rchp->cq);
|
t4_set_cq_in_error(&rchp->cq);
|
||||||
|
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
|
||||||
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
|
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
|
||||||
|
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
|
||||||
if (schp != rchp) {
|
if (schp != rchp) {
|
||||||
t4_set_cq_in_error(&schp->cq);
|
t4_set_cq_in_error(&schp->cq);
|
||||||
|
spin_lock_irqsave(&schp->comp_handler_lock, flag);
|
||||||
(*schp->ibcq.comp_handler)(&schp->ibcq,
|
(*schp->ibcq.comp_handler)(&schp->ibcq,
|
||||||
schp->ibcq.cq_context);
|
schp->ibcq.cq_context);
|
||||||
|
spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
|
||||||
}
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
Reference in New Issue
Block a user