RDMA/cxgb3: Serialize calls to CQ's comp_handler
iw_cxgb3 has a potential problem where a CQ's comp_handler can get called
simultaneously from different places in the iw_cxgb3 driver. This does not
comply with Documentation/infiniband/core_locking.txt, which states that at
any given point of time, only one callback per CQ should be active. The same
problem was reported by Parav Pandit <Parav.Pandit@Emulex.Com> for the
iw_cxgb4 driver.

Based on discussion between Parav Pandit and Steve Wise, this patch fixes
the above problem by serializing the calls to a CQ's comp_handler using a
spin_lock.

Signed-off-by: Kumar Sanghvi <kumaras@chelsio.com>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
commit f7cc25d018 (parent 976d167615), committed by Roland Dreier
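For illustration, here is a minimal, hypothetical sketch of the pattern this
patch applies: every invocation of a CQ's completion handler is bracketed by
a dedicated per-CQ spinlock, taken with spin_lock_irqsave() so the same code
is safe whether the caller runs in process or interrupt context. The names
example_cq, example_cq_init and example_notify_cq below are made up for this
sketch; only comp_handler_lock mirrors the field the patch actually adds.

#include <linux/spinlock.h>

/* Hypothetical, pared-down CQ -- only the fields needed to show the
 * locking pattern.  comp_handler_lock mirrors the field added by this
 * patch; the rest stand in for the real iw_cxgb3 structures. */
struct example_cq {
	spinlock_t comp_handler_lock;	/* serializes comp_handler calls */
	void (*comp_handler)(struct example_cq *cq, void *cq_context);
	void *cq_context;
};

static void example_cq_init(struct example_cq *cq)
{
	spin_lock_init(&cq->comp_handler_lock);
}

/* Invoke the completion handler with the per-CQ lock held.  The irqsave
 * variant disables local interrupts while the lock is held, so a handler
 * fired from IRQ context cannot deadlock against this caller. */
static void example_notify_cq(struct example_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->comp_handler_lock, flags);
	(*cq->comp_handler)(cq, cq->cq_context);
	spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
}

With every call site funneled through this pattern, two threads (or an IRQ
and a thread) that want to notify the same CQ simply take turns, which is
the "one active callback per CQ" guarantee core_locking.txt requires.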
drivers/infiniband/hw/cxgb3/iwch_ev.c:

@@ -46,6 +46,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	struct ib_event event;
 	struct iwch_qp_attributes attrs;
 	struct iwch_qp *qhp;
+	unsigned long flag;
 
 	spin_lock(&rnicp->lock);
 	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
@@ -94,7 +95,9 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	if (qhp->ibqp.event_handler)
 		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
+	spin_lock_irqsave(&chp->comp_handler_lock, flag);
 	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
 
 	if (atomic_dec_and_test(&qhp->refcnt))
 		wake_up(&qhp->wait);
@@ -107,6 +110,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	struct iwch_cq *chp;
 	struct iwch_qp *qhp;
 	u32 cqid = RSPQ_CQID(rsp_msg);
+	unsigned long flag;
 
 	rnicp = (struct iwch_dev *) rdev_p->ulp;
 	spin_lock(&rnicp->lock);
@@ -170,7 +174,9 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 		 */
 		if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
 			dst_confirm(qhp->ep->dst);
+		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
 		break;
 
 	case TPT_ERR_STAG:
drivers/infiniband/hw/cxgb3/iwch_provider.c:

@@ -190,6 +190,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
 	chp->rhp = rhp;
 	chp->ibcq.cqe = 1 << chp->cq.size_log2;
 	spin_lock_init(&chp->lock);
+	spin_lock_init(&chp->comp_handler_lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
 	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
drivers/infiniband/hw/cxgb3/iwch_provider.h:

@@ -103,6 +103,7 @@ struct iwch_cq {
 	struct iwch_dev *rhp;
 	struct t3_cq cq;
 	spinlock_t lock;
+	spinlock_t comp_handler_lock;
 	atomic_t refcnt;
 	wait_queue_head_t wait;
 	u32 __user *user_rptr_addr;
drivers/infiniband/hw/cxgb3/iwch_qp.c:

@@ -822,8 +822,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&rchp->lock, *flag);
-	if (flushed)
+	if (flushed) {
+		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+	}
 
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, *flag);
@@ -833,8 +836,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, *flag);
-	if (flushed)
+	if (flushed) {
+		spin_lock_irqsave(&schp->comp_handler_lock, *flag);
 		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+		spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+	}
 
 	/* deref */
 	if (atomic_dec_and_test(&qhp->refcnt))
@@ -853,11 +859,15 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	if (qhp->ibqp.uobject) {
 		cxio_set_wq_in_error(&qhp->wq);
 		cxio_set_cq_in_error(&rchp->cq);
+		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
 		if (schp != rchp) {
 			cxio_set_cq_in_error(&schp->cq);
+			spin_lock_irqsave(&schp->comp_handler_lock, *flag);
 			(*schp->ibcq.comp_handler)(&schp->ibcq,
 						   schp->ibcq.cq_context);
+			spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
 		}
 		return;
 	}