Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -208,7 +208,7 @@ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
 	}
 }
 
-void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
 {
 	struct mthca_cq *cq;
 
@@ -224,6 +224,35 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
 	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
 
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
+		    enum ib_event_type event_type)
+{
+	struct mthca_cq *cq;
+	struct ib_event event;
+
+	spin_lock(&dev->cq_table.lock);
+
+	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
+
+	if (cq)
+		atomic_inc(&cq->refcount);
+	spin_unlock(&dev->cq_table.lock);
+
+	if (!cq) {
+		mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+		return;
+	}
+
+	event.device      = &dev->ib_dev;
+	event.event       = event_type;
+	event.element.cq  = &cq->ibcq;
+	if (cq->ibcq.event_handler)
+		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
+
+	if (atomic_dec_and_test(&cq->refcount))
+		wake_up(&cq->wait);
+}
+
 void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 		    struct mthca_srq *srq)
 {
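The refcount/wake_up pairing in the new mthca_cq_event() only makes sense next to the teardown path. A minimal sketch of the destroy side (not the literal mthca_free_cq() body; example_destroy_cq is an illustrative name, and it assumes the driver's mthca_array_clear() helper): unpublish the CQ so no new event can look it up, drop the reference taken at creation, then sleep until any handler still inside mthca_cq_event() has dropped the reference it took.

    static void example_destroy_cq(struct mthca_dev *dev, struct mthca_cq *cq)
    {
            /* unpublish: later events can no longer find this CQ */
            spin_lock_irq(&dev->cq_table.lock);
            mthca_array_clear(&dev->cq_table.cq,
                              cq->cqn & (dev->limits.num_cqs - 1));
            spin_unlock_irq(&dev->cq_table.lock);

            /* drop the initial reference from CQ creation ... */
            atomic_dec(&cq->refcount);
            /* ... and wait for in-flight event handlers to drop theirs */
            wait_event(cq->wait, !atomic_read(&cq->refcount));
    }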
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -460,7 +460,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 		  struct mthca_cq *cq);
 void mthca_free_cq(struct mthca_dev *dev,
 		   struct mthca_cq *cq);
-void mthca_cq_event(struct mthca_dev *dev, u32 cqn);
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
+		    enum ib_event_type event_type);
 void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 		    struct mthca_srq *srq);
 
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -292,7 +292,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 		case MTHCA_EVENT_TYPE_COMP:
 			disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
 			disarm_cq(dev, eq->eqn, disarm_cqn);
-			mthca_cq_event(dev, disarm_cqn);
+			mthca_cq_completion(dev, disarm_cqn);
 			break;
 
 		case MTHCA_EVENT_TYPE_PATH_MIG:
@@ -364,6 +364,8 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 				   eqe->event.cq_err.syndrome == 1 ?
 				   "overrun" : "access violation",
 				   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
+			mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+				       IB_EVENT_CQ_ERR);
 			break;
 
 		case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
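For context, the IB_EVENT_CQ_ERR dispatched above reaches whatever event handler the consumer registered when creating the CQ. A hypothetical consumer-side sketch (my_cq_event_handler, my_comp_handler, and my_context are illustrative names, not part of this commit; the five-argument ib_create_cq() is the kernel verbs signature of this era):

    static void my_cq_event_handler(struct ib_event *event, void *context)
    {
            if (event->event == IB_EVENT_CQ_ERR)
                    printk(KERN_ERR "CQ overrun/access error on CQ %p\n",
                           event->element.cq);
    }

    /* at setup time: */
    cq = ib_create_cq(device, my_comp_handler, my_cq_event_handler,
                      my_context, nent);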
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1057,7 +1057,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
 		goto err_cmd;
 
 	if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) {
-		mthca_warn(mdev, "HCA FW version %x.%x.%x is old (%x.%x.%x is current).\n",
+		mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n",
 			   (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
 			   (int) (mdev->fw_ver & 0xffff),
 			   (int) (mthca_hca_table[id->driver_data].latest_fw >> 32),
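The format change matches how mthca packs fw_ver: major version in bits 63:32, minor in bits 31:16, sub-minor in bits 15:0, each of which is a decimal number. A standalone worked example (userspace C, for illustration only):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* firmware 3.3.10 packed the way mdev->fw_ver is */
            uint64_t fw_ver = (3ULL << 32) | (3 << 16) | 10;

            /* old "%x.%x.%x" printed this as "3.3.a";
             * "%d.%d.%d" prints the natural "3.3.10" */
            printf("%d.%d.%d\n", (int) (fw_ver >> 32),
                   (int) (fw_ver >> 16) & 0xffff,
                   (int) (fw_ver & 0xffff));
            return 0;
    }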
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -140,13 +140,11 @@ static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
 	buddy->max_order = max_order;
 	spin_lock_init(&buddy->lock);
 
-	buddy->bits = kmalloc((buddy->max_order + 1) * sizeof (long *),
+	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
 			      GFP_KERNEL);
 	if (!buddy->bits)
 		goto err_out;
 
-	memset(buddy->bits, 0, (buddy->max_order + 1) * sizeof (long *));
-
 	for (i = 0; i <= buddy->max_order; ++i) {
 		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
 		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -82,12 +82,10 @@ u64 mthca_make_profile(struct mthca_dev *dev,
 	struct mthca_resource tmp;
 	int i, j;
 
-	profile = kmalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
+	profile = kzalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
 	if (!profile)
 		return -ENOMEM;
 
-	memset(profile, 0, MTHCA_RES_NUM * sizeof *profile);
-
 	profile[MTHCA_RES_QP].size   = dev_lim->qpc_entry_sz;
 	profile[MTHCA_RES_EEC].size  = dev_lim->eec_entry_sz;
 	profile[MTHCA_RES_SRQ].size  = dev_lim->srq_entry_sz;
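This hunk and the mthca_buddy_init() one above are the same mechanical conversion: kzalloc() allocates and zeroes in one call, so the separate memset() goes away. The general before/after pattern, as a sketch:

    p = kmalloc(size, GFP_KERNEL);          /* before: allocate ... */
    if (!p)
            return -ENOMEM;
    memset(p, 0, size);                     /* ... then zero by hand */

    p = kzalloc(size, GFP_KERNEL);          /* after: one call does both */
    if (!p)
            return -ENOMEM;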
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1028,7 +1028,7 @@ static ssize_t show_rev(struct class_device *cdev, char *buf)
 static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
 {
 	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
-	return sprintf(buf, "%x.%x.%x\n", (int) (dev->fw_ver >> 32),
+	return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
 		       (int) (dev->fw_ver >> 16) & 0xffff,
 		       (int) dev->fw_ver & 0xffff);
 }
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -584,6 +584,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		return -EINVAL;
 	}
 
+	if ((attr_mask & IB_QP_PKEY_INDEX) &&
+	    attr->pkey_index >= dev->limits.pkey_table_len) {
+		mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
+			  attr->pkey_index, dev->limits.pkey_table_len-1);
+		return -EINVAL;
+	}
+
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
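A hypothetical caller-side sketch of what the new check rejects (example_set_pkey is an illustrative name, and the value 64 assumes a 64-entry P_Key table, purely for illustration):

    static int example_set_pkey(struct ib_qp *qp)
    {
            struct ib_qp_attr attr = {
                    .qp_state        = IB_QPS_INIT,
                    .pkey_index      = 64,  /* one past the last valid index, 0..63 */
                    .port_num        = 1,
                    .qp_access_flags = 0,
            };

            /* with this check, mthca now fails cleanly with -EINVAL
             * instead of programming a bogus index into the QP context */
            return ib_modify_qp(qp, &attr,
                                IB_QP_STATE | IB_QP_PKEY_INDEX |
                                IB_QP_PORT | IB_QP_ACCESS_FLAGS);
    }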
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -75,15 +75,16 @@ static void *get_wqe(struct mthca_srq *srq, int n)
 
 /*
  * Return a pointer to the location within a WQE that we're using as a
- * link when the WQE is in the free list.  We use an offset of 4
- * because in the Tavor case, posting a WQE may overwrite the first
- * four bytes of the previous WQE.  The offset avoids corrupting our
- * free list if the WQE has already completed and been put on the free
- * list when we post the next WQE.
+ * link when the WQE is in the free list.  We use the imm field
+ * because in the Tavor case, posting a WQE may overwrite the next
+ * segment of the previous WQE, but a receive WQE will never touch the
+ * imm field.  This avoids corrupting our free list if the previous
+ * WQE has already completed and been put on the free list when we
+ * post the next WQE.
  */
 static inline int *wqe_to_link(void *wqe)
 {
-	return (int *) (wqe + 4);
+	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
 }
 
 static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
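The offsetof() above resolves against the WQE's next segment. Reproduced below for illustration is the layout as I understand it from the mthca driver of this era (field comments are a paraphrase, not a quote): posting a new WQE rewrites the nda_op/ee_nds words at the start of the previous WQE to link it into the chain, which is why a free-list link stored at offset 4 could be clobbered, while the imm word is never written when posting a receive WQE.

    struct mthca_next_seg {
            __be32 nda_op;  /* [31:6] next WQE address, [4:0] next opcode */
            __be32 ee_nds;  /* [31:8] next EE, [7] DBD, [6] F, [5:0] next WQE size */
            __be32 flags;   /* [3] CQ, [2] event, [1] solicited */
            __be32 imm;     /* immediate data: the free-list link now lives here */
    };
    /* so wqe_to_link() now returns wqe + 12 rather than wqe + 4 */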