Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	drivers/infiniband/core/iwcm.c
	drivers/net/chelsio/cxgb2.c
	drivers/net/wireless/bcm43xx/bcm43xx_main.c
	drivers/net/wireless/prism54/islpci_eth.c
	drivers/usb/core/hub.h
	drivers/usb/input/hid-core.c
	net/core/netpoll.c

Fix up merge failures with Linus's head and fix new compilation failures.

Signed-Off-By: David Howells <dhowells@redhat.com>
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -139,7 +139,7 @@ static void queue_req(struct addr_req *req)
 
 	mutex_lock(&lock);
 	list_for_each_entry_reverse(temp_req, &req_list, list) {
-		if (time_after(req->timeout, temp_req->timeout))
+		if (time_after_eq(req->timeout, temp_req->timeout))
 			break;
 	}
 
@@ -225,19 +225,17 @@ static void process_req(struct work_struct *work)
 
 	mutex_lock(&lock);
 	list_for_each_entry_safe(req, temp_req, &req_list, list) {
-		if (req->status) {
+		if (req->status == -ENODATA) {
 			src_in = (struct sockaddr_in *) &req->src_addr;
 			dst_in = (struct sockaddr_in *) &req->dst_addr;
 			req->status = addr_resolve_remote(src_in, dst_in,
 							  req->addr);
+			if (req->status && time_after_eq(jiffies, req->timeout))
+				req->status = -ETIMEDOUT;
+			else if (req->status == -ENODATA)
+				continue;
 		}
-		if (req->status && time_after(jiffies, req->timeout))
-			req->status = -ETIMEDOUT;
-		else if (req->status == -ENODATA)
-			continue;
-
-		list_del(&req->list);
-		list_add_tail(&req->list, &done_list);
+		list_move_tail(&req->list, &done_list);
 	}
 
 	if (!list_empty(&req_list)) {
@@ -347,8 +345,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 		if (req->addr == addr) {
 			req->status = -ECANCELED;
 			req->timeout = jiffies;
-			list_del(&req->list);
-			list_add(&req->list, &req_list);
+			list_move(&req->list, &req_list);
 			set_timeout(req->timeout);
 			break;
 		}
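The list conversions above replace open-coded list_del() + list_add()/list_add_tail() pairs with the list_move() helpers. As a rough sketch of what <linux/list.h> provides (paraphrased, not verbatim kernel source; the sketch_ names are ours):

#include <linux/list.h>

/* Paraphrase of the list_move helpers: unlink an entry from whatever
 * list it is on and splice it onto another list in a single call,
 * replacing the two-step list_del() + list_add{,_tail}(). */
static inline void sketch_list_move(struct list_head *entry,
				    struct list_head *head)
{
	list_del(entry);		/* unlink from current list */
	list_add(entry, head);		/* re-link at head of target */
}

static inline void sketch_list_move_tail(struct list_head *entry,
					 struct list_head *head)
{
	list_del(entry);		/* unlink from current list */
	list_add_tail(entry, head);	/* re-link at tail of target */
}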
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -147,12 +147,12 @@ struct cm_id_private {
 	__be32 rq_psn;
 	int timeout_ms;
 	enum ib_mtu path_mtu;
+	__be16 pkey;
 	u8 private_data_len;
 	u8 max_cm_retries;
 	u8 peer_to_peer;
 	u8 responder_resources;
 	u8 initiator_depth;
-	u8 local_ack_timeout;
 	u8 retry_count;
 	u8 rnr_retry_count;
 	u8 service_timeout;
@@ -240,11 +240,10 @@ static void * cm_copy_private_data(const void *private_data,
 	if (!private_data || !private_data_len)
 		return NULL;
 
-	data = kmalloc(private_data_len, GFP_KERNEL);
+	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
 	if (!data)
 		return ERR_PTR(-ENOMEM);
 
-	memcpy(data, private_data, private_data_len);
 	return data;
 }
 
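This is the first of several kmalloc() + memcpy() pairs in this merge that collapse to kmemdup(), a helper added to the kernel around 2.6.19 in mm/util.c. A sketch of its semantics (illustrative reimplementation, not the kernel's exact code):

#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative reimplementation of kmemdup(): allocate len bytes with
 * the given allocation flags and copy src into the new buffer;
 * returns NULL if the allocation fails. */
static void *kmemdup_sketch(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}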
@@ -690,7 +689,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 	 * timewait before notifying the user that we've exited timewait.
 	 */
 	cm_id_priv->id.state = IB_CM_TIMEWAIT;
-	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
+	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
 	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
 			   msecs_to_jiffies(wait_time));
 	cm_id_priv->timewait_info = NULL;
@@ -1009,6 +1008,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 	cm_id_priv->responder_resources = param->responder_resources;
 	cm_id_priv->retry_count = param->retry_count;
 	cm_id_priv->path_mtu = param->primary_path->mtu;
+	cm_id_priv->pkey = param->primary_path->pkey;
 	cm_id_priv->qp_type = param->qp_type;
 
 	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
@@ -1023,8 +1023,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 
 	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
 	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-				cm_req_get_primary_local_ack_timeout(req_msg);
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
@@ -1409,9 +1407,8 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
 	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
 	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
+	cm_id_priv->pkey = req_msg->pkey;
 	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-				cm_req_get_primary_local_ack_timeout(req_msg);
 	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
 	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
 	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
@@ -1715,7 +1712,7 @@ static int cm_establish_handler(struct cm_work *work)
 	unsigned long flags;
 	int ret;
 
-	/* See comment in ib_cm_establish about lookup. */
+	/* See comment in cm_establish about lookup. */
 	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
 	if (!cm_id_priv)
 		return -EINVAL;
@@ -2401,11 +2398,16 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state != IB_CM_ESTABLISHED ||
-	    cm_id->lap_state != IB_CM_LAP_IDLE) {
+	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
+	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
+	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+	if (ret)
+		goto out;
+
 	ret = cm_alloc_msg(cm_id_priv, &msg);
 	if (ret)
 		goto out;
@@ -2430,7 +2432,8 @@ out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 }
 EXPORT_SYMBOL(ib_send_cm_lap);
 
-static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
+static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
+				    struct ib_sa_path_rec *path,
 				    struct cm_lap_msg *lap_msg)
 {
 	memset(path, 0, sizeof *path);
@@ -2442,10 +2445,10 @@ static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
 	path->hop_limit = lap_msg->alt_hop_limit;
 	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
 	path->reversible = 1;
-	/* pkey is same as in REQ */
+	path->pkey = cm_id_priv->pkey;
 	path->sl = cm_lap_get_sl(lap_msg);
 	path->mtu_selector = IB_SA_EQ;
-	/* mtu is same as in REQ */
+	path->mtu = cm_id_priv->path_mtu;
 	path->rate_selector = IB_SA_EQ;
 	path->rate = cm_lap_get_packet_rate(lap_msg);
 	path->packet_life_time_selector = IB_SA_EQ;
@@ -2471,7 +2474,7 @@ static int cm_lap_handler(struct cm_work *work)
 
 	param = &work->cm_event.param.lap_rcvd;
 	param->alternate_path = &work->path[0];
-	cm_format_path_from_lap(param->alternate_path, lap_msg);
+	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
 	work->cm_event.private_data = &lap_msg->private_data;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -2479,6 +2482,7 @@ static int cm_lap_handler(struct cm_work *work)
 		goto unlock;
 
 	switch (cm_id_priv->id.lap_state) {
+	case IB_CM_LAP_UNINIT:
 	case IB_CM_LAP_IDLE:
 		break;
 	case IB_CM_MRA_LAP_SENT:
@@ -2501,6 +2505,10 @@ static int cm_lap_handler(struct cm_work *work)
 
 	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
 	cm_id_priv->tid = lap_msg->hdr.tid;
+	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+				work->mad_recv_wc->recv_buf.grh,
+				&cm_id_priv->av);
+	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3039,7 +3047,7 @@ static void cm_work_handler(struct work_struct *_work)
 		cm_free_work(work);
 }
 
-int ib_cm_establish(struct ib_cm_id *cm_id)
+static int cm_establish(struct ib_cm_id *cm_id)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_work *work;
@@ -3087,7 +3095,44 @@ int ib_cm_establish(struct ib_cm_id *cm_id)
 out:
 	return ret;
 }
-EXPORT_SYMBOL(ib_cm_establish);
+
+static int cm_migrate(struct ib_cm_id *cm_id)
+{
+	struct cm_id_private *cm_id_priv;
+	unsigned long flags;
+	int ret = 0;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (cm_id->state == IB_CM_ESTABLISHED &&
+	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
+		cm_id->lap_state = IB_CM_LAP_IDLE;
+		cm_id_priv->av = cm_id_priv->alt_av;
+	} else
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+	return ret;
+}
+
+int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
+{
+	int ret;
+
+	switch (event) {
+	case IB_EVENT_COMM_EST:
+		ret = cm_establish(cm_id);
+		break;
+	case IB_EVENT_PATH_MIG:
+		ret = cm_migrate(cm_id);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ib_cm_notify);
 
 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
 			    struct ib_mad_recv_wc *mad_recv_wc)
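The exported ib_cm_establish() becomes the internal cm_establish(), and the new ib_cm_notify() multiplexes it with cm_migrate() by QP event type. A hedged sketch of how a consumer might feed QP events into it (the handler name and the use of the event context to carry the cm_id are our assumptions, not part of this diff):

#include <rdma/ib_verbs.h>
#include <rdma/ib_cm.h>

/* Illustrative QP event handler: forward communication-established
 * and path-migration events to the CM via ib_cm_notify(). */
static void example_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_cm_id *cm_id = context;	/* assumed wiring */

	switch (event->event) {
	case IB_EVENT_COMM_EST:
	case IB_EVENT_PATH_MIG:
		ib_cm_notify(cm_id, event->event);	/* -EINVAL for others */
		break;
	default:
		break;
	}
}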
@@ -3172,8 +3217,7 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
 	case IB_CM_ESTABLISHED:
 		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 				IB_QP_PKEY_INDEX | IB_QP_PORT;
-		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
-					   IB_ACCESS_REMOTE_WRITE;
+		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 		if (cm_id_priv->responder_resources)
 			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
 						    IB_ACCESS_REMOTE_ATOMIC;
@@ -3221,6 +3265,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 		if (cm_id_priv->alt_av.ah_attr.dlid) {
 			*qp_attr_mask |= IB_QP_ALT_PATH;
 			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+			qp_attr->alt_timeout =
+					cm_id_priv->alt_av.packet_life_time + 1;
 			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 		}
 		ret = 0;
@@ -3247,19 +3294,31 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
 	case IB_CM_ESTABLISHED:
-		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
-		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC) {
-			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
-					 IB_QP_RNR_RETRY |
-					 IB_QP_MAX_QP_RD_ATOMIC;
-			qp_attr->timeout = cm_id_priv->local_ack_timeout;
-			qp_attr->retry_cnt = cm_id_priv->retry_count;
-			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
-			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
-		}
-		if (cm_id_priv->alt_av.ah_attr.dlid) {
-			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
+			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
+			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
+			if (cm_id_priv->qp_type == IB_QPT_RC) {
+				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+						 IB_QP_RNR_RETRY |
+						 IB_QP_MAX_QP_RD_ATOMIC;
+				qp_attr->timeout =
+					cm_id_priv->av.packet_life_time + 1;
+				qp_attr->retry_cnt = cm_id_priv->retry_count;
+				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
+				qp_attr->max_rd_atomic =
+					cm_id_priv->initiator_depth;
+			}
+			if (cm_id_priv->alt_av.ah_attr.dlid) {
+				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+				qp_attr->path_mig_state = IB_MIG_REARM;
+			}
+		} else {
+			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
+			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+			qp_attr->alt_timeout =
+				cm_id_priv->alt_av.packet_life_time + 1;
+			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 			qp_attr->path_mig_state = IB_MIG_REARM;
 		}
 		ret = 0;
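Taken together, the qp_attr hunks above make the CM fill in the alternate path attributes and rearm migration, rather than relying on the removed local_ack_timeout field. For orientation, a hedged sketch of the modify-QP step that rearms migration; it assumes qp_attr was already populated (for example via ib_cm_init_qp_attr()), and the function name is illustrative:

#include <rdma/ib_verbs.h>

/* Illustrative: once qp_attr carries the alternate path fields,
 * loading them with path_mig_state = IB_MIG_REARM lets the HCA
 * fail over to the alternate path on error without software help. */
static int example_rearm_migration(struct ib_qp *qp,
				   struct ib_qp_attr *qp_attr)
{
	qp_attr->path_mig_state = IB_MIG_REARM;
	return ib_modify_qp(qp, qp_attr,
			    IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE);
}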
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -344,7 +344,7 @@ static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
 		return ret;
 
 	qp_attr.qp_state = IB_QPS_INIT;
-	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+	qp_attr.qp_access_flags = 0;
 	qp_attr.port_num = id_priv->id.port_num;
 	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 			    IB_QP_PKEY_INDEX | IB_QP_PORT);
@@ -935,13 +935,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
-	if (ret) {
-		ret = -ENODEV;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-		goto out;
-	}
+	if (ret)
+		goto release_conn_id;
 
 	conn_id->cm_id.ib = cm_id;
 	cm_id->context = conn_id;
@@ -951,13 +946,17 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
 			      ib_event->private_data + offset,
 			      IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
-	if (ret) {
-		/* Destroy the CM ID by returning a non-zero value. */
-		conn_id->cm_id.ib = NULL;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-	}
+	if (!ret)
+		goto out;
+
+	/* Destroy the CM ID by returning a non-zero value. */
+	conn_id->cm_id.ib = NULL;
+
+release_conn_id:
+	cma_exch(conn_id, CMA_DESTROYING);
+	cma_release_remove(conn_id);
+	rdma_destroy_id(&conn_id->id);
+
 out:
 	cma_release_remove(listen_id);
 	return ret;
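The cma_req_handler() rework deduplicates the two destroy sequences behind a single release_conn_id label, the usual kernel goto-unwind shape. A minimal self-contained sketch of the pattern (all names here are placeholders):

/* Minimal sketch of goto-based unwinding: each failure jumps to the
 * label that releases exactly what has been acquired so far, so the
 * teardown code exists only once. All functions are placeholders. */
int acquire_first(void);
int acquire_second(void);
void release_first(void);

static int setup_example(void)
{
	int ret;

	ret = acquire_first();
	if (ret)
		goto out;

	ret = acquire_second();
	if (ret)
		goto err_release_first;

	return 0;

err_release_first:
	release_first();
out:
	return ret;
}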
@@ -1481,19 +1480,18 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
 	u8 p;
 
 	mutex_lock(&lock);
-	list_for_each_entry(cma_dev, &dev_list, list)
-		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
-			if (!ib_query_port (cma_dev->device, p, &port_attr) &&
-			    port_attr.state == IB_PORT_ACTIVE)
-				goto port_found;
-
-	if (!list_empty(&dev_list)) {
-		p = 1;
-		cma_dev = list_entry(dev_list.next, struct cma_device, list);
-	} else {
+	if (list_empty(&dev_list)) {
 		ret = -ENODEV;
 		goto out;
 	}
+	list_for_each_entry(cma_dev, &dev_list, list)
+		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
+			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
+			    port_attr.state == IB_PORT_ACTIVE)
+				goto port_found;
+
+	p = 1;
+	cma_dev = list_entry(dev_list.next, struct cma_device, list);
 
 port_found:
 	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
@@ -2123,8 +2121,6 @@ static void cma_add_one(struct ib_device *device)
 
 	cma_dev->device = device;
 	cma_dev->node_guid = device->node_guid;
-	if (!cma_dev->node_guid)
-		goto err;
 
 	init_completion(&cma_dev->comp);
 	atomic_set(&cma_dev->refcount, 1);
@@ -2136,9 +2132,6 @@ static void cma_add_one(struct ib_device *device)
 	list_for_each_entry(id_priv, &listen_any_list, list)
 		cma_listen_on_dev(id_priv, cma_dev);
 	mutex_unlock(&lock);
-	return;
-err:
-	kfree(cma_dev);
 }
 
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@ struct iwcm_work {
  * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
  *    the backlog is exceeded, then no more connection request events will
  *    be processed. cm_event_handler() returns -ENOMEM in this case. Its up
- *    to the provider to reject the connectino request.
+ *    to the provider to reject the connection request.
  * 2) in the connection request workqueue handler, cm_conn_req_handler().
  *    If work elements cannot be allocated for the new connect request cm_id,
  *    then IWCM will call the provider reject method. This is ok since
@@ -131,26 +131,25 @@ static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
 }
 
 /*
- * Save private data from incoming connection requests in the
- * cm_id_priv so the low level driver doesn't have to. Adjust
+ * Save private data from incoming connection requests to
+ * iw_cm_event, so the low level driver doesn't have to. Adjust
  * the event ptr to point to the local copy.
  */
-static int copy_private_data(struct iwcm_id_private *cm_id_priv,
-			     struct iw_cm_event *event)
+static int copy_private_data(struct iw_cm_event *event)
 {
 	void *p;
 
-	p = kmalloc(event->private_data_len, GFP_ATOMIC);
+	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
 	if (!p)
 		return -ENOMEM;
-	memcpy(p, event->private_data, event->private_data_len);
 	event->private_data = p;
 	return 0;
 }
 
 /*
- * Release a reference on cm_id. If the last reference is being removed
- * and iw_destroy_cm_id is waiting, wake up the waiting thread.
+ * Release a reference on cm_id. If the last reference is being
+ * released, enable the waiting thread (in iw_destroy_cm_id) to
+ * get woken up, and return 1 if a thread is already waiting.
  */
 static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 {
@@ -243,7 +242,7 @@ static int iwcm_modify_qp_sqd(struct ib_qp *qp)
 /*
  * CM_ID <-- CLOSING
  *
- * Block if a passive or active connection is currenlty being processed. Then
+ * Block if a passive or active connection is currently being processed. Then
  * process the event as follows:
  * - If we are ESTABLISHED, move to CLOSING and modify the QP state
  *   based on the abrupt flag
@@ -408,7 +407,7 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
 {
 	struct iwcm_id_private *cm_id_priv;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
@@ -535,7 +534,7 @@ EXPORT_SYMBOL(iw_cm_accept);
 int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 {
 	struct iwcm_id_private *cm_id_priv;
-	int ret = 0;
+	int ret;
 	unsigned long flags;
 	struct ib_qp *qp;
 
@@ -620,7 +619,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	spin_lock_irqsave(&listen_id_priv->lock, flags);
 	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
 		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
-		return;
+		goto out;
 	}
 	spin_unlock_irqrestore(&listen_id_priv->lock, flags);
 
@@ -629,7 +628,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 			 listen_id_priv->id.context);
 	/* If the cm_id could not be created, ignore the request */
 	if (IS_ERR(cm_id))
-		return;
+		goto out;
 
 	cm_id->provider_data = iw_event->provider_data;
 	cm_id->local_addr = iw_event->local_addr;
@@ -642,7 +641,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	if (ret) {
 		iw_cm_reject(cm_id, NULL, 0);
 		iw_destroy_cm_id(cm_id);
-		return;
+		goto out;
 	}
 
 	/* Call the client CM handler */
@@ -654,6 +653,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 		kfree(cm_id);
 	}
 
+out:
 	if (iw_event->private_data_len)
 		kfree(iw_event->private_data);
 }
@@ -674,7 +674,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
 			       struct iw_cm_event *iw_event)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 
@@ -704,7 +704,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
 			       struct iw_cm_event *iw_event)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	/*
@@ -830,8 +830,8 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
  */
 static void cm_work_handler(struct work_struct *_work)
 {
-	struct iwcm_work lwork, *work =
-		container_of(_work, struct iwcm_work, work);
+	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
+	struct iw_cm_event levent;
 	struct iwcm_id_private *cm_id_priv = work->cm_id;
 	unsigned long flags;
 	int empty;
@@ -844,11 +844,11 @@ static void cm_work_handler(struct work_struct *_work)
 				  struct iwcm_work, list);
 		list_del_init(&work->list);
 		empty = list_empty(&cm_id_priv->work_list);
-		lwork = *work;
+		levent = work->event;
 		put_work(work);
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-		ret = process_event(cm_id_priv, &work->event);
+		ret = process_event(cm_id_priv, &levent);
 		if (ret) {
 			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
 			destroy_cm_id(&cm_id_priv->id);
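The lwork-to-levent change in cm_work_handler() narrows what gets copied before put_work() recycles the work element: only the event payload is needed once the lock is dropped, and work must not be touched after it returns to the free list. An annotated restatement of the new sequence (comments are ours):

struct iw_cm_event levent;

levent = work->event;	/* snapshot while 'work' is still valid */
put_work(work);		/* 'work' may be reused from this point on */
spin_unlock_irqrestore(&cm_id_priv->lock, flags);

ret = process_event(cm_id_priv, &levent);	/* safe: uses the snapshot,
						   never touches 'work' again */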
@@ -907,7 +907,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
 	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
 	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
 	     work->event.private_data_len) {
-		ret = copy_private_data(cm_id_priv, &work->event);
+		ret = copy_private_data(&work->event);
 		if (ret) {
 			put_work(work);
 			goto out;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -46,7 +46,7 @@ MODULE_DESCRIPTION("kernel IB MAD API");
 MODULE_AUTHOR("Hal Rosenstock");
 MODULE_AUTHOR("Sean Hefty");
 
-static kmem_cache_t *ib_mad_cache;
+static struct kmem_cache *ib_mad_cache;
 
 static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;
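The mad.c hunk follows the tree-wide removal of the kmem_cache_t typedef; slab caches are now declared as plain struct kmem_cache pointers. A hedged sketch of cache setup under that spelling (the cache name and object size are placeholders; the six-argument kmem_cache_create() with ctor/dtor matches the 2.6.19-era API as we recall it):

#include <linux/slab.h>
#include <linux/errno.h>

/* Illustrative slab cache declared with the bare struct type. */
static struct kmem_cache *example_cache;

static int example_cache_init(void)
{
	example_cache = kmem_cache_create("example_cache",
					  64,	/* object size  */
					  0,	/* alignment    */
					  0,	/* SLAB_* flags */
					  NULL,	/* constructor  */
					  NULL);	/* destructor */
	return example_cache ? 0 : -ENOMEM;
}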
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -161,12 +161,14 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
 				 struct ib_ucm_event, ctx_list);
 		list_del(&uevent->file_list);
 		list_del(&uevent->ctx_list);
+		mutex_unlock(&ctx->file->file_mutex);
 
 		/* clear incoming connections. */
 		if (ib_ucm_new_cm_id(uevent->resp.event))
 			ib_destroy_cm_id(uevent->cm_id);
 
 		kfree(uevent);
+		mutex_lock(&ctx->file->file_mutex);
 	}
 	mutex_unlock(&ctx->file->file_mutex);
 }
@@ -328,20 +330,18 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
 	}
 
 	if (uvt->data_len) {
-		uvt->data = kmalloc(uvt->data_len, GFP_KERNEL);
+		uvt->data = kmemdup(evt->private_data, uvt->data_len, GFP_KERNEL);
 		if (!uvt->data)
 			goto err1;
 
-		memcpy(uvt->data, evt->private_data, uvt->data_len);
 		uvt->resp.present |= IB_UCM_PRES_DATA;
 	}
 
 	if (uvt->info_len) {
-		uvt->info = kmalloc(uvt->info_len, GFP_KERNEL);
+		uvt->info = kmemdup(info, uvt->info_len, GFP_KERNEL);
 		if (!uvt->info)
 			goto err2;
 
-		memcpy(uvt->info, info, uvt->info_len);
 		uvt->resp.present |= IB_UCM_PRES_INFO;
 	}
 	return 0;
@@ -685,11 +685,11 @@ out:
 	return result;
 }
 
-static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
-				const char __user *inbuf,
-				int in_len, int out_len)
+static ssize_t ib_ucm_notify(struct ib_ucm_file *file,
+			     const char __user *inbuf,
+			     int in_len, int out_len)
 {
-	struct ib_ucm_establish cmd;
+	struct ib_ucm_notify cmd;
 	struct ib_ucm_context *ctx;
 	int result;
 
@@ -700,7 +700,7 @@ static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	result = ib_cm_establish(ctx->cm_id);
+	result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
 	ib_ucm_ctx_put(ctx);
 	return result;
 }
@@ -1107,7 +1107,7 @@ static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
 	[IB_USER_CM_CMD_DESTROY_ID]	= ib_ucm_destroy_id,
 	[IB_USER_CM_CMD_ATTR_ID]	= ib_ucm_attr_id,
 	[IB_USER_CM_CMD_LISTEN]		= ib_ucm_listen,
-	[IB_USER_CM_CMD_ESTABLISH]	= ib_ucm_establish,
+	[IB_USER_CM_CMD_NOTIFY]		= ib_ucm_notify,
 	[IB_USER_CM_CMD_SEND_REQ]	= ib_ucm_send_req,
 	[IB_USER_CM_CMD_SEND_REP]	= ib_ucm_send_rep,
 	[IB_USER_CM_CMD_SEND_RTU]	= ib_ucm_send_rtu,