Merge /scratch/Ksrc/linux-git/
@@ -1,15 +1,20 @@
 EXTRA_CFLAGS += -Idrivers/infiniband/include
 
-obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o ib_umad.o
+obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
+				ib_cm.o ib_umad.o ib_ucm.o
 obj-$(CONFIG_INFINIBAND_USER_VERBS) += ib_uverbs.o
 
 ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
 		device.o fmr_pool.o cache.o
 
-ib_mad-y := mad.o smi.o agent.o
+ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
 
 ib_sa-y := sa_query.o
 
+ib_cm-y := cm.o
+
 ib_umad-y := user_mad.o
 
+ib_ucm-y := ucm.o
+
 ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_mem.o
@@ -134,7 +134,7 @@ static int agent_mad_send(struct ib_mad_agent *mad_agent,
 					   sizeof(mad_priv->mad),
 					   DMA_TO_DEVICE);
 	gather_list.length = sizeof(mad_priv->mad);
-	gather_list.lkey = (*port_priv->mr).lkey;
+	gather_list.lkey = mad_agent->mr->lkey;
 
 	send_wr.next = NULL;
 	send_wr.opcode = IB_WR_SEND;
@@ -156,10 +156,10 @@ static int agent_mad_send(struct ib_mad_agent *mad_agent,
 		/* Should sgid be looked up ? */
 		ah_attr.grh.sgid_index = 0;
 		ah_attr.grh.hop_limit = grh->hop_limit;
-		ah_attr.grh.flow_label = be32_to_cpup(
-					&grh->version_tclass_flow) & 0xfffff;
-		ah_attr.grh.traffic_class = (be32_to_cpup(
-					&grh->version_tclass_flow) >> 20) & 0xff;
+		ah_attr.grh.flow_label = be32_to_cpu(
+					grh->version_tclass_flow) & 0xfffff;
+		ah_attr.grh.traffic_class = (be32_to_cpu(
+					grh->version_tclass_flow) >> 20) & 0xff;
 		memcpy(ah_attr.grh.dgid.raw,
 		       grh->sgid.raw,
 		       sizeof(ah_attr.grh.dgid));
@@ -322,22 +322,12 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 		goto error3;
 	}
 
-	port_priv->mr = ib_get_dma_mr(port_priv->smp_agent->qp->pd,
-				      IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(port_priv->mr)) {
-		printk(KERN_ERR SPFX "Couldn't get DMA MR\n");
-		ret = PTR_ERR(port_priv->mr);
-		goto error4;
-	}
-
 	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
 	list_add_tail(&port_priv->port_list, &ib_agent_port_list);
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
 	return 0;
 
-error4:
-	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
 error3:
 	ib_unregister_mad_agent(port_priv->smp_agent);
 error2:
@@ -361,8 +351,6 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	list_del(&port_priv->port_list);
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
-	ib_dereg_mr(port_priv->mr);
-
 	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
 	ib_unregister_mad_agent(port_priv->smp_agent);
 	kfree(port_priv);
@@ -33,7 +33,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: agent_priv.h 1389 2004-12-27 22:56:47Z roland $
+ * $Id: agent_priv.h 1640 2005-01-24 22:39:02Z halr $
  */
 
 #ifndef __IB_AGENT_PRIV_H__
@@ -57,7 +57,6 @@ struct ib_agent_port_private {
 	int port_num;
 	struct ib_mad_agent *smp_agent;	      /* SM class */
 	struct ib_mad_agent *perf_mgmt_agent; /* PerfMgmt class */
-	struct ib_mr *mr;
 };
 
 #endif /* __IB_AGENT_PRIV_H__ */
 drivers/infiniband/core/cm.c      | 3324 lines (new file; diff suppressed because it is too large)
 drivers/infiniband/core/cm_msgs.h |  819 lines (new file)
@@ -0,0 +1,819 @@
/*
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if !defined(CM_MSGS_H)
#define CM_MSGS_H

#include <ib_mad.h>

/*
 * Parameters to routines below should be in network-byte order, and values
 * are returned in network-byte order.
 */

#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

enum cm_msg_attr_id {
	CM_REQ_ATTR_ID	    = __constant_htons(0x0010),
	CM_MRA_ATTR_ID	    = __constant_htons(0x0011),
	CM_REJ_ATTR_ID	    = __constant_htons(0x0012),
	CM_REP_ATTR_ID	    = __constant_htons(0x0013),
	CM_RTU_ATTR_ID	    = __constant_htons(0x0014),
	CM_DREQ_ATTR_ID	    = __constant_htons(0x0015),
	CM_DREP_ATTR_ID	    = __constant_htons(0x0016),
	CM_SIDR_REQ_ATTR_ID = __constant_htons(0x0017),
	CM_SIDR_REP_ATTR_ID = __constant_htons(0x0018),
	CM_LAP_ATTR_ID	    = __constant_htons(0x0019),
	CM_APR_ATTR_ID	    = __constant_htons(0x001A)
};

enum cm_msg_sequence {
	CM_MSG_SEQUENCE_REQ,
	CM_MSG_SEQUENCE_LAP,
	CM_MSG_SEQUENCE_DREQ,
	CM_MSG_SEQUENCE_SIDR
};

struct cm_req_msg {
	struct ib_mad_hdr hdr;

	u32 local_comm_id;
	u32 rsvd4;
	u64 service_id;
	u64 local_ca_guid;
	u32 rsvd24;
	u32 local_qkey;
	/* local QPN:24, responder resources:8 */
	u32 offset32;
	/* local EECN:24, initiator depth:8 */
	u32 offset36;
	/*
	 * remote EECN:24, remote CM response timeout:5,
	 * transport service type:2, end-to-end flow control:1
	 */
	u32 offset40;
	/* starting PSN:24, local CM response timeout:5, retry count:3 */
	u32 offset44;
	u16 pkey;
	/* path MTU:4, RDC exists:1, RNR retry count:3. */
	u8 offset50;
	/* max CM Retries:4, SRQ:1, rsvd:3 */
	u8 offset51;

	u16 primary_local_lid;
	u16 primary_remote_lid;
	union ib_gid primary_local_gid;
	union ib_gid primary_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	u32 primary_offset88;
	u8 primary_traffic_class;
	u8 primary_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 primary_offset94;
	/* local ACK timeout:5, rsvd:3 */
	u8 primary_offset95;

	u16 alt_local_lid;
	u16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	u32 alt_offset132;
	u8 alt_traffic_class;
	u8 alt_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 alt_offset138;
	/* local ACK timeout:5, rsvd:3 */
	u8 alt_offset139;

	u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}

static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, u32 qpn)
{
	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					 (be32_to_cpu(req_msg->offset32) &
					  0x000000FF));
}

static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset32);
}

static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
{
	req_msg->offset32 = cpu_to_be32(resp_res |
					(be32_to_cpu(req_msg->offset32) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset36);
}

static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
					 u8 init_depth)
{
	req_msg->offset36 = cpu_to_be32(init_depth |
					(be32_to_cpu(req_msg->offset36) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
}

static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
						  u8 resp_timeout)
{
	req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
					 (be32_to_cpu(req_msg->offset40) &
					  0xFFFFFF07));
}

static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
	u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
	switch(transport_type) {
	case 0: return IB_QPT_RC;
	case 1: return IB_QPT_UC;
	default: return 0;
	}
}

static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
				      enum ib_qp_type qp_type)
{
	switch(qp_type) {
	case IB_QPT_UC:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						  req_msg->offset40) &
						   0xFFFFFFF9) | 0x2);
	default:
		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
						  req_msg->offset40) &
						   0xFFFFFFF9);
	}
}

static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
{
	return be32_to_cpu(req_msg->offset40) & 0x1;
}

static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
					u8 flow_ctrl)
{
	req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
					 (be32_to_cpu(req_msg->offset40) &
					  0xFFFFFFFE));
}

static inline u32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}

static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
					   u32 starting_psn)
{
	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
}

static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
}

static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
						 u8 resp_timeout)
{
	req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
}

static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
}

static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
					  u8 retry_count)
{
	req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
}

static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 >> 4;
}

static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
}

static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 & 0x7;
}

static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
					      u8 rnr_retry_count)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
				  (rnr_retry_count & 0x7));
}

static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
{
	return req_msg->offset51 >> 4;
}

static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
					     u8 retries)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
}

static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
{
	return (req_msg->offset51 & 0x8) >> 3;
}

static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
				  ((srq & 0x1) << 3));
}

static inline u32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32((be32_to_cpu(req_msg->primary_offset88) >> 12));
}

static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
						 u32 flow_label)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0x00000FFF) |
				     (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
}

static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
						  u8 rate)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset94 >> 4);
}

static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
					  (sl << 4));
}

static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
}

static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
						   u8 subnet_local)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
					  ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset95 >> 3);
}

static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
							u8 local_ack_timeout)
{
	req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
					  (local_ack_timeout << 3));
}

static inline u32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32((be32_to_cpu(req_msg->alt_offset132) >> 12));
}

static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
					     u32 flow_label)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0x00000FFF) |
				  (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
}

static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
					      u8 rate)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset138 >> 4);
}

static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
				       (sl << 4));
}

static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
}

static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
					       u8 subnet_local)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
				       ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset139 >> 3);
}

static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
						    u8 local_ack_timeout)
{
	req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
				       (local_ack_timeout << 3));
}

/* Message REJected or MRAed */
enum cm_msg_response {
	CM_MSG_RESPONSE_REQ   = 0x0,
	CM_MSG_RESPONSE_REP   = 0x1,
	CM_MSG_RESPONSE_OTHER = 0x2
};

struct cm_mra_msg {
	struct ib_mad_hdr hdr;

	u32 local_comm_id;
	u32 remote_comm_id;
	/* message MRAed:2, rsvd:6 */
	u8 offset8;
	/* service timeout:5, rsvd:3 */
	u8 offset9;

	u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset8 >> 6);
}

static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
{
	mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset9 >> 3);
}

static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
					      u8 service_timeout)
{
	mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
				 (service_timeout << 3));
}

struct cm_rej_msg {
	struct ib_mad_hdr hdr;

	u32 local_comm_id;
	u32 remote_comm_id;
	/* message REJected:2, rsvd:6 */
	u8 offset8;
	/* reject info length:7, rsvd:1. */
	u8 offset9;
	u16 reason;
	u8 ari[IB_CM_REJ_ARI_LENGTH];

	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset8 >> 6);
}

static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
{
	rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset9 >> 1);
}

static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
					      u8 len)
{
	rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
}

struct cm_rep_msg {
	struct ib_mad_hdr hdr;

	u32 local_comm_id;
	u32 remote_comm_id;
	u32 local_qkey;
	/* local QPN:24, rsvd:8 */
	u32 offset12;
	/* local EECN:24, rsvd:8 */
	u32 offset16;
	/* starting PSN:24 rsvd:8 */
	u32 offset20;
	u8 resp_resources;
	u8 initiator_depth;
	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
	u8 offset26;
	/* RNR retry count:3, SRQ:1, rsvd:5 */
	u8 offset27;
	u64 local_ca_guid;

	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}

static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, u32 qpn)
{
	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}

static inline u32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}

static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
					   u32 starting_psn)
{
	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
}

static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 >> 3);
}

static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
					       u8 target_ack_delay)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
				  (target_ack_delay << 3));
}

static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset26 & 0x06) >> 1);
}

static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
				  ((failover & 0x3) << 1));
}

static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 & 0x01);
}

static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
					u8 flow_ctrl)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
				  (flow_ctrl & 0x1));
}

static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset27 >> 5);
}

static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
					      u8 rnr_retry_count)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
				  (rnr_retry_count << 5));
}

static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset27 >> 4) & 0x1);
}

static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
				  ((srq & 0x1) << 4));
}

struct cm_rtu_msg {
	struct ib_mad_hdr hdr;

	u32 local_comm_id;
	u32 remote_comm_id;

	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_dreq_msg {
	struct ib_mad_hdr hdr;

	u32 local_comm_id;
	u32 remote_comm_id;
	/* remote QPN/EECN:24, rsvd:8 */
	u32 offset8;

	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}

static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn)
{
	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
}

struct cm_drep_msg {
	struct ib_mad_hdr hdr;

	u32 local_comm_id;
	u32 remote_comm_id;

	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_lap_msg {
	struct ib_mad_hdr hdr;

	u32 local_comm_id;
	u32 remote_comm_id;

	u32 rsvd8;
	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
	u32 offset12;
	u32 rsvd16;

	u16 alt_local_lid;
	u16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:4, traffic class:8 */
	u32 offset56;
	u8 alt_hop_limit;
	/* rsvd:2, packet rate:6 */
	uint8_t offset61;
	/* SL:4, subnet local:1, rsvd:3 */
	uint8_t offset62;
	/* local ACK timeout:5, rsvd:3 */
	uint8_t offset63;

	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline u32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}

static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, u32 qpn)
{
	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					 (be32_to_cpu(lap_msg->offset12) &
					  0x000000FF));
}

static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
{
	return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
}

static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
						  u8 resp_timeout)
{
	lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
					 (be32_to_cpu(lap_msg->offset12) &
					  0xFFFFFF07));
}

static inline u32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
	return be32_to_cpu(lap_msg->offset56) >> 12;
}

static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
					 u32 flow_label)
{
	lap_msg->offset56 = cpu_to_be32((flow_label << 12) |
					 (be32_to_cpu(lap_msg->offset56) &
					  0x00000FFF));
}

static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
{
	return (u8) be32_to_cpu(lap_msg->offset56);
}

static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
					    u8 traffic_class)
{
	lap_msg->offset56 = cpu_to_be32(traffic_class |
					 (be32_to_cpu(lap_msg->offset56) &
					  0xFFFFFF00));
}

static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset61 & 0x3F;
}

static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
					  u8 packet_rate)
{
	lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
}

static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset62 >> 4;
}

static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
{
	lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
}

static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
{
	return (lap_msg->offset62 >> 3) & 0x1;
}

static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
					   u8 subnet_local)
{
	lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
			     (lap_msg->offset61 & 0xF7);
}
static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset63 >> 3;
}

static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
						u8 local_ack_timeout)
{
	lap_msg->offset63 = (local_ack_timeout << 3) |
			    (lap_msg->offset63 & 0x07);
}

struct cm_apr_msg {
	struct ib_mad_hdr hdr;

	u32 local_comm_id;
	u32 remote_comm_id;

	u8 info_length;
	u8 ap_status;
	u8 info[IB_CM_APR_INFO_LENGTH];

	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

struct cm_sidr_req_msg {
	struct ib_mad_hdr hdr;

	u32 request_id;
	u16 pkey;
	u16 rsvd;
	u64 service_id;

	u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

struct cm_sidr_rep_msg {
	struct ib_mad_hdr hdr;

	u32 request_id;
	u8 status;
	u8 info_length;
	u16 rsvd;
	/* QPN:24, rsvd:8 */
	u32 offset8;
	u64 service_id;
	u32 qkey;
	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];

	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline u32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}

static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
				       u32 qpn)
{
	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(sidr_rep_msg->offset8) &
					 0x000000FF));
}

#endif /* CM_MSGS_H */
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,7 +30,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: fmr_pool.c 1349 2004-12-16 21:09:43Z roland $
+ * $Id: fmr_pool.c 2730 2005-06-28 16:43:03Z sean.hefty $
  */
 
 #include <linux/errno.h>
@@ -329,7 +330,7 @@ EXPORT_SYMBOL(ib_create_fmr_pool);
  *
  * Destroy an FMR pool and free all associated resources.
  */
-int ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
+void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
 {
 	struct ib_pool_fmr *fmr;
 	struct ib_pool_fmr *tmp;
@@ -352,8 +353,6 @@ int ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
 
 	kfree(pool->cache_bucket);
 	kfree(pool);
-
-	return 0;
 }
 EXPORT_SYMBOL(ib_destroy_fmr_pool);
 
File diff suppressed because it is too large
@@ -1,5 +1,7 @@
 /*
  * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,7 +31,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: mad_priv.h 1389 2004-12-27 22:56:47Z roland $
+ * $Id: mad_priv.h 2730 2005-06-28 16:43:03Z sean.hefty $
  */
 
 #ifndef __IB_MAD_PRIV_H__
@@ -92,16 +94,15 @@ struct ib_mad_agent_private {
 	spinlock_t lock;
 	struct list_head send_list;
 	struct list_head wait_list;
 	struct list_head done_list;
 	struct work_struct timed_work;
 	unsigned long timeout;
 	struct list_head local_list;
 	struct work_struct local_work;
-	struct list_head canceled_list;
-	struct work_struct canceled_work;
-
+	struct list_head rmpp_list;
 	atomic_t refcount;
 	wait_queue_head_t wait;
+	u8 rmpp_version;
 };
 
 struct ib_mad_snoop_private {
@@ -116,15 +117,24 @@ struct ib_mad_snoop_private {
 	struct ib_mad_list_head mad_list;
 	struct list_head agent_list;
 	struct ib_mad_agent *agent;
+	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_send_wr send_wr;
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
 	u64 wr_id;			/* client WR ID */
 	u64 tid;
 	unsigned long timeout;
 	int retries;
 	int retry;
 	int refcount;
 	enum ib_wc_status status;
+
+	/* RMPP control */
+	int last_ack;
+	int seg_num;
+	int newwin;
+	int total_seg;
+	int data_offset;
+	int pad;
 };
 
 struct ib_mad_local_private {
@@ -197,4 +207,17 @@ struct ib_mad_port_private {
 
 extern kmem_cache_t *ib_mad_cache;
 
+int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
+
+struct ib_mad_send_wr_private *
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid);
+
+void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
+			     struct ib_mad_send_wc *mad_send_wc);
+
+void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr);
+
+void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
+			  int timeout_ms);
+
 #endif /* __IB_MAD_PRIV_H__ */
 drivers/infiniband/core/mad_rmpp.c | 765 lines (new file)
@@ -0,0 +1,765 @@
|
||||
/*
|
||||
* Copyright (c) 2005 Intel Inc. All rights reserved.
|
||||
* Copyright (c) 2005 Voltaire, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
|
||||
*/
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
#include "mad_priv.h"
|
||||
#include "mad_rmpp.h"
|
||||
|
||||
enum rmpp_state {
|
||||
RMPP_STATE_ACTIVE,
|
||||
RMPP_STATE_TIMEOUT,
|
||||
RMPP_STATE_COMPLETE
|
||||
};
|
||||
|
||||
struct mad_rmpp_recv {
|
||||
struct ib_mad_agent_private *agent;
|
||||
struct list_head list;
|
||||
struct work_struct timeout_work;
|
||||
struct work_struct cleanup_work;
|
||||
wait_queue_head_t wait;
|
||||
enum rmpp_state state;
|
||||
spinlock_t lock;
|
||||
atomic_t refcount;
|
||||
|
||||
struct ib_ah *ah;
|
||||
struct ib_mad_recv_wc *rmpp_wc;
|
||||
struct ib_mad_recv_buf *cur_seg_buf;
|
||||
int last_ack;
|
||||
int seg_num;
|
||||
int newwin;
|
||||
|
||||
u64 tid;
|
||||
u32 src_qp;
|
||||
u16 slid;
|
||||
u8 mgmt_class;
|
||||
u8 class_version;
|
||||
u8 method;
|
||||
};
|
||||
|
||||
static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
|
||||
{
|
||||
atomic_dec(&rmpp_recv->refcount);
|
||||
wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
|
||||
ib_destroy_ah(rmpp_recv->ah);
|
||||
kfree(rmpp_recv);
|
||||
}
|
||||
|
||||
void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
|
||||
{
|
||||
struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&agent->lock, flags);
|
||||
list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
|
||||
cancel_delayed_work(&rmpp_recv->timeout_work);
|
||||
cancel_delayed_work(&rmpp_recv->cleanup_work);
|
||||
}
|
||||
spin_unlock_irqrestore(&agent->lock, flags);
|
||||
|
||||
flush_workqueue(agent->qp_info->port_priv->wq);
|
||||
|
||||
list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
|
||||
&agent->rmpp_list, list) {
|
||||
list_del(&rmpp_recv->list);
|
||||
if (rmpp_recv->state != RMPP_STATE_COMPLETE)
|
||||
ib_free_recv_mad(rmpp_recv->rmpp_wc);
|
||||
destroy_rmpp_recv(rmpp_recv);
|
||||
}
|
||||
}
|
||||
|
||||
static void recv_timeout_handler(void *data)
|
||||
{
|
||||
struct mad_rmpp_recv *rmpp_recv = data;
|
||||
struct ib_mad_recv_wc *rmpp_wc;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
|
||||
if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
|
||||
spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
|
||||
return;
|
||||
}
|
||||
rmpp_recv->state = RMPP_STATE_TIMEOUT;
|
||||
list_del(&rmpp_recv->list);
|
||||
spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
|
||||
|
||||
/* TODO: send abort. */
|
||||
rmpp_wc = rmpp_recv->rmpp_wc;
|
||||
destroy_rmpp_recv(rmpp_recv);
|
||||
ib_free_recv_mad(rmpp_wc);
|
||||
}
|
||||
|
||||
static void recv_cleanup_handler(void *data)
|
||||
{
|
||||
struct mad_rmpp_recv *rmpp_recv = data;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
|
||||
list_del(&rmpp_recv->list);
|
||||
spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
|
||||
destroy_rmpp_recv(rmpp_recv);
|
||||
}
|
||||
|
||||
static struct mad_rmpp_recv *
|
||||
create_rmpp_recv(struct ib_mad_agent_private *agent,
|
||||
struct ib_mad_recv_wc *mad_recv_wc)
|
||||
{
|
||||
struct mad_rmpp_recv *rmpp_recv;
|
||||
struct ib_mad_hdr *mad_hdr;
|
||||
|
||||
rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
|
||||
if (!rmpp_recv)
|
||||
return NULL;
|
||||
|
||||
rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
|
||||
mad_recv_wc->wc,
|
||||
mad_recv_wc->recv_buf.grh,
|
||||
agent->agent.port_num);
|
||||
if (IS_ERR(rmpp_recv->ah))
|
||||
goto error;
|
||||
|
||||
rmpp_recv->agent = agent;
|
||||
init_waitqueue_head(&rmpp_recv->wait);
|
||||
INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
|
||||
INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
|
||||
spin_lock_init(&rmpp_recv->lock);
|
||||
rmpp_recv->state = RMPP_STATE_ACTIVE;
|
||||
atomic_set(&rmpp_recv->refcount, 1);
|
||||
|
||||
rmpp_recv->rmpp_wc = mad_recv_wc;
|
||||
rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
|
||||
rmpp_recv->newwin = 1;
|
||||
rmpp_recv->seg_num = 1;
|
||||
rmpp_recv->last_ack = 0;
|
||||
|
||||
mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
|
||||
rmpp_recv->tid = mad_hdr->tid;
|
||||
rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
|
||||
rmpp_recv->slid = mad_recv_wc->wc->slid;
|
||||
rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
|
||||
rmpp_recv->class_version = mad_hdr->class_version;
|
||||
rmpp_recv->method = mad_hdr->method;
|
||||
return rmpp_recv;
|
||||
|
||||
error: kfree(rmpp_recv);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
|
||||
{
|
||||
if (atomic_dec_and_test(&rmpp_recv->refcount))
|
||||
wake_up(&rmpp_recv->wait);
|
||||
}
|
||||
|
||||
static struct mad_rmpp_recv *
|
||||
find_rmpp_recv(struct ib_mad_agent_private *agent,
|
||||
struct ib_mad_recv_wc *mad_recv_wc)
|
||||
{
|
||||
struct mad_rmpp_recv *rmpp_recv;
|
||||
struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
|
||||
|
||||
list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
|
||||
if (rmpp_recv->tid == mad_hdr->tid &&
|
||||
rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
|
||||
rmpp_recv->slid == mad_recv_wc->wc->slid &&
|
||||
rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
|
||||
rmpp_recv->class_version == mad_hdr->class_version &&
|
||||
rmpp_recv->method == mad_hdr->method)
|
||||
return rmpp_recv;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct mad_rmpp_recv *
|
||||
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
|
||||
struct ib_mad_recv_wc *mad_recv_wc)
|
||||
{
|
||||
struct mad_rmpp_recv *rmpp_recv;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&agent->lock, flags);
|
||||
rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
|
||||
if (rmpp_recv)
|
||||
atomic_inc(&rmpp_recv->refcount);
|
||||
spin_unlock_irqrestore(&agent->lock, flags);
|
||||
return rmpp_recv;
|
||||
}
|
||||
|
||||
static struct mad_rmpp_recv *
|
||||
insert_rmpp_recv(struct ib_mad_agent_private *agent,
|
||||
struct mad_rmpp_recv *rmpp_recv)
|
||||
{
|
||||
struct mad_rmpp_recv *cur_rmpp_recv;
|
||||
|
||||
cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
|
||||
if (!cur_rmpp_recv)
|
||||
list_add_tail(&rmpp_recv->list, &agent->rmpp_list);
|
||||
|
||||
return cur_rmpp_recv;
|
||||
}
|
||||
|
||||
static int data_offset(u8 mgmt_class)
|
||||
{
|
||||
if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
|
||||
return offsetof(struct ib_sa_mad, data);
|
||||
else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
|
||||
(mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
|
||||
return offsetof(struct ib_vendor_mad, data);
|
||||
else
|
||||
return offsetof(struct ib_rmpp_mad, data);
|
||||
}
|
||||
|
||||
static void format_ack(struct ib_rmpp_mad *ack,
|
||||
struct ib_rmpp_mad *data,
|
||||
struct mad_rmpp_recv *rmpp_recv)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
memcpy(&ack->mad_hdr, &data->mad_hdr,
|
||||
data_offset(data->mad_hdr.mgmt_class));
|
||||
|
||||
ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
|
||||
ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
|
||||
ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
|
||||
|
||||
spin_lock_irqsave(&rmpp_recv->lock, flags);
|
||||
rmpp_recv->last_ack = rmpp_recv->seg_num;
|
||||
ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
|
||||
ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
|
||||
spin_unlock_irqrestore(&rmpp_recv->lock, flags);
|
||||
}
|
||||
|
||||
static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
|
||||
struct ib_mad_recv_wc *recv_wc)
|
||||
{
|
||||
struct ib_mad_send_buf *msg;
|
||||
struct ib_send_wr *bad_send_wr;
|
||||
int hdr_len, ret;
|
||||
|
||||
hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
|
||||
msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
|
||||
recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
|
||||
hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
|
||||
GFP_KERNEL);
|
||||
if (!msg)
|
||||
return;
|
||||
|
||||
format_ack((struct ib_rmpp_mad *) msg->mad,
|
||||
(struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
|
||||
ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
|
||||
&bad_send_wr);
|
||||
if (ret)
|
||||
ib_free_send_mad(msg);
|
||||
}
|
||||
|
||||
static inline int get_last_flag(struct ib_mad_recv_buf *seg)
|
||||
{
|
||||
struct ib_rmpp_mad *rmpp_mad;
|
||||
|
||||
rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
|
||||
return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
|
||||
}
|
||||
|
||||
static inline int get_seg_num(struct ib_mad_recv_buf *seg)
|
||||
{
|
||||
struct ib_rmpp_mad *rmpp_mad;
|
||||
|
||||
rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
|
||||
return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
|
||||
}
|
||||
|
||||
static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
|
||||
struct ib_mad_recv_buf *seg)
|
||||
{
|
||||
if (seg->list.next == rmpp_list)
|
||||
return NULL;
|
||||
|
||||
return container_of(seg->list.next, struct ib_mad_recv_buf, list);
|
||||
}
|
||||
|
||||
static inline int window_size(struct ib_mad_agent_private *agent)
|
||||
{
|
||||
return max(agent->qp_info->recv_queue.max_active >> 3, 1);
|
||||
}
|
||||
|
||||
static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
|
||||
int seg_num)
|
||||
{
|
||||
struct ib_mad_recv_buf *seg_buf;
|
||||
int cur_seg_num;
|
||||
|
||||
list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
|
||||
cur_seg_num = get_seg_num(seg_buf);
|
||||
if (seg_num > cur_seg_num)
|
||||
return seg_buf;
|
||||
if (seg_num == cur_seg_num)
|
||||
break;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
|
||||
struct ib_mad_recv_buf *new_buf)
|
||||
{
|
||||
struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;
|
||||
|
||||
while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
|
||||
rmpp_recv->cur_seg_buf = new_buf;
|
||||
rmpp_recv->seg_num++;
|
||||
new_buf = get_next_seg(rmpp_list, new_buf);
|
||||
}
|
||||
}
|
||||
|
||||
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
|
||||
{
|
||||
struct ib_rmpp_mad *rmpp_mad;
|
||||
int hdr_size, data_size, pad;
|
||||
|
||||
rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;
|
||||
|
||||
hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
|
||||
data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
|
||||
pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
|
||||
if (pad > data_size || pad < 0)
|
||||
pad = 0;
|
||||
|
||||
return hdr_size + rmpp_recv->seg_num * data_size - pad;
|
||||
}
|
||||
|
||||
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
|
||||
{
|
||||
struct ib_mad_recv_wc *rmpp_wc;
|
||||
|
||||
ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
|
||||
if (rmpp_recv->seg_num > 1)
|
||||
cancel_delayed_work(&rmpp_recv->timeout_work);
|
||||
|
||||
rmpp_wc = rmpp_recv->rmpp_wc;
|
||||
rmpp_wc->mad_len = get_mad_len(rmpp_recv);
|
||||
/* 10 seconds until we can find the packet lifetime */
|
||||
queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
|
||||
&rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
|
||||
return rmpp_wc;
|
||||
}
|
||||
|
||||
void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf)
|
||||
{
|
||||
struct ib_mad_recv_buf *seg_buf;
|
||||
struct ib_rmpp_mad *rmpp_mad;
|
||||
void *data;
|
||||
int size, len, offset;
|
||||
u8 flags;
|
||||
|
||||
len = mad_recv_wc->mad_len;
|
||||
if (len <= sizeof(struct ib_mad)) {
|
||||
memcpy(buf, mad_recv_wc->recv_buf.mad, len);
|
||||
return;
|
||||
}
|
||||
|
||||
offset = data_offset(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
|
||||
|
||||
list_for_each_entry(seg_buf, &mad_recv_wc->rmpp_list, list) {
|
||||
rmpp_mad = (struct ib_rmpp_mad *)seg_buf->mad;
|
||||
flags = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr);
|
||||
|
||||
if (flags & IB_MGMT_RMPP_FLAG_FIRST) {
|
||||
data = rmpp_mad;
|
||||
size = sizeof(*rmpp_mad);
|
||||
} else {
|
||||
data = (void *) rmpp_mad + offset;
|
||||
if (flags & IB_MGMT_RMPP_FLAG_LAST)
|
||||
size = len;
|
||||
else
|
||||
size = sizeof(*rmpp_mad) - offset;
|
||||
}
|
||||
|
||||
memcpy(buf, data, size);
|
||||
len -= size;
|
||||
buf += size;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(ib_coalesce_recv_mad);
|
||||
|
||||
static struct ib_mad_recv_wc *
|
||||
continue_rmpp(struct ib_mad_agent_private *agent,
|
||||
struct ib_mad_recv_wc *mad_recv_wc)
|
||||
{
|
||||
struct mad_rmpp_recv *rmpp_recv;
|
||||
struct ib_mad_recv_buf *prev_buf;
|
||||
struct ib_mad_recv_wc *done_wc;
|
||||
int seg_num;
|
||||
unsigned long flags;
|
||||
|
||||
rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
|
||||
if (!rmpp_recv)
|
||||
goto drop1;
|
||||
|
||||
seg_num = get_seg_num(&mad_recv_wc->recv_buf);
|
||||
|
||||
spin_lock_irqsave(&rmpp_recv->lock, flags);
|
||||
if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
|
||||
(seg_num > rmpp_recv->newwin))
|
||||
goto drop3;
|
||||
|
||||
if ((seg_num <= rmpp_recv->last_ack) ||
|
||||
(rmpp_recv->state == RMPP_STATE_COMPLETE)) {
|
||||
spin_unlock_irqrestore(&rmpp_recv->lock, flags);
|
||||
ack_recv(rmpp_recv, mad_recv_wc);
|
||||
goto drop2;
|
||||
}
|
||||
|
||||
prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
|
||||
if (!prev_buf)
|
||||
goto drop3;
|
||||
|
||||
done_wc = NULL;
|
||||
list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
|
||||
if (rmpp_recv->cur_seg_buf == prev_buf) {
|
||||
update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
|
||||
if (get_last_flag(rmpp_recv->cur_seg_buf)) {
|
||||
rmpp_recv->state = RMPP_STATE_COMPLETE;
|
||||
spin_unlock_irqrestore(&rmpp_recv->lock, flags);
|
||||
done_wc = complete_rmpp(rmpp_recv);
|
||||
goto out;
|
||||
} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
|
||||
rmpp_recv->newwin += window_size(agent);
|
||||
spin_unlock_irqrestore(&rmpp_recv->lock, flags);
|
||||
ack_recv(rmpp_recv, mad_recv_wc);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&rmpp_recv->lock, flags);
|
||||
out:
|
||||
deref_rmpp_recv(rmpp_recv);
|
||||
return done_wc;
|
||||
|
||||
drop3: spin_unlock_irqrestore(&rmpp_recv->lock, flags);
|
||||
drop2: deref_rmpp_recv(rmpp_recv);
|
||||
drop1: ib_free_recv_mad(mad_recv_wc);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct ib_mad_recv_wc *
|
||||
start_rmpp(struct ib_mad_agent_private *agent,
|
||||
struct ib_mad_recv_wc *mad_recv_wc)
|
||||
{
|
||||
struct mad_rmpp_recv *rmpp_recv;
|
||||
unsigned long flags;
|
||||
|
||||
rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
|
||||
if (!rmpp_recv) {
|
||||
ib_free_recv_mad(mad_recv_wc);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&agent->lock, flags);
|
||||
if (insert_rmpp_recv(agent, rmpp_recv)) {
|
||||
spin_unlock_irqrestore(&agent->lock, flags);
|
||||
/* duplicate first MAD */
|
||||
destroy_rmpp_recv(rmpp_recv);
|
||||
return continue_rmpp(agent, mad_recv_wc);
|
||||
}
|
||||
atomic_inc(&rmpp_recv->refcount);
|
||||
|
||||
if (get_last_flag(&mad_recv_wc->recv_buf)) {
|
||||
rmpp_recv->state = RMPP_STATE_COMPLETE;
|
||||
spin_unlock_irqrestore(&agent->lock, flags);
|
||||
complete_rmpp(rmpp_recv);
|
||||
} else {
|
||||
spin_unlock_irqrestore(&agent->lock, flags);
|
||||
/* 40 seconds until we can find the packet lifetimes */
|
||||
queue_delayed_work(agent->qp_info->port_priv->wq,
|
||||
&rmpp_recv->timeout_work,
|
||||
msecs_to_jiffies(40000));
|
||||
rmpp_recv->newwin += window_size(agent);
|
||||
ack_recv(rmpp_recv, mad_recv_wc);
|
||||
mad_recv_wc = NULL;
|
||||
}
|
||||
deref_rmpp_recv(rmpp_recv);
|
||||
return mad_recv_wc;
|
||||
}
|
||||
|
||||
static inline u64 get_seg_addr(struct ib_mad_send_wr_private *mad_send_wr)
|
||||
{
|
||||
return mad_send_wr->sg_list[0].addr + mad_send_wr->data_offset +
|
||||
(sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset) *
|
||||
(mad_send_wr->seg_num - 1);
|
||||
}
|
||||
|
||||
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
|
||||
{
|
||||
struct ib_rmpp_mad *rmpp_mad;
|
||||
int timeout;
|
||||
|
||||
rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
|
||||
ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
|
||||
rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);
|
||||
|
||||
if (mad_send_wr->seg_num == 1) {
|
||||
rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
|
||||
rmpp_mad->rmpp_hdr.paylen_newwin =
|
||||
cpu_to_be32(mad_send_wr->total_seg *
|
||||
(sizeof(struct ib_rmpp_mad) -
|
||||
offsetof(struct ib_rmpp_mad, data)));
|
||||
mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
|
||||
} else {
|
||||
mad_send_wr->send_wr.num_sge = 2;
|
||||
mad_send_wr->sg_list[0].length = mad_send_wr->data_offset;
|
||||
mad_send_wr->sg_list[1].addr = get_seg_addr(mad_send_wr);
|
||||
mad_send_wr->sg_list[1].length = sizeof(struct ib_rmpp_mad) -
|
||||
mad_send_wr->data_offset;
|
||||
mad_send_wr->sg_list[1].lkey = mad_send_wr->sg_list[0].lkey;
|
||||
}
|
||||
|
||||
if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
|
||||
rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
|
||||
rmpp_mad->rmpp_hdr.paylen_newwin =
|
||||
cpu_to_be32(sizeof(struct ib_rmpp_mad) -
|
||||
offsetof(struct ib_rmpp_mad, data) -
|
||||
mad_send_wr->pad);
|
||||
}
|
||||
|
||||
/* 2 seconds for an ACK until we can find the packet lifetime */
|
||||
timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
|
||||
if (!timeout || timeout > 2000)
|
||||
mad_send_wr->timeout = msecs_to_jiffies(2000);
|
||||
mad_send_wr->seg_num++;
|
||||
return ib_send_mad(mad_send_wr);
|
||||
}
|
||||
|
||||
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
|
||||
struct ib_mad_recv_wc *mad_recv_wc)
|
||||
{
|
||||
struct ib_mad_send_wr_private *mad_send_wr;
|
||||
struct ib_rmpp_mad *rmpp_mad;
|
||||
unsigned long flags;
|
||||
int seg_num, newwin, ret;
|
||||
|
||||
rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
|
||||
if (rmpp_mad->rmpp_hdr.rmpp_status)
|
||||
return;
|
||||
|
||||
seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
|
||||
newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
|
||||
|
||||
spin_lock_irqsave(&agent->lock, flags);
|
||||
mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
|
||||
if (!mad_send_wr)
|
||||
goto out; /* Unmatched ACK */
|
||||
|
||||
if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
|
||||
(!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
|
||||
goto out; /* Send is already done */
|
||||
|
||||
if (seg_num > mad_send_wr->total_seg)
|
||||
goto out; /* Bad ACK */
|
||||
|
||||
if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
|
||||
goto out; /* Old ACK */
|
||||
|
||||
if (seg_num > mad_send_wr->last_ack) {
|
||||
mad_send_wr->last_ack = seg_num;
|
||||
mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
|
||||
}
|
||||
mad_send_wr->newwin = newwin;
|
||||
if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
|
||||
/* If no response is expected, the ACK completes the send */
|
||||
if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
|
||||
struct ib_mad_send_wc wc;
|
||||
|
||||
ib_mark_mad_done(mad_send_wr);
|
||||
spin_unlock_irqrestore(&agent->lock, flags);
|
||||
|
||||
wc.status = IB_WC_SUCCESS;
|
||||
wc.vendor_err = 0;
|
||||
wc.wr_id = mad_send_wr->wr_id;
|
||||
ib_mad_complete_send_wr(mad_send_wr, &wc);
|
||||
return;
|
||||
}
|
||||
if (mad_send_wr->refcount == 1)
|
||||
ib_reset_mad_timeout(mad_send_wr, mad_send_wr->
|
||||
send_wr.wr.ud.timeout_ms);
|
||||
} else if (mad_send_wr->refcount == 1 &&
|
||||
mad_send_wr->seg_num < mad_send_wr->newwin &&
|
||||
mad_send_wr->seg_num <= mad_send_wr->total_seg) {
|
||||
/* Send failure will just result in a timeout/retry */
|
||||
ret = send_next_seg(mad_send_wr);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
mad_send_wr->refcount++;
|
||||
list_del(&mad_send_wr->agent_list);
|
||||
list_add_tail(&mad_send_wr->agent_list,
|
||||
&mad_send_wr->mad_agent_priv->send_list);
|
||||
}
|
||||
out:
|
||||
spin_unlock_irqrestore(&agent->lock, flags);
|
||||
}
|
||||
|
||||
struct ib_mad_recv_wc *
|
||||
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
|
||||
struct ib_mad_recv_wc *mad_recv_wc)
|
||||
{
|
||||
struct ib_rmpp_mad *rmpp_mad;
|
||||
|
||||
rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
|
||||
if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
|
||||
return mad_recv_wc;
|
||||
|
||||
if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION)
|
||||
goto out;
|
||||
|
||||
switch (rmpp_mad->rmpp_hdr.rmpp_type) {
|
||||
case IB_MGMT_RMPP_TYPE_DATA:
|
||||
if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1))
|
||||
return start_rmpp(agent, mad_recv_wc);
|
||||
else
|
||||
return continue_rmpp(agent, mad_recv_wc);
|
||||
case IB_MGMT_RMPP_TYPE_ACK:
|
||||
process_rmpp_ack(agent, mad_recv_wc);
|
||||
break;
|
||||
case IB_MGMT_RMPP_TYPE_STOP:
|
||||
case IB_MGMT_RMPP_TYPE_ABORT:
|
||||
/* TODO: process_rmpp_nack(agent, mad_recv_wc); */
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
out:
|
||||
ib_free_recv_mad(mad_recv_wc);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int i, total_len, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;

	if (mad_send_wr->send_wr.num_sge > 1)
		return -EINVAL;		/* TODO: support num_sge > 1 */

	mad_send_wr->seg_num = 1;
	mad_send_wr->newwin = 1;
	mad_send_wr->data_offset = data_offset(rmpp_mad->mad_hdr.mgmt_class);

	total_len = 0;
	for (i = 0; i < mad_send_wr->send_wr.num_sge; i++)
		total_len += mad_send_wr->send_wr.sg_list[i].length;

	mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
			(sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
	mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
			   be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

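The segment math above divides the payload (everything past the class header) by the per-segment capacity of one 256-byte MAD. A worked example under the assumption that the class header is 56 bytes (the SA layout shown later in this diff: 24-byte MAD header + 12-byte RMPP header + 20-byte SA header), so each segment carries 200 data bytes:

#include <stdio.h>

int main(void)
{
	const int mad_size    = 256; /* sizeof(struct ib_rmpp_mad) */
	const int data_offset = 56;  /* assumed SA-class header size */
	const int seg_payload = mad_size - data_offset; /* 200 bytes */

	/* e.g. a 56-byte header followed by 1000 payload bytes */
	int total_len = 56 + 1000;
	int total_seg = (total_len - data_offset) / seg_payload;

	/* 1000 / 200 = 5 segments, as ib_send_rmpp_mad computes */
	printf("segments: %d\n", total_seg);
	return 0;
}
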
int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	struct ib_mad_send_buf *msg;
	int ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		msg = (struct ib_mad_send_buf *) (unsigned long)
		      mad_send_wc->wr_id;
		ib_free_send_mad(msg);
		return IB_RMPP_RESULT_INTERNAL;	 /* ACK, STOP, or ABORT */
	}

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num > mad_send_wr->newwin ||
	    mad_send_wr->seg_num > mad_send_wr->total_seg)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->total_seg)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack + 1;
	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;

	return IB_RMPP_RESULT_CONSUMED;
}
58
drivers/infiniband/core/mad_rmpp.h
Normal file
@@ -0,0 +1,58 @@
/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad_rmpp.h 1921 2005-02-25 22:58:44Z sean.hefty $
 */

#ifndef __MAD_RMPP_H__
#define __MAD_RMPP_H__

enum {
	IB_RMPP_RESULT_PROCESSED,
	IB_RMPP_RESULT_CONSUMED,
	IB_RMPP_RESULT_INTERNAL,
	IB_RMPP_RESULT_UNHANDLED
};

int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr);

struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc);

int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc);

void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent);

int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr);

#endif	/* __MAD_RMPP_H__ */

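The four result codes give the caller a compact contract: UNHANDLED means "not an RMPP MAD, handle it normally", CONSUMED means the RMPP layer took ownership, INTERNAL flags RMPP's own control traffic, and PROCESSED means completion handling is finished. A standalone model of how a caller branches on the codes (the real dispatch lives in mad.c; this is illustrative only):

#include <stdio.h>

/* Standalone model of the IB_RMPP_RESULT_* contract above. */
enum { RMPP_PROCESSED, RMPP_CONSUMED, RMPP_INTERNAL, RMPP_UNHANDLED };

static int rmpp_send(int rmpp_active)
{
	return rmpp_active ? RMPP_CONSUMED : RMPP_UNHANDLED;
}

int main(void)
{
	/* An RMPP-active MAD is consumed by the RMPP layer ... */
	if (rmpp_send(1) == RMPP_CONSUMED)
		printf("RMPP layer owns the send\n");
	/* ... while an ordinary MAD falls through to the normal path. */
	if (rmpp_send(0) == RMPP_UNHANDLED)
		printf("post through the regular send path\n");
	return 0;
}
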
@@ -1,5 +1,6 @@
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
@@ -29,7 +30,7 @@
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: sa_query.c 1389 2004-12-27 22:56:47Z roland $
 * $Id: sa_query.c 2811 2005-07-06 18:11:43Z halr $
 */

#include <linux/module.h>
@@ -50,26 +51,6 @@ MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * These two structures must be packed because they have 64-bit fields
 * that are only 32-bit aligned.  64-bit architectures will lay them
 * out wrong otherwise.  (And unfortunately they are sent on the wire
 * so we can't change the layout)
 */
struct ib_sa_hdr {
	u64			sm_key;
	u16			attr_offset;
	u16			reserved;
	ib_sa_comp_mask		comp_mask;
} __attribute__ ((packed));

struct ib_sa_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	struct ib_sa_hdr	sa_hdr;
	u8			data[200];
} __attribute__ ((packed));

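The comment above is easy to verify on a 64-bit host: without packing, the compiler aligns sm_key (a u64) to 8 bytes, so sa_hdr shifts from byte 36 to byte 40 and the 256-byte wire layout breaks. A compilable demonstration with stand-in 24- and 12-byte headers and the same GCC-style attribute:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the wire headers: 24-byte MAD header, 12-byte RMPP header. */
struct mad_hdr  { uint8_t b[24]; };
struct rmpp_hdr { uint8_t b[12]; };

struct sa_hdr_packed {
	uint64_t sm_key;
	uint16_t attr_offset;
	uint16_t reserved;
	uint64_t comp_mask;
} __attribute__ ((packed));

struct sa_hdr_unpacked {
	uint64_t sm_key;
	uint16_t attr_offset;
	uint16_t reserved;
	uint64_t comp_mask;
};

struct sa_mad_packed {
	struct mad_hdr		mad_hdr;
	struct rmpp_hdr		rmpp_hdr;
	struct sa_hdr_packed	sa_hdr;
	uint8_t			data[200];
} __attribute__ ((packed));

struct sa_mad_unpacked {
	struct mad_hdr		mad_hdr;
	struct rmpp_hdr		rmpp_hdr;
	struct sa_hdr_unpacked	sa_hdr;
	uint8_t			data[200];
};

int main(void)
{
	/* Packed: sa_hdr starts right after the headers at byte 36 and
	 * the whole MAD is exactly 256 bytes, as the wire requires. */
	printf("packed:   sa_hdr at %zu, total %zu\n",
	       offsetof(struct sa_mad_packed, sa_hdr),
	       sizeof(struct sa_mad_packed));
	/* Unpacked on a 64-bit ABI: padding before sm_key shifts every
	 * later field (sa_hdr at 40, total size 264). */
	printf("unpacked: sa_hdr at %zu, total %zu\n",
	       offsetof(struct sa_mad_unpacked, sa_hdr),
	       sizeof(struct sa_mad_unpacked));
	return 0;
}
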
struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
@@ -77,7 +58,6 @@ struct ib_sa_sm_ah {

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_mr        *mr;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	spinlock_t           ah_lock;
@@ -100,6 +80,12 @@ struct ib_sa_query {
	int                    id;
};

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
@@ -341,6 +327,54 @@ static const struct ib_field mcmember_rec_table[] = {
	  .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 2*64 },
};

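Each table entry positions a struct field on the wire in 32-bit words plus a bit offset, so the byte offset is simply offset_words * 4 + offset_bits / 8; the pkey entry above (word 6) therefore starts at byte 24 of the record. A small sketch of that arithmetic (stand-in type, not the kernel's ib_field):

#include <stdio.h>

/* Minimal model of the positioning used by service_rec_table. */
struct field_pos { int offset_words, offset_bits, size_bits; };

static int byte_offset(const struct field_pos *f)
{
	return f->offset_words * 4 + f->offset_bits / 8;
}

int main(void)
{
	struct field_pos pkey  = { 6, 0, 16 };	/* values from the table above */
	struct field_pos lease = { 7, 0, 32 };

	/* pkey occupies bytes 24..25, lease bytes 28..31 of the record */
	printf("pkey at byte %d, lease at byte %d\n",
	       byte_offset(&pkey), byte_offset(&lease));
	return 0;
}
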
static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
@@ -463,7 +497,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms)
			.mad_hdr     = &query->mad->mad_hdr,
			.remote_qpn  = 1,
			.remote_qkey = IB_QP1_QKEY,
			.timeout_ms  = timeout_ms
			.timeout_ms  = timeout_ms,
		}
	}
};
@@ -492,7 +526,7 @@ retry:
				   sizeof (struct ib_sa_mad),
				   DMA_TO_DEVICE);
	gather_list.length = sizeof (struct ib_sa_mad);
	gather_list.lkey   = port->mr->lkey;
	gather_list.lkey   = port->agent->mr->lkey;
	pci_unmap_addr_set(query, mapping, gather_list.addr);

	ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
@@ -566,7 +600,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, int gfp_mask,
		       int timeout_ms, unsigned int __nocast gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
@@ -616,6 +650,114 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
}
EXPORT_SYMBOL(ib_sa_path_rec_get);

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(sa_query->mad);
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, unsigned int __nocast gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port  = &sa_dev->port[port_num - sa_dev->start_port];
	struct ib_mad_agent *agent = port->agent;
	int ret;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;
	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
	if (!query->sa_query.mad) {
		kfree(query);
		return -ENOMEM;
	}

	query->callback = callback;
	query->context  = context;

	init_mad(query->sa_query.mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	query->sa_query.port     = port;
	query->sa_query.mad->mad_hdr.method  = method;
	query->sa_query.mad->mad_hdr.attr_id =
		cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, query->sa_query.mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms);
	if (ret < 0) {
		*sa_query = NULL;
		kfree(query->sa_query.mad);
		kfree(query);
	}

	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);

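A hedged sketch of how a kernel client might use the call just exported, following the kerneldoc above. The callback, wrapper function, and the comp-mask bit names (assumed from ib_sa.h) are illustrative, not from this commit:

/* Illustrative caller: registers a service record and checks the
 * SA's answer from the completion callback. */
static void my_service_cb(int status, struct ib_sa_service_rec *resp,
			  void *context)
{
	if (status)
		printk(KERN_WARNING "service query failed: %d\n", status);
	/* resp is only valid when status == 0 */
}

static int register_my_service(struct ib_device *device, u8 port_num,
			       struct ib_sa_service_rec *rec,
			       struct ib_sa_query **query)
{
	int id;

	id = ib_sa_service_rec_query(device, port_num, IB_MGMT_METHOD_SET,
				     rec,
				     IB_SA_SERVICE_REC_SERVICE_ID |
				     IB_SA_SERVICE_REC_SERVICE_GID,
				     1000 /* ms */, GFP_KERNEL,
				     my_service_cb, NULL, query);
	/* negative return is an errno; otherwise an ID usable for cancel */
	return id < 0 ? id : 0;
}
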
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
@@ -643,7 +785,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, int gfp_mask,
			     int timeout_ms, unsigned int __nocast gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
@@ -780,7 +922,6 @@ static void ib_sa_add_one(struct ib_device *device)
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		sa_dev->port[i].mr     = NULL;
		sa_dev->port[i].sm_ah  = NULL;
		sa_dev->port[i].port_num = i + s;
		spin_lock_init(&sa_dev->port[i].ah_lock);
@@ -792,13 +933,6 @@ static void ib_sa_add_one(struct ib_device *device)
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		sa_dev->port[i].mr = ib_get_dma_mr(sa_dev->port[i].agent->qp->pd,
						   IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(sa_dev->port[i].mr)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			goto err;
		}

		INIT_WORK(&sa_dev->port[i].update_task,
			  update_sm_ah, &sa_dev->port[i]);
	}
@@ -822,10 +956,8 @@ static void ib_sa_add_one(struct ib_device *device)
	return;

err:
	while (--i >= 0) {
		ib_dereg_mr(sa_dev->port[i].mr);
	while (--i >= 0)
		ib_unregister_mad_agent(sa_dev->port[i].agent);
	}

	kfree(sa_dev);

1393
drivers/infiniband/core/ucm.c
Normal file
File diff suppressed because it is too large
89
drivers/infiniband/core/ucm.h
Normal file
@@ -0,0 +1,89 @@
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ucm.h 2208 2005-04-22 23:24:31Z libor $
 */

#ifndef UCM_H
#define UCM_H

#include <linux/fs.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/idr.h>

#include <ib_cm.h>
#include <ib_user_cm.h>

#define IB_UCM_CM_ID_INVALID 0xffffffff

struct ib_ucm_file {
	struct semaphore mutex;
	struct file *filp;
	/*
	 * list of pending events
	 */
	struct list_head  ctxs;   /* list of active connections */
	struct list_head  events; /* list of pending events */
	wait_queue_head_t poll_wait;
};

struct ib_ucm_context {
	int                 id;
	int                 ref;
	int                 error;

	struct ib_ucm_file *file;
	struct ib_cm_id    *cm_id;
	struct semaphore    mutex;

	struct list_head    events;    /* list of pending events. */
	struct list_head    file_list; /* member in file ctx list */
};

struct ib_ucm_event {
	struct ib_ucm_context *ctx;
	struct list_head file_list; /* member in file event list */
	struct list_head ctx_list;  /* member in ctx event list */

	struct ib_ucm_event_resp resp;
	void *data;
	void *info;
	int data_len;
	int info_len;
	/*
	 * new connection identifiers need to be saved until
	 * userspace can get a handle on them.
	 */
	struct ib_cm_id *cm_id;
};

#endif /* UCM_H */
@@ -1,5 +1,7 @@
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
@@ -29,7 +31,7 @@
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: user_mad.c 1389 2004-12-27 22:56:47Z roland $
 * $Id: user_mad.c 2814 2005-07-06 19:14:09Z halr $
 */

#include <linux/module.h>
@@ -94,10 +96,12 @@ struct ib_umad_file {
};

struct ib_umad_packet {
	struct ib_user_mad mad;
	struct ib_ah      *ah;
	struct ib_mad_send_buf *msg;
	struct list_head   list;
	int		   length;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	struct ib_user_mad mad;
};

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
@@ -114,10 +118,10 @@ static int queue_packet(struct ib_umad_file *file,
	int ret = 1;

	down_read(&file->agent_mutex);
	for (packet->mad.id = 0;
	     packet->mad.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.id++)
		if (agent == file->agent[packet->mad.id]) {
	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == file->agent[packet->mad.hdr.id]) {
			spin_lock_irq(&file->recv_lock);
			list_add_tail(&packet->list, &file->recv_list);
			spin_unlock_irq(&file->recv_lock);
@@ -135,22 +139,30 @@ static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet =
	struct ib_umad_packet *timeout, *packet =
		(void *) (unsigned long) send_wc->wr_id;

	dma_unmap_single(agent->device->dma_device,
			 pci_unmap_addr(packet, mapping),
			 sizeof packet->mad.data,
			 DMA_TO_DEVICE);
	ib_destroy_ah(packet->ah);
	ib_destroy_ah(packet->msg->send_wr.wr.ud.ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		packet->mad.status = ETIMEDOUT;
		timeout = kmalloc(sizeof *timeout + sizeof (struct ib_mad_hdr),
				  GFP_KERNEL);
		if (!timeout)
			goto out;

		if (!queue_packet(file, agent, packet))
			return;
		memset(timeout, 0, sizeof *timeout + sizeof (struct ib_mad_hdr));

		timeout->length = sizeof (struct ib_mad_hdr);
		timeout->mad.hdr.id = packet->mad.hdr.id;
		timeout->mad.hdr.status = ETIMEDOUT;
		memcpy(timeout->mad.data, packet->mad.data,
		       sizeof (struct ib_mad_hdr));

		if (!queue_packet(file, agent, timeout))
			return;
	}

out:
	kfree(packet);
}

@@ -159,30 +171,35 @@ static void recv_handler(struct ib_mad_agent *agent,
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;
	int length;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto out;

	packet = kmalloc(sizeof *packet, GFP_KERNEL);
	length = mad_recv_wc->mad_len;
	packet = kmalloc(sizeof *packet + length, GFP_KERNEL);
	if (!packet)
		goto out;

	memset(packet, 0, sizeof *packet);
	memset(packet, 0, sizeof *packet + length);
	packet->length = length;

	memcpy(packet->mad.data, mad_recv_wc->recv_buf.mad, sizeof packet->mad.data);
	packet->mad.status = 0;
	packet->mad.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.lid = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.sl = mad_recv_wc->wc->sl;
	packet->mad.path_bits = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.grh_present) {
	ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data);

	packet->mad.hdr.status = 0;
	packet->mad.hdr.length = length + sizeof (struct ib_user_mad);
	packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		/* XXX parse GRH */
		packet->mad.gid_index = 0;
		packet->mad.hop_limit = 0;
		packet->mad.traffic_class = 0;
		memset(packet->mad.gid, 0, 16);
		packet->mad.flow_label = 0;
		packet->mad.hdr.gid_index = 0;
		packet->mad.hdr.hop_limit = 0;
		packet->mad.hdr.traffic_class = 0;
		memset(packet->mad.hdr.gid, 0, 16);
		packet->mad.hdr.flow_label = 0;
	}

	if (queue_packet(file, agent, packet))
@@ -199,7 +216,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < sizeof (struct ib_user_mad))
	if (count < sizeof (struct ib_user_mad) + sizeof (struct ib_mad))
		return -EINVAL;

	spin_lock_irq(&file->recv_lock);
@@ -222,12 +239,25 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,

	spin_unlock_irq(&file->recv_lock);

	if (copy_to_user(buf, &packet->mad, sizeof packet->mad))
	if (count < packet->length + sizeof (struct ib_user_mad)) {
		/* Return length needed (and first RMPP segment) if too small */
		if (copy_to_user(buf, &packet->mad,
				 sizeof (struct ib_user_mad) + sizeof (struct ib_mad)))
			ret = -EFAULT;
		else
			ret = -ENOSPC;
	} else if (copy_to_user(buf, &packet->mad,
				packet->length + sizeof (struct ib_user_mad)))
		ret = -EFAULT;
	else
		ret = sizeof packet->mad;

	kfree(packet);
		ret = packet->length + sizeof (struct ib_user_mad);
	if (ret < 0) {
		/* Requeue packet */
		spin_lock_irq(&file->recv_lock);
		list_add(&packet->list, &file->recv_list);
		spin_unlock_irq(&file->recv_lock);
	} else
		kfree(packet);
	return ret;
}

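With RMPP a received MAD can now exceed 256 bytes, which is why the read path above reports -ENOSPC (copying the needed length in the returned header) and requeues the packet instead of truncating it. A hedged userspace sketch of the resulting retry loop; the device path and buffer policy are illustrative, and a real reader would take the exact size from the copied ib_user_mad header rather than doubling:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative device node; the minor number varies per port. */
	int fd = open("/dev/infiniband/umad0", O_RDWR);
	size_t len = 320;	/* illustrative: user_mad header + one MAD */
	char *buf = malloc(len);
	ssize_t n;

	if (fd < 0 || !buf)
		return 1;

	while ((n = read(fd, buf, len)) < 0 && errno == ENOSPC) {
		/* Buffer too small for an RMPP transfer: the kernel kept
		 * the packet queued, so grow the buffer and read again. */
		len *= 2;
		buf = realloc(buf, len);
		if (!buf)
			return 1;
	}
	printf("got %zd bytes\n", n);
	free(buf);
	close(fd);
	return 0;
}
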
@@ -238,69 +268,57 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_sge      gather_list;
	struct ib_send_wr *bad_wr, wr = {
		.opcode      = IB_WR_SEND,
		.sg_list     = &gather_list,
		.num_sge     = 1,
		.send_flags  = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;
	struct ib_rmpp_mad *rmpp_mad;
	u8 method;
	u64 *tid;
	int ret;
	int ret, length, hdr_len, data_len, rmpp_hdr_size;
	int rmpp_active = 0;

	if (count < sizeof (struct ib_user_mad))
		return -EINVAL;

	packet = kmalloc(sizeof *packet, GFP_KERNEL);
	length = count - sizeof (struct ib_user_mad);
	packet = kmalloc(sizeof *packet + sizeof(struct ib_mad_hdr) +
			 sizeof(struct ib_rmpp_hdr), GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf, sizeof packet->mad)) {
		kfree(packet);
		return -EFAULT;
	if (copy_from_user(&packet->mad, buf,
			   sizeof (struct ib_user_mad) +
			   sizeof(struct ib_mad_hdr) +
			   sizeof(struct ib_rmpp_hdr))) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.id < 0 || packet->mad.id >= IB_UMAD_MAX_AGENTS) {
	if (packet->mad.hdr.id < 0 ||
	    packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	packet->length = length;

	down_read(&file->agent_mutex);

	agent = file->agent[packet->mad.id];
	agent = file->agent[packet->mad.hdr.id];
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	/*
	 * If userspace is generating a request that will generate a
	 * response, we need to make sure the high-order part of the
	 * transaction ID matches the agent being used to send the
	 * MAD.
	 */
	method = ((struct ib_mad_hdr *) packet->mad.data)->method;

	if (!(method & IB_MGMT_METHOD_RESP) &&
	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
	    method != IB_MGMT_METHOD_SEND) {
		tid = &((struct ib_mad_hdr *) packet->mad.data)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid          = be16_to_cpu(packet->mad.lid);
	ah_attr.sl            = packet->mad.sl;
	ah_attr.src_path_bits = packet->mad.path_bits;
	ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.grh_present) {
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.gid, 16);
		ah_attr.grh.flow_label = packet->mad.flow_label;
		ah_attr.grh.hop_limit = packet->mad.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.traffic_class;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.flow_label = packet->mad.hdr.flow_label;
		ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
	}

	packet->ah = ib_create_ah(agent->qp->pd, &ah_attr);
@@ -309,34 +327,104 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
		goto err_up;
	}

	gather_list.addr = dma_map_single(agent->device->dma_device,
					  packet->mad.data,
					  sizeof packet->mad.data,
					  DMA_TO_DEVICE);
	gather_list.length = sizeof packet->mad.data;
	gather_list.lkey   = file->mr[packet->mad.id]->lkey;
	pci_unmap_addr_set(packet, mapping, gather_list.addr);

	wr.wr.ud.mad_hdr     = (struct ib_mad_hdr *) packet->mad.data;
	wr.wr.ud.ah          = packet->ah;
	wr.wr.ud.remote_qpn  = be32_to_cpu(packet->mad.qpn);
	wr.wr.ud.remote_qkey = be32_to_cpu(packet->mad.qkey);
	wr.wr.ud.timeout_ms  = packet->mad.timeout_ms;

	wr.wr_id             = (unsigned long) packet;

	ret = ib_post_send_mad(agent, &wr, &bad_wr);
	if (ret) {
		dma_unmap_single(agent->device->dma_device,
				 pci_unmap_addr(packet, mapping),
				 sizeof packet->mad.data,
				 DMA_TO_DEVICE);
		goto err_up;
	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	if (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE) {
		/* RMPP active */
		if (!agent->rmpp_version) {
			ret = -EINVAL;
			goto err_ah;
		}
		/* Validate that management class can support RMPP */
		if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
			hdr_len = offsetof(struct ib_sa_mad, data);
			data_len = length;
		} else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
			   (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
			hdr_len = offsetof(struct ib_vendor_mad, data);
			data_len = length - hdr_len;
		} else {
			ret = -EINVAL;
			goto err_ah;
		}
		rmpp_active = 1;
	} else {
		if (length > sizeof(struct ib_mad)) {
			ret = -EINVAL;
			goto err_ah;
		}
		hdr_len = offsetof(struct ib_mad, data);
		data_len = length - hdr_len;
	}

	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 0, packet->ah, rmpp_active,
					 hdr_len, data_len,
					 GFP_KERNEL);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->send_wr.wr.ud.timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->send_wr.wr.ud.retries = packet->mad.hdr.retries;

	/* Override send WR WRID initialized in ib_create_send_mad */
	packet->msg->send_wr.wr_id = (unsigned long) packet;

	if (!rmpp_active) {
		/* Copy message from user into send buffer */
		if (copy_from_user(packet->msg->mad,
				   buf + sizeof(struct ib_user_mad), length)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		rmpp_hdr_size = sizeof(struct ib_mad_hdr) +
				sizeof(struct ib_rmpp_hdr);

		/* Only copy MAD headers (RMPP header in place) */
		memcpy(packet->msg->mad, packet->mad.data,
		       sizeof(struct ib_mad_hdr));

		/* Now, copy rest of message from user into send buffer */
		if (copy_from_user(((struct ib_rmpp_mad *) packet->msg->mad)->data,
				   buf + sizeof (struct ib_user_mad) + rmpp_hdr_size,
				   length - rmpp_hdr_size)) {
			ret = -EFAULT;
			goto err_msg;
		}
	}

	/*
	 * If userspace is generating a request that will generate a
	 * response, we need to make sure the high-order part of the
	 * transaction ID matches the agent being used to send the
	 * MAD.
	 */
	method = packet->msg->mad->mad_hdr.method;

	if (!(method & IB_MGMT_METHOD_RESP) &&
	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
	    method != IB_MGMT_METHOD_SEND) {
		tid = &packet->msg->mad->mad_hdr.tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
	}

	ret = ib_post_send_mad(agent, &packet->msg->send_wr, &bad_wr);
	if (ret)
		goto err_msg;

	up_read(&file->agent_mutex);

	return sizeof packet->mad;
	return sizeof (struct ib_user_mad_hdr) + packet->length;

err_msg:
	ib_free_send_mad(packet->msg);

err_ah:
	ib_destroy_ah(packet->ah);

err_up:
	up_read(&file->agent_mutex);

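The TID rewrite above is what lets the MAD layer route a response back to the right agent: the kernel owns the high 32 bits (agent->hi_tid) and userspace keeps the low 32. A standalone sketch of the composition (host-endian for clarity; the kernel stores the result big-endian with cpu_to_be64):

#include <stdint.h>
#include <stdio.h>

/* Model of the TID split used above: kernel-assigned agent id in the
 * high word, the caller's original TID preserved in the low word. */
static uint64_t make_tid(uint32_t hi_tid, uint64_t user_tid)
{
	return ((uint64_t) hi_tid) << 32 | (user_tid & 0xffffffff);
}

int main(void)
{
	uint64_t tid = make_tid(7, 0x12345678);

	/* high word identifies the agent, low word is untouched */
	printf("tid = %#llx\n", (unsigned long long) tid);
	return 0;
}
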
@@ -399,7 +487,8 @@ found:
	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      0, send_handler, recv_handler, file);
				      ureq.rmpp_version,
				      send_handler, recv_handler, file);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		goto out;
@@ -460,8 +549,8 @@ out:
	return ret;
}

static long ib_umad_ioctl(struct file *filp,
			  unsigned int cmd, unsigned long arg)
static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
@@ -517,14 +606,14 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
}

static struct file_operations umad_fops = {
	.owner 	 = THIS_MODULE,
	.read 	 = ib_umad_read,
	.write 	 = ib_umad_write,
	.poll 	 = ib_umad_poll,
	.owner 	        = THIS_MODULE,
	.read 	        = ib_umad_read,
	.write 	        = ib_umad_write,
	.poll 	        = ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
	.compat_ioctl 	= ib_umad_ioctl,
	.open 		= ib_umad_open,
	.release 	= ib_umad_close
	.compat_ioctl   = ib_umad_ioctl,
	.open 	        = ib_umad_open,
	.release        = ib_umad_close
};

static int ib_umad_sm_open(struct inode *inode, struct file *filp)

@@ -41,6 +41,7 @@
#include <linux/err.h>

#include <ib_verbs.h>
#include <ib_cache.h>

/* Protection domains */

@@ -88,6 +89,40 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
}
EXPORT_SYMBOL(ib_create_ah);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = wc->slid;
	ah_attr.sl = wc->sl;
	ah_attr.src_path_bits = wc->dlid_path_bits;
	ah_attr.port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr.ah_flags = IB_AH_GRH;
		ah_attr.grh.dgid = grh->dgid;

		ret = ib_find_cached_gid(pd->device, &grh->sgid, &port_num,
					 &gid_index);
		if (ret)
			return ERR_PTR(ret);

		ah_attr.grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr.grh.flow_label = flow_class & 0xFFFFF;
		ah_attr.grh.traffic_class = (flow_class >> 20) & 0xFF;
		ah_attr.grh.hop_limit = grh->hop_limit;
	}

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

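The GRH word unpacked above carries three IPv6-style fields in 32 bits: version in the top 4 bits, traffic class in bits 20-27, and the 20-bit flow label at the bottom, which is exactly what the masks in ib_create_ah_from_wc extract. A standalone sketch of the same decode (host-endian example value):

#include <stdint.h>
#include <stdio.h>

/* Decode the GRH version/tclass/flow word with the masks used above. */
int main(void)
{
	uint32_t vtf = 0x60a12345;	/* version 6, tclass 0x0a, flow 0x12345 */

	uint32_t flow_label    = vtf & 0xFFFFF;       /* low 20 bits */
	uint32_t traffic_class = (vtf >> 20) & 0xFF;  /* next 8 bits */
	uint32_t version       = vtf >> 28;           /* top 4 bits */

	printf("version %u, tclass %#x, flow %#x\n",
	       version, traffic_class, flow_label);
	return 0;
}
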
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?