Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 52aef818 authored by Linus Torvalds
Browse files

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband: (47 commits)
  IB/mthca: Query SRQ srq_limit fixes
  IPoIB: Get rid of useless test of queue length
  IB/mthca: Correct reported SRQ size in MemFree case.
  IB/mad: Fix oopsable race on device removal
  IB/srp: Coverity fix to srp_parse_options()
  IB/mthca: Coverity fix to mthca_init_eq_table()
  IB: Coverity fixes to sysfs.c
  IPoIB: Move ipoib_ib_dev_flush() to ipoib workqueue
  IPoIB: Fix build now that neighbour destructor is in neigh_params
  IB/uverbs: Use correct alt_pkey_index in modify QP
  IB/umad: Add support for large RMPP transfers
  IB/srp: Add SCSI host attributes to show target port
  IB/cm: Check cm_id state before handling a REP
  IB/mthca: Update firmware versions
  IB/mthca: Optimize large messages on Sinai HCAs
  IB/uverbs: Fix query QP return of sq_sig_all
  IB: Fix modify QP checking of "current QP state" attribute
  IPoIB: Fix multicast race between canceling and completing
  IPoIB: Clean up if posting receives fails
  IB/mthca: Use an enum for HCA page size
  ...
parents 28c006c1 fd02e803
Loading
Loading
Loading
Loading
+0 −19
Original line number Original line Diff line number Diff line
@@ -78,25 +78,6 @@ ib_get_agent_port(struct ib_device *device, int port_num)
	return entry;
	return entry;
}
}


int smi_check_local_dr_smp(struct ib_smp *smp,
			   struct ib_device *device,
			   int port_num)
{
	struct ib_agent_port_private *port_priv;

	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		return 1;

	port_priv = ib_get_agent_port(device, port_num);
	if (!port_priv) {
		printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d "
		       "not open\n", device->name, port_num);
		return 1;
	}

	return smi_check_local_smp(port_priv->agent[0], smp);
}

int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
			struct ib_wc *wc, struct ib_device *device,
			struct ib_wc *wc, struct ib_device *device,
			int port_num, int qpn)
			int port_num, int qpn)
+24 −18
Original line number Original line Diff line number Diff line
@@ -121,7 +121,7 @@ struct cm_id_private {


	struct rb_node service_node;
	struct rb_node service_node;
	struct rb_node sidr_id_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	wait_queue_head_t wait;
	wait_queue_head_t wait;
	atomic_t refcount;
	atomic_t refcount;


@@ -1547,40 +1547,46 @@ static int cm_rep_handler(struct cm_work *work)
		return -EINVAL;
		return -EINVAL;
	}
	}


	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);


	spin_lock_irqsave(&cm.lock, flags);
	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm.lock, flags);
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		ret = -EINVAL;
		goto error;
		goto error;
	}
	}
	/* Check for a stale connection. */
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm.lock, flags);
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
			     NULL, 0);
		ret = -EINVAL;
		ret = -EINVAL;
		goto error;
		goto error;
	}
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	spin_unlock(&cm.lock);

	cm_format_rep_event(work);


	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
@@ -1603,7 +1609,7 @@ static int cm_rep_handler(struct cm_work *work)
		cm_deref_id(cm_id_priv);
		cm_deref_id(cm_id_priv);
	return 0;
	return 0;


error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
error:
	cm_deref_id(cm_id_priv);
	cm_deref_id(cm_id_priv);
	return ret;
	return ret;
}
}
+3 −3
Original line number Original line Diff line number Diff line
@@ -280,7 +280,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
		struct ib_fmr_attr attr = {
		struct ib_fmr_attr attr = {
			.max_pages  = params->max_pages_per_fmr,
			.max_pages  = params->max_pages_per_fmr,
			.max_maps   = IB_FMR_MAX_REMAPS,
			.max_maps   = IB_FMR_MAX_REMAPS,
			.page_size = PAGE_SHIFT
			.page_shift = params->page_shift
		};
		};


		for (i = 0; i < params->pool_size; ++i) {
		for (i = 0; i < params->pool_size; ++i) {
+152 −43
Original line number Original line Diff line number Diff line
@@ -31,7 +31,7 @@
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 * SOFTWARE.
 *
 *
 * $Id: mad.c 2817 2005-07-07 11:29:26Z halr $
 * $Id: mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
 */
 */
#include <linux/dma-mapping.h>
#include <linux/dma-mapping.h>


@@ -679,8 +679,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
		goto out;
		goto out;
	}
	}
	/* Check to post send on QP or process locally */
	/* Check to post send on QP or process locally */
	ret = smi_check_local_dr_smp(smp, device, port_num);
	ret = smi_check_local_smp(smp, device);
	if (!ret || !device->process_mad)
	if (!ret)
		goto out;
		goto out;


	local = kmalloc(sizeof *local, GFP_ATOMIC);
	local = kmalloc(sizeof *local, GFP_ATOMIC);
@@ -765,18 +765,67 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
	return ret;
	return ret;
}
}


static int get_buf_length(int hdr_len, int data_len)
static int get_pad_size(int hdr_len, int data_len)
{
{
	int seg_size, pad;
	int seg_size, pad;


	seg_size = sizeof(struct ib_mad) - hdr_len;
	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		pad = seg_size - data_len % seg_size;
		if (pad == seg_size)
		return pad == seg_size ? 0 : pad;
			pad = 0;
	} else
	} else
		pad = seg_size;
		return seg_size;
	return hdr_len + data_len + pad;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
			       "alloc failed for len %zd, gfp %#x\n",
			       sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}
}


struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
@@ -787,32 +836,40 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
{
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wr_private *mad_send_wr;
	int buf_size;
	int pad, message_size, ret, size;
	void *buf;
	void *buf;


	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
				      agent);
	buf_size = get_buf_length(hdr_len, data_len);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;


	if ((!mad_agent->rmpp_version &&
	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || buf_size > sizeof(struct ib_mad))) ||
	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && buf_size > sizeof(struct ib_mad)))
	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);
		return ERR_PTR(-EINVAL);


	buf = kzalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
	if (!buf)
		return ERR_PTR(-ENOMEM);
		return ERR_PTR(-ENOMEM);


	mad_send_wr = buf + buf_size;
	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;


	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = buf_size;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;


	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 1;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
@@ -820,13 +877,11 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;


	if (rmpp_active) {
	if (rmpp_active) {
		struct ib_rmpp_mad *rmpp_mad = mad_send_wr->send_buf.mad;
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len -
		if (ret) {
						   IB_MGMT_RMPP_HDR + data_len);
			kfree(buf);
		rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version;
			return ERR_PTR(ret);
		rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
		}
		ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr,
				  IB_MGMT_RMPP_FLAG_ACTIVE);
	}
	}


	mad_send_wr->send_buf.mad_agent = mad_agent;
	mad_send_wr->send_buf.mad_agent = mad_agent;
@@ -835,14 +890,50 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
}
}
EXPORT_SYMBOL(ib_create_send_mad);
EXPORT_SYMBOL(ib_create_send_mad);


void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;


	mad_agent_priv = container_of(send_buf->mad_agent,
	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
				      struct ib_mad_agent_private, agent);
	kfree(send_buf->mad);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);


	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);
		wake_up(&mad_agent_priv->wait);
}
}
@@ -865,10 +956,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)


	mad_agent = mad_send_wr->send_buf.mad_agent;
	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge = mad_send_wr->sg_list;
	sge->addr = dma_map_single(mad_agent->device->dma_device,
	sge[0].addr = dma_map_single(mad_agent->device->dma_device,
				   mad_send_wr->send_buf.mad, sge->length,
				     mad_send_wr->send_buf.mad,
				     sge[0].length,
				     DMA_TO_DEVICE);
	pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);

	sge[1].addr = dma_map_single(mad_agent->device->dma_device,
				     ib_get_payload(mad_send_wr),
				     sge[1].length,
				     DMA_TO_DEVICE);
				     DMA_TO_DEVICE);
	pci_unmap_addr_set(mad_send_wr, mapping, sge->addr);
	pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);


	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -885,11 +983,14 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
		list_add_tail(&mad_send_wr->mad_list.list, list);
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret)
	if (ret) {
		dma_unmap_single(mad_agent->device->dma_device,
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(mad_send_wr, mapping),
				 pci_unmap_addr(mad_send_wr, header_mapping),
				 sge->length, DMA_TO_DEVICE);
				 sge[0].length, DMA_TO_DEVICE);

		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(mad_send_wr, payload_mapping),
				 sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
	return ret;
}
}


@@ -1661,9 +1762,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
					    port_priv->device->node_type,
					    port_priv->device->node_type,
					    port_priv->port_num))
					    port_priv->port_num))
			goto out;
			goto out;
		if (!smi_check_local_dr_smp(&recv->mad.smp,
		if (!smi_check_local_smp(&recv->mad.smp, port_priv->device))
					    port_priv->device,
					    port_priv->port_num))
			goto out;
			goto out;
	}
	}


@@ -1862,8 +1961,11 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,


retry:
retry:
	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
			 pci_unmap_addr(mad_send_wr, mapping),
			 pci_unmap_addr(mad_send_wr, header_mapping),
			 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
			 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
			 pci_unmap_addr(mad_send_wr, payload_mapping),
			 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);
	list_del(&mad_list->list);
@@ -2262,8 +2364,12 @@ static void timeout_sends(void *data)
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_port_private *port_priv = cq->cq_context;
	unsigned long flags;


	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
		queue_work(port_priv->wq, &port_priv->work);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}
}


/*
/*
@@ -2575,18 +2681,23 @@ static int ib_mad_port_open(struct ib_device *device,
	}
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);


	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	ret = ib_mad_port_start(port_priv);
	if (ret) {
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
		goto error9;
	}
	}


	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
	return 0;
	return 0;


error9:
error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_workqueue(port_priv->wq);
error8:
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[1]);
@@ -2623,11 +2734,9 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
		return -ENODEV;
	}
	}
	list_del(&port_priv->port_list);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);


	/* Stop processing completions. */
	flush_workqueue(port_priv->wq);
	destroy_workqueue(port_priv->wq);
	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	destroy_mad_qp(&port_priv->qp_info[0]);
+12 −4
Original line number Original line Diff line number Diff line
@@ -31,7 +31,7 @@
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 * SOFTWARE.
 *
 *
 * $Id: mad_priv.h 2730 2005-06-28 16:43:03Z sean.hefty $
 * $Id: mad_priv.h 5596 2006-03-03 01:00:07Z sean.hefty $
 */
 */


#ifndef __IB_MAD_PRIV_H__
#ifndef __IB_MAD_PRIV_H__
@@ -85,6 +85,12 @@ struct ib_mad_private {
	} mad;
	} mad;
} __attribute__ ((packed));
} __attribute__ ((packed));


struct ib_rmpp_segment {
	struct list_head list;
	u32 num;
	u8 data[0];
};

struct ib_mad_agent_private {
struct ib_mad_agent_private {
	struct list_head agent_list;
	struct list_head agent_list;
	struct ib_mad_agent agent;
	struct ib_mad_agent agent;
@@ -119,7 +125,8 @@ struct ib_mad_send_wr_private {
	struct list_head agent_list;
	struct list_head agent_list;
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf send_buf;
	struct ib_mad_send_buf send_buf;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_ADDR(header_mapping)
	DECLARE_PCI_UNMAP_ADDR(payload_mapping)
	struct ib_send_wr send_wr;
	struct ib_send_wr send_wr;
	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
	__be64 tid;
	__be64 tid;
@@ -130,11 +137,12 @@ struct ib_mad_send_wr_private {
	enum ib_wc_status status;
	enum ib_wc_status status;


	/* RMPP control */
	/* RMPP control */
	struct list_head rmpp_list;
	struct ib_rmpp_segment *last_ack_seg;
	struct ib_rmpp_segment *cur_seg;
	int last_ack;
	int last_ack;
	int seg_num;
	int seg_num;
	int newwin;
	int newwin;
	int total_seg;
	int data_offset;
	int pad;
	int pad;
};
};


Loading