drivers/infiniband/core/mad.c  (+21 −0)

@@ -1842,6 +1842,24 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	}
 }
 
+static bool generate_unmatched_resp(struct ib_mad_private *recv,
+				    struct ib_mad_private *response)
+{
+	if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
+	    recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
+		memcpy(response, recv, sizeof *response);
+		response->header.recv_wc.wc = &response->header.wc;
+		response->header.recv_wc.recv_buf.mad = &response->mad.mad;
+		response->header.recv_wc.recv_buf.grh = &response->grh;
+		response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+		response->mad.mad.mad_hdr.status =
+			cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
+
+		return true;
+	} else {
+		return false;
+	}
+}
+
 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 				     struct ib_wc *wc)
 {
@@ -1963,6 +1981,9 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 		 * or via recv_handler in ib_mad_complete_recv()
 		 */
 		recv = NULL;
+	} else if (generate_unmatched_resp(recv, response)) {
+		agent_send_response(&response->mad.mad, &recv->grh, wc,
+				    port_priv->device, port_num, qp_info->qp->qp_num);
 	}
 
 out:
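The new generate_unmatched_resp() turns an otherwise-unhandled Get/Set MAD into a GetResp that echoes the request and carries the "unsupported method/attribute" status, so the sender sees an error instead of a timeout. Below is a minimal user-space sketch of that idea, using simplified stand-in types rather than the kernel's ib_mad structures; the field layout and the status value are illustrative only.

/*
 * Sketch only: the response reuses the request contents, so the
 * transaction ID and attribute fields still match the original
 * Get/Set, and only the method and status change.  The real code
 * additionally byte-swaps the status to wire (big-endian) order.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MGMT_METHOD_GET			0x01
#define MGMT_METHOD_SET			0x02
#define MGMT_METHOD_GET_RESP		0x81
#define MAD_STATUS_UNSUP_METHOD_ATTRIB	0x000c	/* illustrative value */

struct mad_hdr {
	uint8_t  method;
	uint16_t status;
	uint64_t tid;		/* transaction ID, echoed back unchanged */
	uint16_t attr_id;	/* attribute ID, echoed back unchanged */
};

static bool build_unmatched_resp(const struct mad_hdr *req, struct mad_hdr *resp)
{
	if (req->method != MGMT_METHOD_GET && req->method != MGMT_METHOD_SET)
		return false;	/* only Get/Set requests get an error response */

	memcpy(resp, req, sizeof *resp);		/* echo the request */
	resp->method = MGMT_METHOD_GET_RESP;		/* mark it as a response */
	resp->status = MAD_STATUS_UNSUP_METHOD_ATTRIB;	/* report why it failed */
	return true;
}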
drivers/infiniband/core/ucma.c  (+18 −19)

@@ -449,24 +449,6 @@ static void ucma_cleanup_multicast(struct ucma_context *ctx)
 	mutex_unlock(&mut);
 }
 
-static void ucma_cleanup_events(struct ucma_context *ctx)
-{
-	struct ucma_event *uevent, *tmp;
-
-	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
-		if (uevent->ctx != ctx)
-			continue;
-
-		list_del(&uevent->list);
-
-		/* clear incoming connections. */
-		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
-			rdma_destroy_id(uevent->cm_id);
-
-		kfree(uevent);
-	}
-}
-
 static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
 {
 	struct ucma_event *uevent, *tmp;
@@ -480,9 +462,16 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
 	}
 }
 
+/*
+ * We cannot hold file->mut when calling rdma_destroy_id() or we can
+ * deadlock.  We also acquire file->mut in ucma_event_handler(), and
+ * rdma_destroy_id() will wait until all callbacks have completed.
+ */
 static int ucma_free_ctx(struct ucma_context *ctx)
 {
 	int events_reported;
+	struct ucma_event *uevent, *tmp;
+	LIST_HEAD(list);
 
 	/* No new events will be generated after destroying the id. */
 	rdma_destroy_id(ctx->cm_id);
@@ -491,10 +480,20 @@ static int ucma_free_ctx(struct ucma_context *ctx)
 
 	/* Cleanup events not yet reported to the user. */
 	mutex_lock(&ctx->file->mut);
-	ucma_cleanup_events(ctx);
+	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
+		if (uevent->ctx == ctx)
+			list_move_tail(&uevent->list, &list);
+	}
 	list_del(&ctx->list);
 	mutex_unlock(&ctx->file->mut);
 
+	list_for_each_entry_safe(uevent, tmp, &list, list) {
+		list_del(&uevent->list);
+		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
+			rdma_destroy_id(uevent->cm_id);
+		kfree(uevent);
+	}
+
 	events_reported = ctx->events_reported;
 	kfree(ctx);
 	return events_reported;
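The fix replaces ucma_cleanup_events() with a two-phase cleanup: pending events for this context are moved onto a private list while file->mut is held, and rdma_destroy_id(), which can block on callbacks that themselves take file->mut, runs only after the mutex is dropped. Below is a minimal user-space sketch of the same collect-then-process pattern, using pthreads and a hand-rolled singly linked list instead of the kernel's list_head API; all names are hypothetical.

/*
 * Phase 1 unlinks this owner's entries under the lock; phase 2 does
 * the potentially blocking cleanup with the lock released, so a
 * callback that needs the same lock can still make progress.
 */
#include <pthread.h>
#include <stdlib.h>

struct event {
	struct event *next;
	void *owner;		/* stands in for uevent->ctx */
};

struct file_state {
	pthread_mutex_t mut;	/* stands in for file->mut */
	struct event *events;	/* stands in for file->event_list */
};

static void free_owner_events(struct file_state *f, void *owner)
{
	struct event *mine = NULL, **pp;

	/* Phase 1: detach matching events while holding the lock. */
	pthread_mutex_lock(&f->mut);
	for (pp = &f->events; *pp; ) {
		struct event *e = *pp;
		if (e->owner == owner) {
			*pp = e->next;	/* unlink from the shared list */
			e->next = mine;	/* push onto the private list */
			mine = e;
		} else {
			pp = &e->next;
		}
	}
	pthread_mutex_unlock(&f->mut);

	/* Phase 2: blocking cleanup (rdma_destroy_id() in the real code)
	 * only after the lock has been dropped. */
	while (mine) {
		struct event *e = mine;
		mine = e->next;
		free(e);
	}
}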
drivers/infiniband/hw/cxgb3/iwch_qp.c  (+20 −20)

@@ -803,7 +803,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
  * Assumes qhp lock is held.
  */
 static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
-		       struct iwch_cq *schp, unsigned long *flag)
+		       struct iwch_cq *schp)
 {
 	int count;
 	int flushed;
@@ -812,44 +812,44 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
 	/* take a ref on the qhp since we must release the lock */
 	atomic_inc(&qhp->refcnt);
-	spin_unlock_irqrestore(&qhp->lock, *flag);
+	spin_unlock(&qhp->lock);
 
 	/* locking hierarchy: cq lock first, then qp lock. */
-	spin_lock_irqsave(&rchp->lock, *flag);
+	spin_lock(&rchp->lock);
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&rchp->cq);
 	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
-	spin_unlock_irqrestore(&rchp->lock, *flag);
+	spin_unlock(&rchp->lock);
 	if (flushed) {
-		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+		spin_lock(&rchp->comp_handler_lock);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+		spin_unlock(&rchp->comp_handler_lock);
 	}
 
 	/* locking hierarchy: cq lock first, then qp lock. */
-	spin_lock_irqsave(&schp->lock, *flag);
+	spin_lock(&schp->lock);
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&schp->cq);
 	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
 	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
-	spin_unlock_irqrestore(&schp->lock, *flag);
+	spin_unlock(&schp->lock);
 	if (flushed) {
-		spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+		spin_lock(&schp->comp_handler_lock);
 		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-		spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+		spin_unlock(&schp->comp_handler_lock);
 	}
 
 	/* deref */
 	if (atomic_dec_and_test(&qhp->refcnt))
 		wake_up(&qhp->wait);
 
-	spin_lock_irqsave(&qhp->lock, *flag);
+	spin_lock(&qhp->lock);
 }
 
-static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void flush_qp(struct iwch_qp *qhp)
 {
 	struct iwch_cq *rchp, *schp;
@@ -859,19 +859,19 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	if (qhp->ibqp.uobject) {
 		cxio_set_wq_in_error(&qhp->wq);
 		cxio_set_cq_in_error(&rchp->cq);
-		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+		spin_lock(&rchp->comp_handler_lock);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+		spin_unlock(&rchp->comp_handler_lock);
 		if (schp != rchp) {
 			cxio_set_cq_in_error(&schp->cq);
-			spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+			spin_lock(&schp->comp_handler_lock);
 			(*schp->ibcq.comp_handler)(&schp->ibcq,
 						   schp->ibcq.cq_context);
-			spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+			spin_unlock(&schp->comp_handler_lock);
 		}
 		return;
 	}
-	__flush_qp(qhp, rchp, schp, flag);
+	__flush_qp(qhp, rchp, schp);
 }
@@ -1030,7 +1030,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 			break;
 		case IWCH_QP_STATE_ERROR:
 			qhp->attr.state = IWCH_QP_STATE_ERROR;
-			flush_qp(qhp, &flag);
+			flush_qp(qhp);
 			break;
 		default:
 			ret = -EINVAL;
@@ -1078,7 +1078,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 		}
 		switch (attrs->next_state) {
 		case IWCH_QP_STATE_IDLE:
-			flush_qp(qhp, &flag);
+			flush_qp(qhp);
 			qhp->attr.state = IWCH_QP_STATE_IDLE;
 			qhp->attr.llp_stream_handle = NULL;
 			put_ep(&qhp->ep->com);
@@ -1132,7 +1132,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 	free=1;
 	wake_up(&qhp->wait);
 	BUG_ON(!ep);
-	flush_qp(qhp, &flag);
+	flush_qp(qhp);
 out:
 	spin_unlock_irqrestore(&qhp->lock, flag);
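After this change, the only spin_lock_irqsave()/spin_unlock_irqrestore() pair left is the one in iwch_modify_qp(), so flush_qp() and __flush_qp() run with interrupts already disabled and no longer need the caller's saved flags passed down by pointer; they use plain spin_lock()/spin_unlock(). Below is a kernel-style sketch of that division of responsibility, with hypothetical names (not the cxgb3 code).

/*
 * Sketch: the caller owns the single irqsave/irqrestore pair, so a
 * helper that must temporarily drop the lock can use plain
 * spin_lock()/spin_unlock() without shuffling the saved flags across
 * function boundaries.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(qp_lock);

/* Called with qp_lock held and interrupts already disabled. */
static void helper_drops_lock_temporarily(void)
{
	spin_unlock(&qp_lock);	/* interrupts stay disabled */
	/* ... take CQ locks in the proper order, do the flush work ... */
	spin_lock(&qp_lock);	/* return with the lock held again */
}

static void caller(void)
{
	unsigned long flags;

	spin_lock_irqsave(&qp_lock, flags);	/* the one irqsave lives here */
	helper_drops_lock_temporarily();
	spin_unlock_irqrestore(&qp_lock, flags);
}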
drivers/infiniband/hw/cxgb4/cm.c  (+1 −1)

@@ -1114,7 +1114,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * generated when moving QP to RTS state.
 	 * A TERM message will be sent after QP has moved to RTS state
 	 */
-	if ((ep->mpa_attr.version == 2) &&
+	if ((ep->mpa_attr.version == 2) && peer2peer &&
 	    (ep->mpa_attr.p2p_type != p2p_type)) {
 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
 		rtr_mismatch = 1;
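With the added peer2peer test, an MPAv2 RTR-type mismatch is only flagged (and p2p disabled, with a TERM sent after the QP reaches RTS) when peer-to-peer mode is actually enabled. Restated in isolation as a hypothetical helper, purely for illustration:

#include <stdbool.h>

/* Not driver code: the corrected condition from the hunk above. */
static bool rtr_mismatch_detected(int mpa_version, bool peer2peer,
				  int negotiated_p2p, int requested_p2p)
{
	return mpa_version == 2 && peer2peer &&
	       negotiated_p2p != requested_p2p;
}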
drivers/infiniband/hw/ehca/ehca_irq.c  (+2 −1)

@@ -786,7 +786,8 @@ static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
 	spin_lock_init(&cct->task_lock);
 	INIT_LIST_HEAD(&cct->cq_list);
 	init_waitqueue_head(&cct->wait_queue);
-	cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);
+	cct->task = kthread_create_on_node(comp_task, cct, cpu_to_node(cpu),
+					   "ehca_comp/%d", cpu);
 
 	return cct->task;
 }
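The completion tasks are created per CPU, so switching to kthread_create_on_node() with cpu_to_node(cpu) lets the thread's task structure and stack be allocated on the NUMA node of the CPU it will service. Below is a kernel-style sketch of the general pattern, with hypothetical names (not the ehca code).

/*
 * Sketch: create a per-CPU worker thread NUMA-locally, bind it to its
 * CPU, then start it (kthread_create_on_node() leaves it stopped).
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/topology.h>	/* cpu_to_node() */

static int thread_fn(void *data)
{
	/* placeholder loop; a real worker would wait for queued work */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static struct task_struct *start_worker_on(int cpu, void *data)
{
	struct task_struct *task;

	/* Allocate the thread's bookkeeping on the CPU's own NUMA node. */
	task = kthread_create_on_node(thread_fn, data, cpu_to_node(cpu),
				      "worker/%d", cpu);
	if (IS_ERR(task))
		return task;

	kthread_bind(task, cpu);	/* pin it to the intended CPU */
	wake_up_process(task);		/* start the stopped kthread */
	return task;
}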