drivers/infiniband/hw/amso1100/c2_cq.c  +2 −2

@@ -133,7 +133,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
         struct c2_qp *qp;
         int is_recv = 0;
 
-        ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+        ce = c2_mq_consume(&cq->mq);
         if (!ce) {
                 return -EAGAIN;
         }
@@ -146,7 +146,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
         while ((qp =
                 (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
                 c2_mq_free(&cq->mq);
-                ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+                ce = c2_mq_consume(&cq->mq);
                 if (!ce)
                         return -EAGAIN;
         }
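The hunks above drop explicit casts on the return value of c2_mq_consume(), and the ehca_irq.c hunks further down do the same for ehca_poll_eq(); both changes only make sense if those functions return void *. Such casts are redundant in C because a void * converts implicitly to any object-pointer type, so the assignment is well-formed without them. A minimal sketch of the pattern, where consume_entry() is a made-up stand-in rather than either driver's actual function:

#include <stddef.h>

struct entry {
        int value;
};

static struct entry pool[4];
static size_t next;

/* returns the next entry, or NULL when the pool is drained */
static void *consume_entry(void)
{
        return (next < 4) ? &pool[next++] : NULL;
}

int main(void)
{
        struct entry *e;

        e = consume_entry();        /* implicit void * conversion: no cast */
        while (e) {
                e->value = 0;
                e = consume_entry();
        }
        return 0;
}

Dropping the cast also means the code keeps compiling cleanly if the pointed-to type is ever renamed, which is the usual kernel-style argument for not casting void * results.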
drivers/infiniband/hw/ehca/ehca_classes_pSeries.h  +0 −28

@@ -165,7 +165,6 @@ struct hcp_modify_qp_control_block {
 #define MQPCB_MASK_ALT_P_KEY_IDX                EHCA_BMASK_IBM( 7,  7)
 #define MQPCB_MASK_RDMA_ATOMIC_CTRL             EHCA_BMASK_IBM( 8,  8)
 #define MQPCB_MASK_QP_STATE                     EHCA_BMASK_IBM( 9,  9)
-#define MQPCB_QP_STATE                          EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES      EHCA_BMASK_IBM(11, 11)
 #define MQPCB_MASK_PATH_MIGRATION_STATE         EHCA_BMASK_IBM(12, 12)
 #define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP    EHCA_BMASK_IBM(13, 13)
@@ -176,60 +175,33 @@ struct hcp_modify_qp_control_block {
 #define MQPCB_MASK_RETRY_COUNT                  EHCA_BMASK_IBM(18, 18)
 #define MQPCB_MASK_TIMEOUT                      EHCA_BMASK_IBM(19, 19)
 #define MQPCB_MASK_PATH_MTU                     EHCA_BMASK_IBM(20, 20)
-#define MQPCB_PATH_MTU                          EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_MAX_STATIC_RATE              EHCA_BMASK_IBM(21, 21)
-#define MQPCB_MAX_STATIC_RATE                   EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_DLID                         EHCA_BMASK_IBM(22, 22)
-#define MQPCB_DLID                              EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_RNR_RETRY_COUNT              EHCA_BMASK_IBM(23, 23)
-#define MQPCB_RNR_RETRY_COUNT                   EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_SOURCE_PATH_BITS             EHCA_BMASK_IBM(24, 24)
-#define MQPCB_SOURCE_PATH_BITS                  EHCA_BMASK_IBM(25, 31)
 #define MQPCB_MASK_TRAFFIC_CLASS                EHCA_BMASK_IBM(25, 25)
-#define MQPCB_TRAFFIC_CLASS                     EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_HOP_LIMIT                    EHCA_BMASK_IBM(26, 26)
-#define MQPCB_HOP_LIMIT                         EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_SOURCE_GID_IDX               EHCA_BMASK_IBM(27, 27)
-#define MQPCB_SOURCE_GID_IDX                    EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_FLOW_LABEL                   EHCA_BMASK_IBM(28, 28)
-#define MQPCB_FLOW_LABEL                        EHCA_BMASK_IBM(12, 31)
 #define MQPCB_MASK_DEST_GID                     EHCA_BMASK_IBM(30, 30)
 #define MQPCB_MASK_SERVICE_LEVEL_AL             EHCA_BMASK_IBM(31, 31)
-#define MQPCB_SERVICE_LEVEL_AL                  EHCA_BMASK_IBM(28, 31)
 #define MQPCB_MASK_SEND_GRH_FLAG_AL             EHCA_BMASK_IBM(32, 32)
-#define MQPCB_SEND_GRH_FLAG_AL                  EHCA_BMASK_IBM(31, 31)
 #define MQPCB_MASK_RETRY_COUNT_AL               EHCA_BMASK_IBM(33, 33)
-#define MQPCB_RETRY_COUNT_AL                    EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_TIMEOUT_AL                   EHCA_BMASK_IBM(34, 34)
-#define MQPCB_TIMEOUT_AL                        EHCA_BMASK_IBM(27, 31)
 #define MQPCB_MASK_MAX_STATIC_RATE_AL           EHCA_BMASK_IBM(35, 35)
-#define MQPCB_MAX_STATIC_RATE_AL                EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_DLID_AL                      EHCA_BMASK_IBM(36, 36)
-#define MQPCB_DLID_AL                           EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_RNR_RETRY_COUNT_AL           EHCA_BMASK_IBM(37, 37)
-#define MQPCB_RNR_RETRY_COUNT_AL                EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_SOURCE_PATH_BITS_AL          EHCA_BMASK_IBM(38, 38)
-#define MQPCB_SOURCE_PATH_BITS_AL               EHCA_BMASK_IBM(25, 31)
 #define MQPCB_MASK_TRAFFIC_CLASS_AL             EHCA_BMASK_IBM(39, 39)
-#define MQPCB_TRAFFIC_CLASS_AL                  EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_HOP_LIMIT_AL                 EHCA_BMASK_IBM(40, 40)
-#define MQPCB_HOP_LIMIT_AL                      EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_SOURCE_GID_IDX_AL            EHCA_BMASK_IBM(41, 41)
-#define MQPCB_SOURCE_GID_IDX_AL                 EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_FLOW_LABEL_AL                EHCA_BMASK_IBM(42, 42)
-#define MQPCB_FLOW_LABEL_AL                     EHCA_BMASK_IBM(12, 31)
 #define MQPCB_MASK_DEST_GID_AL                  EHCA_BMASK_IBM(44, 44)
 #define MQPCB_MASK_MAX_NR_OUTST_SEND_WR         EHCA_BMASK_IBM(45, 45)
-#define MQPCB_MAX_NR_OUTST_SEND_WR              EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_MAX_NR_OUTST_RECV_WR         EHCA_BMASK_IBM(46, 46)
-#define MQPCB_MAX_NR_OUTST_RECV_WR              EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK     EHCA_BMASK_IBM(47, 47)
-#define MQPCB_DISABLE_ETE_CREDIT_CHECK          EHCA_BMASK_IBM(31, 31)
-#define MQPCB_QP_NUMBER                         EHCA_BMASK_IBM( 8, 31)
 #define MQPCB_MASK_QP_ENABLE                    EHCA_BMASK_IBM(48, 48)
-#define MQPCB_QP_ENABLE                         EHCA_BMASK_IBM(31, 31)
 #define MQPCB_MASK_CURR_SRQ_LIMIT               EHCA_BMASK_IBM(49, 49)
-#define MQPCB_CURR_SRQ_LIMIT                    EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG       EHCA_BMASK_IBM(50, 50)
 #define MQPCB_MASK_SHARED_RQ_HNDL               EHCA_BMASK_IBM(51, 51)
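Every value mask deleted above ends at bit 31 and is exactly as wide as the IB attribute it describes, e.g. EHCA_BMASK_IBM(29, 31) for the 3-bit RNR retry count and EHCA_BMASK_IBM(12, 31) for the 20-bit flow label. Read as IBM MSB-0 numbering over a 32-bit word, that means each value sits right-aligned in its control-block field, so get/set through the mask degenerates to a plain read or assignment; presumably this is why the ehca_qp.c hunks below can access those fields directly. A minimal sketch of MSB-0 range extraction under that assumption; the helpers are illustrative stand-ins, not the driver's EHCA_BMASK_* macros:

#include <stdint.h>
#include <stdio.h>

/* width of the field the MSB-0 bit numbers refer to (assumed 32-bit) */
#define FIELD_BITS 32u

/* number of bits in the MSB-0 range [from, to], inclusive */
static uint32_t ibm_range_width(uint32_t from, uint32_t to)
{
        return to - from + 1;
}

/* right shift that moves the range down to bit 0 in LSB-0 terms */
static uint32_t ibm_range_shift(uint32_t to)
{
        return FIELD_BITS - 1 - to;
}

/* extract the MSB-0 range [from, to] from a 32-bit field */
static uint32_t ibm_bits_get(uint32_t field, uint32_t from, uint32_t to)
{
        uint32_t width = ibm_range_width(from, to);
        uint32_t mask = (width == FIELD_BITS) ? ~0u : ((1u << width) - 1);

        return (field >> ibm_range_shift(to)) & mask;
}

int main(void)
{
        uint32_t field = 0x12345678;

        /* MSB-0 bits 24..31 of a 32-bit word are simply its low byte */
        printf("bits 24..31 = 0x%02x\n", (unsigned)ibm_bits_get(field, 24, 31));
        printf("bits 16..31 = 0x%04x\n", (unsigned)ibm_bits_get(field, 16, 31));
        return 0;
}

For a range ending at bit 31 the shift is zero, so extraction reduces to masking off high bits, and for in-range values both get and set are the identity.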
drivers/infiniband/hw/ehca/ehca_irq.c  +4 −5

@@ -479,13 +479,13 @@ void ehca_tasklet_neq(unsigned long data)
         struct ehca_eqe *eqe;
         u64 ret;
 
-        eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+        eqe = ehca_poll_eq(shca, &shca->neq);
 
         while (eqe) {
                 if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
                         parse_ec(shca, eqe->entry);
 
-                eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+                eqe = ehca_poll_eq(shca, &shca->neq);
         }
 
         ret = hipz_h_reset_event(shca->ipz_hca_handle,
@@ -572,8 +572,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
         eqe_cnt = 0;
         do {
                 u32 token;
-                eqe_cache[eqe_cnt].eqe =
-                        (struct ehca_eqe *)ehca_poll_eq(shca, eq);
+                eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
                 if (!eqe_cache[eqe_cnt].eqe)
                         break;
                 eqe_value = eqe_cache[eqe_cnt].eqe->entry;
@@ -637,7 +636,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
                 goto unlock_irq_spinlock;
         do {
                 struct ehca_eqe *eqe;
-                eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+                eqe = ehca_poll_eq(shca, &shca->eq);
                 if (!eqe)
                         break;
                 process_eqe(shca, eqe);

drivers/infiniband/hw/ehca/ehca_main.c  +1 −1

@@ -52,7 +52,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"
 
-#define HCAD_VERSION "0026"
+#define HCAD_VERSION "0027"
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
h_ret=%lli", ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli", h_ret); h_ret); Loading Loading @@ -769,6 +771,7 @@ static struct ehca_qp *internal_create_qp( goto create_qp_exit2; goto create_qp_exit2; } } if (!is_user) { my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length / my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length / my_qp->ipz_squeue.qe_size; my_qp->ipz_squeue.qe_size; my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries * my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries * Loading @@ -782,6 +785,7 @@ static struct ehca_qp *internal_create_qp( /* to avoid the generation of bogus flush CQEs */ /* to avoid the generation of bogus flush CQEs */ reset_queue_map(&my_qp->sq_map); reset_queue_map(&my_qp->sq_map); } } } if (HAS_RQ(my_qp)) { if (HAS_RQ(my_qp)) { ret = init_qp_queue( ret = init_qp_queue( Loading @@ -792,7 +796,7 @@ static struct ehca_qp *internal_create_qp( "and pages ret=%i", ret); "and pages ret=%i", ret); goto create_qp_exit4; goto create_qp_exit4; } } if (!is_user) { my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length / my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length / my_qp->ipz_rqueue.qe_size; my_qp->ipz_rqueue.qe_size; my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries * my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries * Loading @@ -805,7 +809,8 @@ static struct ehca_qp *internal_create_qp( INIT_LIST_HEAD(&my_qp->rq_err_node); INIT_LIST_HEAD(&my_qp->rq_err_node); /* to avoid the generation of bogus flush CQEs */ /* to avoid the generation of bogus flush CQEs */ reset_queue_map(&my_qp->rq_map); reset_queue_map(&my_qp->rq_map); } else if (init_attr->srq) { } } else if (init_attr->srq && !is_user) { /* this is a base QP, use the queue map of the SRQ */ /* this is a base QP, use the queue map of the SRQ */ my_qp->rq_map = my_srq->rq_map; my_qp->rq_map = my_srq->rq_map; INIT_LIST_HEAD(&my_qp->rq_err_node); INIT_LIST_HEAD(&my_qp->rq_err_node); Loading Loading @@ -918,7 +923,7 @@ static struct ehca_qp *internal_create_qp( kfree(my_qp->mod_qp_parm); kfree(my_qp->mod_qp_parm); create_qp_exit6: create_qp_exit6: if (HAS_RQ(my_qp)) if (HAS_RQ(my_qp) && !is_user) vfree(my_qp->rq_map.map); vfree(my_qp->rq_map.map); create_qp_exit5: create_qp_exit5: Loading @@ -926,7 +931,7 @@ static struct ehca_qp *internal_create_qp( ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); create_qp_exit4: create_qp_exit4: if (HAS_SQ(my_qp)) if (HAS_SQ(my_qp) && !is_user) vfree(my_qp->sq_map.map); vfree(my_qp->sq_map.map); create_qp_exit3: create_qp_exit3: Loading Loading @@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp, u64 update_mask; u64 update_mask; u64 h_ret; u64 h_ret; int bad_wqe_cnt = 0; int bad_wqe_cnt = 0; int is_user = 0; int squeue_locked = 0; int squeue_locked = 0; unsigned long flags = 0; unsigned long flags = 0; Loading @@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp, ret = ehca2ib_return_code(h_ret); ret = ehca2ib_return_code(h_ret); goto modify_qp_exit1; goto modify_qp_exit1; } } if (ibqp->uobject) is_user = 1; qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state); qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state); Loading Loading @@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp, goto modify_qp_exit2; goto modify_qp_exit2; } } } } if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) { if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR) && !is_user) { ret = check_for_left_cqes(my_qp, shca); ret = check_for_left_cqes(my_qp, shca); if 
(ret) if (ret) goto modify_qp_exit2; goto modify_qp_exit2; Loading @@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp, ipz_qeit_reset(&my_qp->ipz_rqueue); ipz_qeit_reset(&my_qp->ipz_rqueue); ipz_qeit_reset(&my_qp->ipz_squeue); ipz_qeit_reset(&my_qp->ipz_squeue); if (qp_cur_state == IB_QPS_ERR) { if (qp_cur_state == IB_QPS_ERR && !is_user) { del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node); del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node); if (HAS_RQ(my_qp)) if (HAS_RQ(my_qp)) del_from_err_list(my_qp->recv_cq, del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node); &my_qp->rq_err_node); } } if (!is_user) reset_queue_map(&my_qp->sq_map); reset_queue_map(&my_qp->sq_map); if (HAS_RQ(my_qp)) if (HAS_RQ(my_qp) && !is_user) reset_queue_map(&my_qp->rq_map); reset_queue_map(&my_qp->rq_map); } } Loading Loading @@ -1952,19 +1962,13 @@ int ehca_query_qp(struct ib_qp *qp, qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size; qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size; qp_attr->dest_qp_num = qpcb->dest_qp_nr; qp_attr->dest_qp_num = qpcb->dest_qp_nr; qp_attr->pkey_index = qp_attr->pkey_index = qpcb->prim_p_key_idx; EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx); qp_attr->port_num = qpcb->prim_phys_port; qp_attr->port_num = EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port); qp_attr->timeout = qpcb->timeout; qp_attr->timeout = qpcb->timeout; qp_attr->retry_cnt = qpcb->retry_count; qp_attr->retry_cnt = qpcb->retry_count; qp_attr->rnr_retry = qpcb->rnr_retry_count; qp_attr->rnr_retry = qpcb->rnr_retry_count; qp_attr->alt_pkey_index = qp_attr->alt_pkey_index = qpcb->alt_p_key_idx; EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx); qp_attr->alt_port_num = qpcb->alt_phys_port; qp_attr->alt_port_num = qpcb->alt_phys_port; qp_attr->alt_timeout = qpcb->timeout_al; qp_attr->alt_timeout = qpcb->timeout_al; Loading Loading @@ -2051,8 +2055,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, update_mask |= update_mask |= EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1) EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1) | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1); | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1); mqpcb->curr_srq_limit = mqpcb->curr_srq_limit = attr->srq_limit; EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit); mqpcb->qp_aff_asyn_ev_log_reg = mqpcb->qp_aff_asyn_ev_log_reg = EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1); EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1); } } Loading Loading @@ -2115,8 +2118,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr) srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1; srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1; srq_attr->max_sge = 3; srq_attr->max_sge = 3; srq_attr->srq_limit = EHCA_BMASK_GET( srq_attr->srq_limit = qpcb->curr_srq_limit; MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit); if (ehca_debug_level >= 2) if (ehca_debug_level >= 2) ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); Loading @@ -2138,10 +2140,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, int ret; int ret; u64 h_ret; u64 h_ret; u8 port_num; u8 port_num; int is_user = 0; enum ib_qp_type qp_type; enum ib_qp_type qp_type; unsigned long flags; unsigned long flags; if (uobject) { if (uobject) { is_user = 1; if (my_qp->mm_count_galpa || if (my_qp->mm_count_galpa || my_qp->mm_count_rqueue || my_qp->mm_count_squeue) { my_qp->mm_count_rqueue || 
my_qp->mm_count_squeue) { ehca_err(dev, "Resources still referenced in " ehca_err(dev, "Resources still referenced in " Loading @@ -2168,10 +2172,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, * SRQs will never get into an error list and do not have a recv_cq, * SRQs will never get into an error list and do not have a recv_cq, * so we need to skip them here. * so we need to skip them here. */ */ if (HAS_RQ(my_qp) && !IS_SRQ(my_qp)) if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user) del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node); del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node); if (HAS_SQ(my_qp)) if (HAS_SQ(my_qp) && !is_user) del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node); del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node); /* now wait until all pending events have completed */ /* now wait until all pending events have completed */ Loading Loading @@ -2209,12 +2213,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, if (HAS_RQ(my_qp)) { if (HAS_RQ(my_qp)) { ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); if (!is_user) vfree(my_qp->rq_map.map); vfree(my_qp->rq_map.map); } } if (HAS_SQ(my_qp)) { if (HAS_SQ(my_qp)) { ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); if (!is_user) vfree(my_qp->sq_map.map); vfree(my_qp->sq_map.map); } } kmem_cache_free(qp_cache, my_qp); kmem_cache_free(qp_cache, my_qp); Loading Loading
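Taken together, the ehca_qp.c hunks thread an is_user flag through QP create, modify and destroy so that the software queue maps (kernel-side bookkeeping used to generate flush CQEs) and the CQ error-list links are only allocated, reset and freed for kernel-space QPs; userspace QPs never get them. A minimal sketch of the allocate/free symmetry this enforces, with simplified stand-in types (demo_qp, queue_map) rather than the driver's structs:

#include <stddef.h>
#include <stdlib.h>

struct queue_map {
        size_t entries;
        unsigned long *map;
};

struct demo_qp {
        int is_user;              /* set when created through ib_uverbs */
        struct queue_map sq_map;  /* kernel-only flush-CQE bookkeeping */
};

/* allocate the software queue map only for kernel QPs */
static int demo_create_qp(struct demo_qp *qp, int is_user, size_t entries)
{
        qp->is_user = is_user;
        qp->sq_map.entries = 0;
        qp->sq_map.map = NULL;

        if (!is_user) {
                qp->sq_map.entries = entries;
                qp->sq_map.map = calloc(entries, sizeof(*qp->sq_map.map));
                if (!qp->sq_map.map)
                        return -1;
        }
        return 0;
}

/* destroy path mirrors the create path: free only what was allocated */
static void demo_destroy_qp(struct demo_qp *qp)
{
        if (!qp->is_user)
                free(qp->sq_map.map);
}

int main(void)
{
        struct demo_qp kqp, uqp;

        if (demo_create_qp(&kqp, 0, 128))   /* kernel QP: map allocated */
                return 1;
        if (demo_create_qp(&uqp, 1, 128))   /* user QP: no map */
                return 1;

        demo_destroy_qp(&kqp);
        demo_destroy_qp(&uqp);
        return 0;
}

The same !is_user guard appears on every error and teardown path in the diff (create_qp_exit4, create_qp_exit6, internal_destroy_qp), which keeps vfree() from being called on a map that was never vmalloc()'d.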