drivers/infiniband/core/iwcm.c (+17 −37)

@@ -183,15 +183,14 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv)
 /*
  * Release a reference on cm_id. If the last reference is being
- * released, enable the waiting thread (in iw_destroy_cm_id) to
- * get woken up, and return 1 if a thread is already waiting.
+ * released, free the cm_id and return 1.
  */
 static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 {
 	BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
 	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
 		BUG_ON(!list_empty(&cm_id_priv->work_list));
-		complete(&cm_id_priv->destroy_comp);
+		free_cm_id(cm_id_priv);
 		return 1;
 	}
@@ -208,19 +207,10 @@ static void add_ref(struct iw_cm_id *cm_id)
 static void rem_ref(struct iw_cm_id *cm_id)
 {
 	struct iwcm_id_private *cm_id_priv;
-	int cb_destroy;
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
-	/*
-	 * Test bit before deref in case the cm_id gets freed on another
-	 * thread.
-	 */
-	cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
-	if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
-		BUG_ON(!list_empty(&cm_id_priv->work_list));
-		free_cm_id(cm_id_priv);
-	}
+	(void)iwcm_deref_id(cm_id_priv);
 }
 
 static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
@@ -370,6 +360,12 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
 	wait_event(cm_id_priv->connect_wait,
 		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
 
+	/*
+	 * Since we're deleting the cm_id, drop any events that
+	 * might arrive before the last dereference.
+	 */
+	set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
+
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	switch (cm_id_priv->state) {
 	case IW_CM_STATE_LISTEN:
@@ -433,13 +429,7 @@ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
 	struct iwcm_id_private *cm_id_priv;
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
-	BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));
-
 	destroy_cm_id(cm_id);
-
-	wait_for_completion(&cm_id_priv->destroy_comp);
-
-	free_cm_id(cm_id_priv);
 }
 EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -809,10 +799,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	ret = cm_id->cm_handler(cm_id, iw_event);
 	if (ret) {
 		iw_cm_reject(cm_id, NULL, 0);
-		set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
-		destroy_cm_id(cm_id);
-		if (atomic_read(&cm_id_priv->refcount)==0)
-			free_cm_id(cm_id_priv);
+		iw_destroy_cm_id(cm_id);
 	}
 
 out:
@@ -1000,7 +987,6 @@ static void cm_work_handler(struct work_struct *_work)
 	unsigned long flags;
 	int empty;
 	int ret = 0;
-	int destroy_id;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	empty = list_empty(&cm_id_priv->work_list);
@@ -1013,20 +999,14 @@ static void cm_work_handler(struct work_struct *_work)
 		put_work(work);
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-		ret = process_event(cm_id_priv, &levent);
-		if (ret) {
-			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
-			destroy_cm_id(&cm_id_priv->id);
-		}
-		BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
-		destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
-		if (iwcm_deref_id(cm_id_priv)) {
-			if (destroy_id) {
-				BUG_ON(!list_empty(&cm_id_priv->work_list));
-				free_cm_id(cm_id_priv);
-			}
+		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
+			ret = process_event(cm_id_priv, &levent);
+			if (ret)
+				destroy_cm_id(&cm_id_priv->id);
+		} else
+			pr_debug("dropping event %d\n", levent.event);
+		if (iwcm_deref_id(cm_id_priv))
 			return;
-		}
 		if (empty)
 			return;
 		spin_lock_irqsave(&cm_id_priv->lock, flags);
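Note on the iwcm.c change: teardown no longer round-trips through IWCM_F_CALLBACK_DESTROY, the destroy_comp completion, and per-call-site free_cm_id() calls. Instead, the last iwcm_deref_id() frees the cm_id itself, and destroy_cm_id() sets IWCM_F_DROP_EVENTS so an event that races with destruction is logged and dropped rather than processed against a dying id. A minimal userspace sketch of that lifetime model follows; the struct, field names, and plain C11 atomics are illustrative assumptions, not the kernel's types.

/* Sketch of the "free on last deref" model above; hypothetical analogue,
 * not the kernel structures themselves. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_cm_id {
	atomic_int refcount;
	atomic_bool drop_events;	/* stands in for IWCM_F_DROP_EVENTS */
};

static struct fake_cm_id *cm_id_create(void)
{
	struct fake_cm_id *id = calloc(1, sizeof(*id));

	atomic_init(&id->refcount, 1);		/* caller's reference */
	atomic_init(&id->drop_events, false);
	return id;
}

/* Analogue of iwcm_deref_id(): the last deref frees, returns 1 if it did. */
static int cm_id_deref(struct fake_cm_id *id)
{
	if (atomic_fetch_sub(&id->refcount, 1) == 1) {
		free(id);			/* plays the role of free_cm_id() */
		return 1;
	}
	return 0;
}

/* Analogue of the cm_work_handler() change: events arriving after destroy
 * has been requested are dropped instead of being processed. */
static void deliver_event(struct fake_cm_id *id, int event)
{
	if (!atomic_load(&id->drop_events))
		printf("processing event %d\n", event);
	else
		printf("dropping event %d\n", event);
	cm_id_deref(id);			/* ref taken when the event was queued */
}

int main(void)
{
	struct fake_cm_id *id = cm_id_create();

	atomic_fetch_add(&id->refcount, 1);	/* queued event holds a reference */
	atomic_store(&id->drop_events, true);	/* destroy_cm_id() analogue */
	deliver_event(id, 7);			/* dropped, event ref released */
	cm_id_deref(id);			/* caller's ref; object freed here */
	return 0;
}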
drivers/infiniband/core/iwcm.h (+1 −1)

@@ -56,7 +56,7 @@ struct iwcm_id_private {
 	struct list_head work_free_list;
 };
 
-#define IWCM_F_CALLBACK_DESTROY	  1
+#define IWCM_F_DROP_EVENTS	  1
 #define IWCM_F_CONNECT_WAIT	  2
 
 #endif /* IWCM_H */
drivers/infiniband/core/netlink.c (+5 −1)

@@ -229,7 +229,10 @@ static void ibnl_rcv(struct sk_buff *skb)
 int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
 			__u32 pid)
 {
-	return nlmsg_unicast(nls, skb, pid);
+	int err;
+
+	err = netlink_unicast(nls, skb, pid, 0);
+	return (err < 0) ? err : 0;
 }
 EXPORT_SYMBOL(ibnl_unicast);
@@ -252,6 +255,7 @@ int __init ibnl_init(void)
 		return -ENOMEM;
 	}
 
+	nls->sk_sndtimeo = 10 * HZ;
 	return 0;
 }
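Note on the netlink.c change: nlmsg_unicast() is a thin wrapper that calls netlink_unicast() with MSG_DONTWAIT and collapses positive return values (bytes delivered) to 0, so a full receive buffer fails immediately with -EAGAIN. Calling netlink_unicast(nls, skb, pid, 0) directly makes the send blocking instead, and the new nls->sk_sndtimeo = 10 * HZ in ibnl_init() bounds that blocking to roughly ten seconds. A sketch of the before/after behavior, written against the in-tree helpers; the function names below are illustrative, not part of the patch.

/* Sketch only: assumes the headers and the static "struct sock *nls"
 * already present in drivers/infiniband/core/netlink.c. */

/* Old behavior: non-blocking send; fails with -EAGAIN when the receiver's
 * socket buffer is full. */
static int ibnl_unicast_nonblocking(struct sk_buff *skb, __u32 pid)
{
	return nlmsg_unicast(nls, skb, pid);	/* netlink_unicast(..., MSG_DONTWAIT) inside */
}

/* New behavior: blocking send, bounded by nls->sk_sndtimeo (10 * HZ after
 * ibnl_init()); netlink_unicast() returns the skb length on success, so
 * positive values are collapsed to 0. */
static int ibnl_unicast_blocking(struct sk_buff *skb, __u32 pid)
{
	int err = netlink_unicast(nls, skb, pid, 0);

	return (err < 0) ? err : 0;
}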
drivers/infiniband/hw/cxgb3/iwch_cm.c (+2 −2)

@@ -1396,10 +1396,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.tdev = tdev;
 	child_ep->com.cm_id = NULL;
-	child_ep->com.local_addr.sin_family = PF_INET;
+	child_ep->com.local_addr.sin_family = AF_INET;
 	child_ep->com.local_addr.sin_port = req->local_port;
 	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
-	child_ep->com.remote_addr.sin_family = PF_INET;
+	child_ep->com.remote_addr.sin_family = AF_INET;
 	child_ep->com.remote_addr.sin_port = req->peer_port;
 	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
 	get_ep(&parent_ep->com);
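Note on the iwch_cm.c change: sin_family holds an address family, so AF_INET is the semantically correct constant; PF_INET belongs with the socket(2) domain argument. The two are defined to the same value, so this is a cleanup with no behavior change. A small userspace sketch of the idiomatic pattern; the helper below is hypothetical.

/* Sketch: fill a sockaddr_in with the address-family constant. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static void fill_sockaddr(struct sockaddr_in *sin, const char *ip, unsigned short port)
{
	memset(sin, 0, sizeof(*sin));
	sin->sin_family = AF_INET;		/* address family, not PF_INET */
	sin->sin_port = htons(port);
	inet_pton(AF_INET, ip, &sin->sin_addr);
}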
drivers/infiniband/hw/cxgb4/cm.c (+11 −1)

@@ -3068,9 +3068,9 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
 		PDBG("%s last streaming msg ack ep %p tid %u state %u "
 		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
 		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+		mutex_lock(&ep->com.mutex);
 		kfree_skb(ep->mpa_skb);
 		ep->mpa_skb = NULL;
-		mutex_lock(&ep->com.mutex);
 		if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
 			stop_ep_timer(ep);
 		mutex_unlock(&ep->com.mutex);
@@ -3647,6 +3647,16 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 			ep->com.state = ABORTING;
 		else {
 			ep->com.state = CLOSING;
+
+			/*
+			 * if we close before we see the fw4_ack() then we fix
+			 * up the timer state since we're reusing it.
+			 */
+			if (ep->mpa_skb &&
+			    test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
+				clear_bit(STOP_MPA_TIMER, &ep->com.flags);
+				stop_ep_timer(ep);
+			}
 			start_ep_timer(ep);
 		}
 		set_bit(CLOSE_SENT, &ep->com.flags);
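Note on the cm.c change: fw4_ack() now takes ep->com.mutex before freeing ep->mpa_skb, and c4iw_ep_disconnect() handles the case where the close path runs before fw4_ack() has consumed STOP_MPA_TIMER. Per the in-diff comment, the endpoint timer is reused for the CLOSING state, so the disconnect path clears the pending flag and stops the still-armed MPA timer itself before start_ep_timer() re-arms it; otherwise a late fw4_ack() could stop the freshly started CLOSING timer. A toy sketch of that deferred-stop hand-off; every type and name below is a simplified stand-in, not the driver's.

/* Sketch: one timer shared by the MPA and CLOSING phases, with a
 * "stop later" flag that may still be pending when close begins. */
#include <stdbool.h>
#include <stdio.h>

struct fake_ep {
	bool timer_armed;	/* stands in for the ep timer */
	bool stop_mpa_timer;	/* stands in for STOP_MPA_TIMER */
	bool mpa_skb_pending;	/* stands in for ep->mpa_skb != NULL */
};

static void stop_ep_timer(struct fake_ep *ep)  { ep->timer_armed = false; }
static void start_ep_timer(struct fake_ep *ep) { ep->timer_armed = true; }

/* Analogue of the c4iw_ep_disconnect() hunk: settle the MPA timer and its
 * pending flag before arming the timer for the CLOSING phase. */
static void begin_close(struct fake_ep *ep)
{
	if (ep->mpa_skb_pending && ep->stop_mpa_timer) {
		ep->stop_mpa_timer = false;
		stop_ep_timer(ep);
	}
	start_ep_timer(ep);
}

int main(void)
{
	struct fake_ep ep = { .timer_armed = true, .stop_mpa_timer = true,
			      .mpa_skb_pending = true };

	begin_close(&ep);
	/* timer is armed for CLOSING; no deferred stop is left behind */
	printf("timer_armed=%d stop_mpa_timer=%d\n",
	       ep.timer_armed, ep.stop_mpa_timer);
	return 0;
}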