drivers/infiniband/hw/cxgb3/cxio_hal.c (+7 −8)

@@ -109,7 +109,6 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
 	while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
 		udelay(1);
 		if (i++ > 1000000) {
-			BUG_ON(1);
 			printk(KERN_ERR "%s: stalled rnic\n",
 			       rdev_p->dev_name);
 			return -EIO;

@@ -155,7 +154,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
 	return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
 }

-int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
 {
 	struct rdma_cq_setup setup;
 	int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);

@@ -163,12 +162,12 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 	cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
 	if (!cq->cqid)
 		return -ENOMEM;
-	cq->sw_queue = kzalloc(size, GFP_KERNEL);
-	if (!cq->sw_queue)
-		return -ENOMEM;
-	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
-				       (1UL << (cq->size_log2)) *
-				       sizeof(struct t3_cqe),
+	if (kernel) {
+		cq->sw_queue = kzalloc(size, GFP_KERNEL);
+		if (!cq->sw_queue)
+			return -ENOMEM;
+	}
+	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
 				       &(cq->dma_addr), GFP_KERNEL);
 	if (!cq->queue) {
 		kfree(cq->sw_queue);

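Note: the new kernel argument makes the host-side shadow queue (cq->sw_queue) optional. Only a CQ owned by an in-kernel consumer needs it; a user CQ is polled through a userspace mapping. The matching caller-side change is not shown in the hunks above; a plausible shape, assuming the provider can tell whether an ib_ucontext is attached, is sketched below (example_create_cq is a hypothetical helper, not driver code):

/* Hypothetical caller sketch: pass kernel=1 only when there is no
 * user context, so the kzalloc'd sw_queue exists exactly for
 * kernel-owned CQs.  Assumes the driver's cxio_hal.h declarations.
 */
static int example_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq,
			     struct ib_ucontext *ucontext)
{
	return cxio_create_cq(rdev, cq, !ucontext);
}
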
drivers/infiniband/hw/cxgb3/cxio_hal.h (+2 −2)

@@ -53,7 +53,7 @@
 #define T3_MAX_PBL_SIZE 256
 #define T3_MAX_RQ_SIZE 1024
 #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 8192
+#define T3_MAX_CQ_DEPTH 262144
 #define T3_MAX_NUM_STAG (1<<15)
 #define T3_MAX_MR_SIZE 0x100000000ULL
 #define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */

@@ -157,7 +157,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev);
 void cxio_rdev_close(struct cxio_rdev *rdev);
 int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
 		   enum t3_cq_opcode op, u32 credit);
-int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
+int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
 int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
 int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
 void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);

drivers/infiniband/hw/cxgb3/cxio_wr.h (+16 −1)

@@ -730,7 +730,22 @@ struct t3_cq {
 static inline void cxio_set_wq_in_error(struct t3_wq *wq)
 {
-	wq->queue->wq_in_err.err = 1;
+	wq->queue->wq_in_err.err |= 1;
+}
+
+static inline void cxio_disable_wq_db(struct t3_wq *wq)
+{
+	wq->queue->wq_in_err.err |= 2;
+}
+
+static inline void cxio_enable_wq_db(struct t3_wq *wq)
+{
+	wq->queue->wq_in_err.err &= ~2;
+}
+
+static inline int cxio_wq_db_enabled(struct t3_wq *wq)
+{
+	return !(wq->queue->wq_in_err.err & 2);
 }

 static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)

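Note: these helpers treat wq_in_err.err as a two-bit flag word: bit 0 means "WQ is in error", bit 1 means "doorbells disabled". That is why cxio_set_wq_in_error() now ORs in bit 0 instead of assigning 1, which would silently re-enable doorbells. A minimal runnable user-space model of the interplay (plain C, no driver headers):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t err = 0;

	err |= 2;		/* cxio_disable_wq_db()                    */
	err |= 1;		/* cxio_set_wq_in_error(): |= keeps bit 1  */
	assert(err & 2);	/* doorbells still disabled                */

	err &= ~2;		/* cxio_enable_wq_db()                     */
	assert(!(err & 2));	/* doorbells enabled again                 */
	assert(err & 1);	/* error state preserved                   */
	return 0;
}
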
drivers/infiniband/hw/cxgb3/iwch.c (+76 −4)

@@ -65,6 +65,46 @@ struct cxgb3_client t3c_client = {
 static LIST_HEAD(dev_list);
 static DEFINE_MUTEX(dev_mutex);

+static int disable_qp_db(int id, void *p, void *data)
+{
+	struct iwch_qp *qhp = p;
+
+	cxio_disable_wq_db(&qhp->wq);
+	return 0;
+}
+
+static int enable_qp_db(int id, void *p, void *data)
+{
+	struct iwch_qp *qhp = p;
+
+	if (data)
+		ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
+	cxio_enable_wq_db(&qhp->wq);
+	return 0;
+}
+
+static void disable_dbs(struct iwch_dev *rnicp)
+{
+	spin_lock_irq(&rnicp->lock);
+	idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
+	spin_unlock_irq(&rnicp->lock);
+}
+
+static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
+{
+	spin_lock_irq(&rnicp->lock);
+	idr_for_each(&rnicp->qpidr, enable_qp_db,
+		     (void *)(unsigned long)ring_db);
+	spin_unlock_irq(&rnicp->lock);
+}
+
+static void iwch_db_drop_task(struct work_struct *work)
+{
+	struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
+					      db_drop_task.work);
+	enable_dbs(rnicp, 1);
+}
+
 static void rnic_init(struct iwch_dev *rnicp)
 {
 	PDBG("%s iwch_dev %p\n", __func__, rnicp);

@@ -72,6 +112,7 @@ static void rnic_init(struct iwch_dev *rnicp)
 	idr_init(&rnicp->qpidr);
 	idr_init(&rnicp->mmidr);
 	spin_lock_init(&rnicp->lock);
+	INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);

 	rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
 	rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;

@@ -147,6 +188,8 @@ static void close_rnic_dev(struct t3cdev *tdev)
 	mutex_lock(&dev_mutex);
 	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
 		if (dev->rdev.t3cdev_p == tdev) {
+			dev->rdev.flags = CXIO_ERROR_FATAL;
+			cancel_delayed_work_sync(&dev->db_drop_task);
 			list_del(&dev->entry);
 			iwch_unregister_device(dev);
 			cxio_rdev_close(&dev->rdev);

@@ -166,6 +209,7 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
 	struct iwch_dev *rnicp;
 	struct ib_event event;
 	u32 portnum = port_id + 1;
+	int dispatch = 0;

 	if (!rdev)
 		return;

@@ -174,21 +218,49 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
 	case OFFLOAD_STATUS_DOWN: {
 		rdev->flags = CXIO_ERROR_FATAL;
 		event.event = IB_EVENT_DEVICE_FATAL;
+		dispatch = 1;
 		break;
 		}
 	case OFFLOAD_PORT_DOWN: {
 		event.event = IB_EVENT_PORT_ERR;
+		dispatch = 1;
 		break;
 		}
 	case OFFLOAD_PORT_UP: {
 		event.event = IB_EVENT_PORT_ACTIVE;
+		dispatch = 1;
+		break;
+		}
+	case OFFLOAD_DB_FULL: {
+		disable_dbs(rnicp);
+		break;
+		}
+	case OFFLOAD_DB_EMPTY: {
+		enable_dbs(rnicp, 1);
+		break;
+		}
+	case OFFLOAD_DB_DROP: {
+		unsigned long delay = 1000;
+		unsigned short r;
+
+		disable_dbs(rnicp);
+		get_random_bytes(&r, 2);
+		delay += r & 1023;
+
+		/*
+		 * delay is between 1000-2023 usecs.
+		 */
+		schedule_delayed_work(&rnicp->db_drop_task,
+			usecs_to_jiffies(delay));
 		break;
 		}
 	}

-	event.device = &rnicp->ibdev;
-	event.element.port_num = portnum;
-	ib_dispatch_event(&event);
-
+	if (dispatch) {
+		event.device = &rnicp->ibdev;
+		event.element.port_num = portnum;
+		ib_dispatch_event(&event);
+	}
+
 	return;
 }

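Note: the handler disables doorbells on OFFLOAD_DB_FULL, re-enables them on OFFLOAD_DB_EMPTY, and on OFFLOAD_DB_DROP re-enables them from db_drop_task only after a randomized backoff of 1000 + (r & 1023) usecs, i.e. 1000-2023 usecs, so hosts sharing the adapter do not all re-ring at once. The scheme also relies on the posting fast path skipping the doorbell while it is disabled; that change is not part of the hunks above. A hedged sketch of the intended check, assuming the existing ring_doorbell() helper and the wq.doorbell/wq.qpid fields:

/* Sketch only: gate the doorbell ring on the new per-WQ flag.  Any
 * ring skipped here is compensated by enable_qp_db(), which re-rings
 * the doorbell for every QP when doorbells are turned back on.
 */
static void example_ring_if_enabled(struct iwch_qp *qhp)
{
	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
}
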
drivers/infiniband/hw/cxgb3/iwch.h (+2 −0)

@@ -36,6 +36,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/idr.h>
+#include <linux/workqueue.h>

 #include <rdma/ib_verbs.h>

@@ -110,6 +111,7 @@ struct iwch_dev {
 	struct idr mmidr;
 	spinlock_t lock;
 	struct list_head entry;
+	struct delayed_work db_drop_task;
 };

 static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)

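Note: db_drop_task is an ordinary delayed_work item: the patch initializes it in rnic_init(), arms it from the OFFLOAD_DB_DROP case, and cancels it with cancel_delayed_work_sync() in close_rnic_dev() so the device cannot be torn down with the work still pending. A generic self-contained sketch of that lifecycle (example names only, not driver code):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct example_dev {
	struct delayed_work db_drop_task;
};

static void example_db_drop_fn(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev,
					       db_drop_task.work);
	/* re-enable and re-ring doorbells here */
	(void)dev;
}

static void example_lifecycle(struct example_dev *dev)
{
	INIT_DELAYED_WORK(&dev->db_drop_task, example_db_drop_fn);          /* setup    */
	schedule_delayed_work(&dev->db_drop_task, usecs_to_jiffies(1500));  /* arm      */
	cancel_delayed_work_sync(&dev->db_drop_task);                       /* teardown */
}
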