Documentation/filesystems/nfs/nfs-rdma.txt  (+6 −3)

@@ -187,8 +187,10 @@ Check RDMA and NFS Setup
     To further test the InfiniBand software stack, use IPoIB (this
     assumes you have two IB hosts named host1 and host2):
 
-    host1$ ifconfig ib0 a.b.c.x
-    host2$ ifconfig ib0 a.b.c.y
+    host1$ ip link set dev ib0 up
+    host1$ ip address add dev ib0 a.b.c.x
+    host2$ ip link set dev ib0 up
+    host2$ ip address add dev ib0 a.b.c.y
 
     host1$ ping a.b.c.y
     host2$ ping a.b.c.x

@@ -229,7 +231,8 @@ NFS/RDMA Setup
 
     $ modprobe ib_mthca
     $ modprobe ib_ipoib
-    $ ifconfig ib0 a.b.c.d
+    $ ip li set dev ib0 up
+    $ ip addr add dev ib0 a.b.c.d
 
     NOTE: use unique addresses for the client and server

MAINTAINERS  (+9 −0)

@@ -8791,6 +8791,15 @@ W: http://www.emulex.com
 S: Supported
 F: drivers/net/ethernet/emulex/benet/
 
+EMULEX ONECONNECT ROCE DRIVER
+M: Selvin Xavier <selvin.xavier@emulex.com>
+M: Devesh Sharma <devesh.sharma@emulex.com>
+M: Mitesh Ahuja <mitesh.ahuja@emulex.com>
+L: linux-rdma@vger.kernel.org
+W: http://www.emulex.com
+S: Supported
+F: drivers/infiniband/hw/ocrdma/
+
 SFC NETWORK DRIVER
 M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
 M: Shradha Shah <sshah@solarflare.com>

drivers/infiniband/core/uverbs_main.c  (+11 −11)

@@ -246,6 +246,17 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 		kfree(uqp);
 	}
 
+	list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
+		struct ib_srq *srq = uobj->object;
+		struct ib_uevent_object *uevent =
+			container_of(uobj, struct ib_uevent_object, uobject);
+
+		idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
+		ib_destroy_srq(srq);
+		ib_uverbs_release_uevent(file, uevent);
+		kfree(uevent);
+	}
+
 	list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
 		struct ib_cq *cq = uobj->object;
 		struct ib_uverbs_event_file *ev_file = cq->cq_context;
@@ -258,17 +269,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 		kfree(ucq);
 	}
 
-	list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
-		struct ib_srq *srq = uobj->object;
-		struct ib_uevent_object *uevent =
-			container_of(uobj, struct ib_uevent_object, uobject);
-
-		idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
-		ib_destroy_srq(srq);
-		ib_uverbs_release_uevent(file, uevent);
-		kfree(uevent);
-	}
-
 	list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
 		struct ib_mr *mr = uobj->object;
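Why the SRQ loop moves ahead of the CQ loop: an XRC SRQ is created against a completion queue, so a CQ cannot safely be torn down while an SRQ still references it. The sketch below is a hypothetical userspace illustration using libibverbs (not part of this commit) showing the reference being taken at SRQ creation and the matching teardown order; error handling is trimmed for brevity.

/*
 * Hedged sketch: an XRC SRQ holds a reference to the CQ passed at creation,
 * so teardown must destroy the SRQ before the CQ, mirroring the reordered
 * kernel cleanup above.
 */
#include <fcntl.h>
#include <infiniband/verbs.h>

int xrc_srq_demo(struct ibv_context *ctx)
{
	struct ibv_pd *pd = ibv_alloc_pd(ctx);
	struct ibv_cq *cq = ibv_create_cq(ctx, 16, NULL, NULL, 0);
	struct ibv_xrcd_init_attr xrcd_attr = {
		.comp_mask = IBV_XRCD_INIT_ATTR_FD | IBV_XRCD_INIT_ATTR_OFLAGS,
		.fd = -1,		/* anonymous XRC domain */
		.oflags = O_CREAT,
	};
	struct ibv_xrcd *xrcd = ibv_open_xrcd(ctx, &xrcd_attr);
	struct ibv_srq_init_attr_ex attr = {
		.attr = { .max_wr = 16, .max_sge = 1 },
		.comp_mask = IBV_SRQ_INIT_ATTR_TYPE | IBV_SRQ_INIT_ATTR_XRCD |
			     IBV_SRQ_INIT_ATTR_CQ | IBV_SRQ_INIT_ATTR_PD,
		.srq_type = IBV_SRQT_XRC,
		.pd = pd,
		.xrcd = xrcd,
		.cq = cq,	/* the SRQ now references this CQ */
	};
	struct ibv_srq *srq = ibv_create_srq_ex(ctx, &attr);

	if (!srq)
		return -1;

	/* Teardown releases the SRQ first, then the CQ it referenced. */
	ibv_destroy_srq(srq);
	ibv_destroy_cq(cq);
	ibv_close_xrcd(xrcd);
	ibv_dealloc_pd(pd);
	return 0;
}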
drivers/infiniband/hw/mlx4/alias_GUID.c  (+335 −122)

@@ -58,14 +58,19 @@ struct mlx4_alias_guid_work_context {
 	int query_id;
 	struct list_head list;
 	int block_num;
+	ib_sa_comp_mask guid_indexes;
+	u8 method;
 };
 
 struct mlx4_next_alias_guid_work {
 	u8 port;
 	u8 block_num;
+	u8 method;
 	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
 };
 
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+				     int *resched_delay_sec);
+
 void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
 					 u8 port_num, u8 *p_data)

@@ -118,6 +123,57 @@ ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
 	return IB_SA_COMP_MASK(4 + index);
 }
 
+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+				    int port, int slave_init)
+{
+	__be64 curr_guid, required_guid;
+	int record_num = slave / 8;
+	int index = slave % 8;
+	int port_index = port - 1;
+	unsigned long flags;
+	int do_work = 0;
+
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+	if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
+	    GUID_STATE_NEED_PORT_INIT)
+		goto unlock;
+	if (!slave_init) {
+		curr_guid = *(__be64 *)&dev->sriov.
+			alias_guid.ports_guid[port_index].
+			all_rec_per_port[record_num].
+			all_recs[GUID_REC_SIZE * index];
+		if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) ||
+		    !curr_guid)
+			goto unlock;
+		required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
+	} else {
+		required_guid = mlx4_get_admin_guid(dev->dev, slave, port);
+		if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			goto unlock;
+	}
+	*(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].
+		all_recs[GUID_REC_SIZE * index] = required_guid;
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].guid_indexes
+		|= mlx4_ib_get_aguid_comp_mask_from_ix(index);
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].status
+		= MLX4_GUID_INFO_STATUS_IDLE;
+	/* set to run immediately */
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].time_to_run = 0;
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].
+		guids_retry_schedule[index] = 0;
+	do_work = 1;
+unlock:
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+
+	if (do_work)
+		mlx4_ib_init_alias_guid_work(dev, port_index);
+}
+
 /*
  * Whenever new GUID is set/unset (guid table change) create event and
  * notify the relevant slave (master also should be notified).

@@ -138,10 +194,15 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 	enum slave_port_state prev_state;
 	__be64 tmp_cur_ag, form_cache_ag;
 	enum slave_port_gen_event gen_event;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec;
+	unsigned long flags;
+	__be64 required_value;
 
 	if (!mlx4_is_master(dev->dev))
 		return;
 
+	rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
+			all_rec_per_port[block_num];
 	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
 				   ports_guid[port_num - 1].
 				   all_rec_per_port[block_num].guid_indexes);
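For reference, the new mlx4_ib_slave_alias_guid_event() above maps a slave number onto the GUID table with record_num = slave / 8 and index = slave % 8, since each GUIDInfo record carries eight alias GUIDs (NUM_ALIAS_GUID_IN_REC). A minimal standalone sketch of that arithmetic, with the constant inlined as an assumption:

/* Standalone sketch of the slave -> (record, index) mapping; GUIDS_PER_REC
 * is assumed to mirror the driver's NUM_ALIAS_GUID_IN_REC (8). */
#include <stdio.h>

#define GUIDS_PER_REC 8

int main(void)
{
	for (int slave = 0; slave < 20; slave++) {
		int record_num = slave / GUIDS_PER_REC; /* which 8-GUID record */
		int index = slave % GUIDS_PER_REC;      /* slot inside it */

		printf("slave %2d -> record %d, index %d\n",
		       slave, record_num, index);
	}
	return 0; /* e.g. slave 10 lands in record 1, index 2 */
}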
@@ -166,8 +227,27 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 		 */
 		if (tmp_cur_ag != form_cache_ag)
 			continue;
-		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
+
+		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+		required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+
+		if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			required_value = 0;
+
+		if (tmp_cur_ag == required_value) {
+			rec->guid_indexes = rec->guid_indexes &
+			       ~mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		} else {
+			/* may notify port down if value is 0 */
+			if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
+				spin_unlock_irqrestore(&dev->sriov.
+					alias_guid.ag_work_lock, flags);
+				continue;
+			}
+		}
+		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
+				       flags);
+		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
 		/*2 cases: Valid GUID, and Invalid Guid*/
 		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/

@@ -188,13 +268,17 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
 						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
 						      &gen_event);
-			pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
-				 slave_id, port_num);
-			mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
-						       port_num,
-						       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+			if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
+				pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+					 slave_id, port_num);
+				mlx4_gen_port_state_change_eqe(dev->dev,
+							       slave_id,
+							       port_num,
+							       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+			}
 		}
 	}
 }

@@ -206,6 +290,9 @@ static void aliasguid_query_handler(int status,
 	int i;
 	struct mlx4_sriov_alias_guid_info_rec_det *rec;
 	unsigned long flags, flags1;
+	ib_sa_comp_mask declined_guid_indexes = 0;
+	ib_sa_comp_mask applied_guid_indexes = 0;
+	unsigned int resched_delay_sec = 0;
 
 	if (!context)
 		return;

@@ -216,9 +303,9 @@ static void aliasguid_query_handler(int status,
 		all_rec_per_port[cb_ctx->block_num];
 
 	if (status) {
-		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
 		pr_debug("(port: %d) failed: status = %d\n",
 			 cb_ctx->port, status);
+		rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
 		goto out;
 	}
@@ -235,57 +322,101 @@ static void aliasguid_query_handler(int status,
 	rec = &dev->sriov.alias_guid.ports_guid[port_index].
 		all_rec_per_port[guid_rec->block_num];
 
-	rec->status = MLX4_GUID_INFO_STATUS_SET;
-	rec->method = MLX4_GUID_INFO_RECORD_SET;
-
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
 	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
-		__be64 tmp_cur_ag;
-		tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
-		/* check if the SM didn't assign one of the records.
-		 * if it didn't, if it was not sysadmin request:
-		 * ask the SM to give a new GUID, (instead of the driver request).
-		 */
-		if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
-			mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
-				     "block_num: %d was declined by SM, "
-				     "ownership by %d (0 = driver, 1=sysAdmin,"
-				     " 2=None)\n", __func__, i,
-				     guid_rec->block_num, rec->ownership);
-			if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
-				/* if it is driver assign, asks for new GUID from SM*/
-				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
-					MLX4_NOT_SET_GUID;
-				/* Mark the record as not assigned, and let it
-				 * be sent again in the next work sched.*/
-				rec->status = MLX4_GUID_INFO_STATUS_IDLE;
-				rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
-			}
+		__be64 sm_response, required_val;
+
+		if (!(cb_ctx->guid_indexes &
+			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+			continue;
+		sm_response = *(__be64 *)&guid_rec->guid_info_list
+				[i * GUID_REC_SIZE];
+		required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+		if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
+			if (required_val ==
+			    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+				goto next_entry;
+
+			/* A new value was set till we got the response */
+			pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
+				 be64_to_cpu(required_val),
+				 i, guid_rec->block_num);
+			goto entry_declined;
+		}
+
+		/* check if the SM didn't assign one of the records.
+		 * if it didn't, re-ask for.
+		 */
+		if (sm_response == MLX4_NOT_SET_GUID) {
+			if (rec->guids_retry_schedule[i] == 0)
+				mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in block_num: %d was declined by SM\n",
+					     __func__, i,
+					     guid_rec->block_num);
+			goto entry_declined;
 		} else {
 			/* properly assigned record. */
 			/* We save the GUID we just got from the SM in the
 			 * admin_guid in order to be persistent, and in the
 			 * request from the sm the process will ask for the same GUID */
" "Record num %d in block_num:%d " "Record num %d in block_num:%d " "was declined by SM, " "was declined by SM, " "new val(0x%llx) was kept\n", "new val(0x%llx) was kept, SM returned (0x%llx)\n", __func__, i, __func__, i, guid_rec->block_num, guid_rec->block_num, be64_to_cpu(*(__be64 *) & be64_to_cpu(required_val), rec->all_recs[i * GUID_REC_SIZE])); be64_to_cpu(sm_response)); goto entry_declined; } else { } else { memcpy(&rec->all_recs[i * GUID_REC_SIZE], *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] = &guid_rec->guid_info_list[i * GUID_REC_SIZE], sm_response; GUID_REC_SIZE); if (required_val == 0) } mlx4_set_admin_guid(dev->dev, sm_response, (guid_rec->block_num * NUM_ALIAS_GUID_IN_REC) + i, cb_ctx->port); goto next_entry; } } entry_declined: declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i); rec->guids_retry_schedule[i] = (rec->guids_retry_schedule[i] == 0) ? 1 : min((unsigned int)60, rec->guids_retry_schedule[i] * 2); /* using the minimum value among all entries in that record */ resched_delay_sec = (resched_delay_sec == 0) ? rec->guids_retry_schedule[i] : min(resched_delay_sec, rec->guids_retry_schedule[i]); continue; next_entry: rec->guids_retry_schedule[i] = 0; } } applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes; if (declined_guid_indexes || rec->guid_indexes & ~(applied_guid_indexes)) { pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n", guid_rec->block_num, be64_to_cpu((__force __be64)rec->guid_indexes), be64_to_cpu((__force __be64)applied_guid_indexes), be64_to_cpu((__force __be64)declined_guid_indexes)); rec->time_to_run = ktime_get_real_ns() + resched_delay_sec * NSEC_PER_SEC; } else { rec->status = MLX4_GUID_INFO_STATUS_SET; } } spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); /* /* The func is call here to close the cases when the The func is call here to close the cases when the sm doesn't send smp, so in the sa response the driver sm doesn't send smp, so in the sa response the driver Loading @@ -297,10 +428,13 @@ static void aliasguid_query_handler(int status, out: out: spin_lock_irqsave(&dev->sriov.going_down_lock, flags); spin_lock_irqsave(&dev->sriov.going_down_lock, flags); spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); if (!dev->sriov.is_going_down) if (!dev->sriov.is_going_down) { get_low_record_time_index(dev, port_index, &resched_delay_sec); queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq, queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq, &dev->sriov.alias_guid.ports_guid[port_index]. &dev->sriov.alias_guid.ports_guid[port_index]. 
@@ -297,10 +428,13 @@ static void aliasguid_query_handler(int status,
 out:
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
-	if (!dev->sriov.is_going_down)
+	if (!dev->sriov.is_going_down) {
+		get_low_record_time_index(dev, port_index, &resched_delay_sec);
 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
 				   &dev->sriov.alias_guid.ports_guid[port_index].
-				   alias_guid_work, 0);
+				   alias_guid_work,
+				   msecs_to_jiffies(resched_delay_sec * 1000));
+	}
 	if (cb_ctx->sa_query) {
 		list_del(&cb_ctx->list);
 		kfree(cb_ctx);

@@ -317,9 +451,7 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
 	ib_sa_comp_mask comp_mask = 0;
 
 	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
-		= MLX4_GUID_INFO_STATUS_IDLE;
-	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
-		= MLX4_GUID_INFO_RECORD_SET;
+		= MLX4_GUID_INFO_STATUS_SET;
 
 	/* calculate the comp_mask for that record.*/
 	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {

@@ -333,19 +465,21 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
 		   need to assign GUIDs, then don't put it up for assignment.
 		*/
 		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
-		    (!index && !i) ||
-		    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
-		    ports_guid[port - 1].all_rec_per_port[index].ownership)
+		    (!index && !i))
 			continue;
 		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
 	}
 	dev->sriov.alias_guid.ports_guid[port - 1].
-		all_rec_per_port[index].guid_indexes = comp_mask;
+		all_rec_per_port[index].guid_indexes |= comp_mask;
+	if (dev->sriov.alias_guid.ports_guid[port - 1].
+	    all_rec_per_port[index].guid_indexes)
+		dev->sriov.alias_guid.ports_guid[port - 1].
+		all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE;
 }
 static int set_guid_rec(struct ib_device *ibdev,
-			u8 port, int index,
-			struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+			struct mlx4_next_alias_guid_work *rec)
 {
 	int err;
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);

@@ -354,6 +488,9 @@ static int set_guid_rec(struct ib_device *ibdev,
 	struct ib_port_attr attr;
 	struct mlx4_alias_guid_work_context *callback_context;
 	unsigned long resched_delay, flags, flags1;
+	u8 port = rec->port + 1;
+	int index = rec->block_num;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
 	struct list_head *head =
 		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

@@ -380,6 +517,8 @@ static int set_guid_rec(struct ib_device *ibdev,
 	callback_context->port = port;
 	callback_context->dev = dev;
 	callback_context->block_num = index;
+	callback_context->guid_indexes = rec_det->guid_indexes;
+	callback_context->method = rec->method;
 
 	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

@@ -399,7 +538,7 @@ static int set_guid_rec(struct ib_device *ibdev,
 	callback_context->query_id =
 		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
 					  ibdev, port, &guid_info_rec,
-					  comp_mask, rec_det->method, 1000,
+					  comp_mask, rec->method, 1000,
 					  GFP_KERNEL, aliasguid_query_handler,
 					  callback_context,
 					  &callback_context->sa_query);
@@ -434,6 +573,30 @@ out:
 	return err;
 }
 
+static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port)
+{
+	int j, k, entry;
+	__be64 guid;
+
+	/*Check if the SM doesn't need to assign the GUIDs*/
+	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+		for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
+			entry = j * NUM_ALIAS_GUID_IN_REC + k;
+			/* no request for the 0 entry (hw guid) */
+			if (!entry || entry > dev->dev->persist->num_vfs ||
+			    !mlx4_is_slave_active(dev->dev, entry))
+				continue;
+			guid = mlx4_get_admin_guid(dev->dev, entry, port);
+			*(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
+				all_rec_per_port[j].all_recs
+				[GUID_REC_SIZE * k] = guid;
+			pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n",
+				 entry, be64_to_cpu(guid), port);
+		}
+	}
+}
+
 void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 {
 	int i;

@@ -443,6 +606,13 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
 
+	if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
+		GUID_STATE_NEED_PORT_INIT) {
+		mlx4_ib_guid_port_init(dev, port);
+		dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
+			(~GUID_STATE_NEED_PORT_INIT);
+	}
 	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
 		invalidate_guid_record(dev, port, i);
@@ -462,60 +632,107 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
 }
 
-/* The function returns the next record that was
- * not configured (or failed to be configured) */
-static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
-				     struct mlx4_next_alias_guid_work *rec)
-{
-	int j;
-	unsigned long flags;
-
-	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
-		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
-		if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
-		    MLX4_GUID_INFO_STATUS_IDLE) {
-			memcpy(&rec->rec_det,
-			       &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
-			       sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
-			rec->port = port;
-			rec->block_num = j;
-			dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
-				MLX4_GUID_INFO_STATUS_PENDING;
-			spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
-			return 0;
-		}
-		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
-	}
-	return -ENOENT;
-}
-
-static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
-					     int rec_index,
-					     struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
-{
-	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
-		rec_det->guid_indexes;
-	memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
-	       rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
-	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
-		rec_det->status;
-}
-
-static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
-{
-	int j;
-	struct mlx4_sriov_alias_guid_info_rec_det rec_det ;
-
-	for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
-		memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
-		rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
-			IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
-			IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
-			IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
-			IB_SA_GUIDINFO_REC_GID7;
-		rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
-		set_administratively_guid_record(dev, port, j, &rec_det);
-	}
-}
+static void set_required_record(struct mlx4_ib_dev *dev, u8 port,
+				struct mlx4_next_alias_guid_work *next_rec,
+				int record_index)
+{
+	int i;
+	int lowset_time_entry = -1;
+	int lowest_time = 0;
+	ib_sa_comp_mask delete_guid_indexes = 0;
+	ib_sa_comp_mask set_guid_indexes = 0;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec =
+			&dev->sriov.alias_guid.ports_guid[port].
+			all_rec_per_port[record_index];
+
+	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+		if (!(rec->guid_indexes &
+			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+			continue;
+
+		if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
+				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			delete_guid_indexes |=
+				mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		else
+			set_guid_indexes |=
+				mlx4_ib_get_aguid_comp_mask_from_ix(i);
+
+		if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <=
+			lowest_time) {
+			lowset_time_entry = i;
+			lowest_time = rec->guids_retry_schedule[i];
+		}
+	}
+
+	memcpy(&next_rec->rec_det, rec, sizeof(*rec));
+	next_rec->port = port;
+	next_rec->block_num = record_index;
+
+	if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] ==
+				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
+		next_rec->rec_det.guid_indexes = delete_guid_indexes;
+		next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
+	} else {
+		next_rec->rec_det.guid_indexes = set_guid_indexes;
+		next_rec->method = MLX4_GUID_INFO_RECORD_SET;
+	}
+}
+
+/* return index of record that should be updated based on lowest
+ * rescheduled time
+ */
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+				     int *resched_delay_sec)
+{
+	int record_index = -1;
+	u64 low_record_time = 0;
+	struct mlx4_sriov_alias_guid_info_rec_det rec;
+	int j;
+
+	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+		rec = dev->sriov.alias_guid.ports_guid[port].
+			all_rec_per_port[j];
+		if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
+		    rec.guid_indexes) {
+			if (record_index == -1 ||
+			    rec.time_to_run < low_record_time) {
+				record_index = j;
+				low_record_time = rec.time_to_run;
+			}
+		}
+	}
+	if (resched_delay_sec) {
+		u64 curr_time = ktime_get_real_ns();
+
+		*resched_delay_sec = (low_record_time < curr_time) ? 0 :
+			div_u64((low_record_time - curr_time), NSEC_PER_SEC);
+	}
+
+	return record_index;
+}
+
+/* The function returns the next record that was
+ * not configured (or failed to be configured) */
+static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
+				     struct mlx4_next_alias_guid_work *rec)
+{
+	unsigned long flags;
+	int record_index;
+	int ret = 0;
+
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+	record_index = get_low_record_time_index(dev, port, NULL);
+
+	if (record_index < 0) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	set_required_record(dev, port, rec, record_index);
+out:
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+	return ret;
+}
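get_low_record_time_index() above converts the winning record's absolute time_to_run deadline (wall-clock nanoseconds from ktime_get_real_ns()) into a whole-second delay, clamping already-expired deadlines to zero. A standalone sketch of that conversion:

/* Standalone sketch of the deadline-to-delay conversion; mirrors
 * (low_record_time < curr_time) ? 0 :
 *     div_u64(low_record_time - curr_time, NSEC_PER_SEC)          */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static unsigned int resched_delay_sec(uint64_t time_to_run, uint64_t now_ns)
{
	return (time_to_run < now_ns) ? 0 :
		(unsigned int)((time_to_run - now_ns) / NSEC_PER_SEC);
}

int main(void)
{
	uint64_t now = 100 * NSEC_PER_SEC;

	printf("%u\n", resched_delay_sec(104 * NSEC_PER_SEC, now)); /* 4 */
	printf("%u\n", resched_delay_sec(99 * NSEC_PER_SEC, now));  /* 0 */
	return 0;
}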
@@ -545,9 +762,7 @@ static void alias_guid_work(struct work_struct *work)
 		goto out;
 	}
 
-	set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
-		     &rec->rec_det);
+	set_guid_rec(&dev->ib_dev, rec);
 out:
 	kfree(rec);
 }

@@ -562,6 +777,12 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
 	if (!dev->sriov.is_going_down) {
+		/* If there is pending one should cancell then run, otherwise
+		 * won't run till previous one is ended as same work
+		 * struct is used.
+		 */
+		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
+				    alias_guid_work);
 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
 			   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
 	}

@@ -609,7 +830,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
 {
 	char alias_wq_name[15];
 	int ret = 0;
-	int i, j, k;
+	int i, j;
 	union ib_gid gid;
 
 	if (!mlx4_is_master(dev->dev))
@@ -633,33 +854,25 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
 	for (i = 0 ; i < dev->num_ports; i++) {
 		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
 		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
-		/*Check if the SM doesn't need to assign the GUIDs*/
+		dev->sriov.alias_guid.ports_guid[i].state_flags |=
+				GUID_STATE_NEED_PORT_INIT;
 		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
-			if (mlx4_ib_sm_guid_assign) {
-				dev->sriov.alias_guid.ports_guid[i].
-					all_rec_per_port[j].
-					ownership = MLX4_GUID_DRIVER_ASSIGN;
-				continue;
-			}
-			dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
-					ownership = MLX4_GUID_NONE_ASSIGN;
-			/*mark each val as it was deleted,
-			  till the sysAdmin will give it valid val*/
-			for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
-				*(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
-					all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
-						cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
-			}
+			/* mark each val as it was deleted */
+			memset(dev->sriov.alias_guid.ports_guid[i].
+				all_rec_per_port[j].all_recs, 0xFF,
+				sizeof(dev->sriov.alias_guid.ports_guid[i].
+				all_rec_per_port[j].all_recs));
 		}
 		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
 		/*prepare the records, set them to be allocated by sm*/
+		if (mlx4_ib_sm_guid_assign)
+			for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++)
+				mlx4_set_admin_guid(dev->dev, 0, j, i + 1);
 		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
 			invalidate_guid_record(dev, i + 1, j);
 
 		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
 		dev->sriov.alias_guid.ports_guid[i].port  = i;
-		if (mlx4_ib_sm_guid_assign)
-			set_all_slaves_guids(dev, i);
 
 		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
 		dev->sriov.alias_guid.ports_guid[i].wq =

drivers/infiniband/hw/mlx4/mad.c  (+9 −0)

@@ -1430,6 +1430,10 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
 					        tun_qp->ring[i].addr,
 					        rx_buf_size,
 					        DMA_FROM_DEVICE);
+		if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
+			kfree(tun_qp->ring[i].addr);
+			goto err;
+		}
 	}
 
 	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
@@ -1442,6 +1446,11 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
 						  tun_qp->tx_ring[i].buf.addr,
 						  tx_buf_size,
 						  DMA_TO_DEVICE);
+		if (ib_dma_mapping_error(ctx->ib_dev,
+					 tun_qp->tx_ring[i].buf.map)) {
+			kfree(tun_qp->tx_ring[i].buf.addr);
+			goto tx_err;
+		}
 		tun_qp->tx_ring[i].ah = NULL;
 	}
 	spin_lock_init(&tun_qp->tx_lock);
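The two mad.c hunks add the standard map-then-check DMA pattern: an address returned by ib_dma_map_single() must be validated with ib_dma_mapping_error() before it is handed to hardware, and the backing buffer must be freed on failure. A minimal sketch of the pattern (my_rx_buf and map_one_rx_buf are hypothetical names, not from this commit):

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct my_rx_buf {		/* hypothetical, mirrors tun_qp->ring[i] */
	void *addr;
	u64 map;
};

static int map_one_rx_buf(struct ib_device *ibdev, struct my_rx_buf *buf,
			  size_t size)
{
	buf->addr = kmalloc(size, GFP_KERNEL);
	if (!buf->addr)
		return -ENOMEM;

	buf->map = ib_dma_map_single(ibdev, buf->addr, size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ibdev, buf->map)) {
		kfree(buf->addr);	/* undo the allocation on map failure */
		return -ENOMEM;
	}
	return 0;
}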