drivers/infiniband/core/uverbs_marshall.c +4 −0

@@ -140,5 +140,9 @@ void ib_copy_path_rec_from_user(struct ib_sa_path_rec *dst,
 	dst->packet_life_time	= src->packet_life_time;
 	dst->preference		= src->preference;
 	dst->packet_life_time_selector = src->packet_life_time_selector;
+
+	memset(dst->smac, 0, sizeof(dst->smac));
+	memset(dst->dmac, 0, sizeof(dst->dmac));
+	dst->vlan_id = 0xffff;
 }
 EXPORT_SYMBOL(ib_copy_path_rec_from_user);
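Why this matters: ib_copy_path_rec_from_user() copies field by field, so members the user-space ABI does not carry (smac, dmac, vlan_id) would otherwise keep whatever stale bytes were already in the destination. A minimal sketch of the same defensive pattern, using hypothetical struct and field names rather than the kernel's:

#include <string.h>
#include <stdint.h>

/* Hypothetical kernel-side record: the last three fields have no
 * counterpart in the user-space ABI, so they must get safe defaults. */
struct krec {
	uint32_t dlid, slid;		/* copied from user */
	uint8_t  smac[6], dmac[6];	/* kernel-only */
	uint16_t vlan_id;		/* kernel-only; 0xffff = "no VLAN" */
};

struct urec {
	uint32_t dlid, slid;
};

static void copy_rec_from_user(struct krec *dst, const struct urec *src)
{
	dst->dlid = src->dlid;
	dst->slid = src->slid;
	/* Default every field user space cannot set, exactly as the
	 * patch above does for ib_sa_path_rec. */
	memset(dst->smac, 0, sizeof(dst->smac));
	memset(dst->dmac, 0, sizeof(dst->dmac));
	dst->vlan_id = 0xffff;
}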
drivers/infiniband/hw/ipath/ipath_user_pages.c +3 −3

@@ -54,7 +54,7 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
 
 /* call with current->mm->mmap_sem held */
 static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-				  struct page **p, struct vm_area_struct **vma)
+				  struct page **p)
 {
 	unsigned long lock_limit;
 	size_t got;
@@ -74,7 +74,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 		ret = get_user_pages(current, current->mm,
 				     start_page + got * PAGE_SIZE,
 				     num_pages - got, 1, 1,
-				     p + got, vma);
+				     p + got, NULL);
 		if (ret < 0)
 			goto bail_release;
 	}
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 
 	down_write(&current->mm->mmap_sem);
 
-	ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
+	ret = __ipath_get_user_pages(start_page, num_pages, p);
 
 	up_write(&current->mm->mmap_sem);
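The ipath change drops a vma output parameter that no caller ever consumed: the eight-argument get_user_pages() of this kernel era accepts NULL for its vmas array and simply skips filling it. A condensed sketch of the chunked pinning loop, under a hypothetical pin_user_range() name; the caller is assumed to hold mmap_sem, as the original comment requires:

/* Sketch (3.x-era API), mirroring __ipath_get_user_pages(): pin
 * num_pages user pages starting at start_page, chunk by chunk.
 * The final NULL is the vmas array we no longer ask for. */
static int pin_user_range(unsigned long start_page, size_t num_pages,
			  struct page **p)
{
	size_t got;
	int ret;

	for (got = 0; got < num_pages; got += ret) {
		ret = get_user_pages(current, current->mm,
				     start_page + got * PAGE_SIZE,
				     num_pages - got,
				     1, 1,	/* write, force */
				     p + got, NULL);
		if (ret < 0) {
			while (got--)	/* undo partial progress */
				put_page(p[got]);
			return ret;
		}
	}
	return 0;
}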
drivers/infiniband/hw/mlx4/main.c +115 −54

@@ -59,6 +59,7 @@
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0   0xA0
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev)
 	return dmfs;
 }
 
+static int num_ib_ports(struct mlx4_dev *dev)
+{
+	int ib_ports = 0;
+	int i;
+
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ib_ports++;
+
+	return ib_ports;
+}
+
 static int mlx4_ib_query_device(struct ib_device *ibdev,
 				struct ib_device_attr *props)
 {
@@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	struct ib_smp *in_mad  = NULL;
 	struct ib_smp *out_mad = NULL;
 	int err = -ENOMEM;
+	int have_ib_ports;
 
 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 
 	memset(props, 0, sizeof *props);
 
+	have_ib_ports = num_ib_ports(dev->dev);
+
 	props->fw_ver = dev->dev->caps.fw_ver;
 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT		|
@@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
-	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+	if (dev->dev->caps.max_gso_sz &&
+	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
 		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
@@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->state		= IB_PORT_DOWN;
 	props->phys_state	= state_to_phys_state(props->state);
 	props->active_mtu	= IB_MTU_256;
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 	ndev = iboe->netdevs[port - 1];
 	if (!ndev)
 		goto out_unlock;
@@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 					IB_PORT_ACTIVE : IB_PORT_DOWN;
 	props->phys_state	= state_to_phys_state(props->state);
 out_unlock:
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 out:
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return err;
@@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 	if (!mqp->port)
 		return 0;
 
-	spin_lock(&mdev->iboe.lock);
+	spin_lock_bh(&mdev->iboe.lock);
 	ndev = mdev->iboe.netdevs[mqp->port - 1];
 	if (ndev)
 		dev_hold(ndev);
-	spin_unlock(&mdev->iboe.lock);
+	spin_unlock_bh(&mdev->iboe.lock);
 
 	if (ndev) {
 		ret = 1;
@@ -1262,11 +1279,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	mutex_lock(&mqp->mutex);
 	ge = find_gid_entry(mqp, gid->raw);
 	if (ge) {
-		spin_lock(&mdev->iboe.lock);
+		spin_lock_bh(&mdev->iboe.lock);
 		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
 		if (ndev)
 			dev_hold(ndev);
-		spin_unlock(&mdev->iboe.lock);
+		spin_unlock_bh(&mdev->iboe.lock);
 		if (ndev)
 			dev_put(ndev);
 		list_del(&ge->list);
@@ -1387,6 +1404,9 @@ static void update_gids_task(struct work_struct *work)
 	int err;
 	struct mlx4_dev	*dev = gw->dev->dev;
 
+	if (!gw->dev->ib_active)
+		return;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
@@ -1417,6 +1437,9 @@ static void reset_gids_task(struct work_struct *work)
 	int err;
 	struct mlx4_dev	*dev = gw->dev->dev;
 
+	if (!gw->dev->ib_active)
+		return;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		pr_warn("reset gid table failed\n");
@@ -1551,7 +1574,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 		return 0;
 
 	iboe = &ibdev->iboe;
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 
 	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
 		if ((netif_is_bond_master(real_dev) &&
@@ -1561,7 +1584,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 			update_gid_table(ibdev, port, gid,
 					 event == NETDEV_DOWN, 0);
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 	return 0;
 }
@@ -1634,13 +1657,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
 	new_smac = mlx4_mac_to_u64(dev->dev_addr);
 	read_unlock(&dev_base_lock);
 
+	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
+
+	/* no need for update QP1 and mac registration in non-SRIOV */
+	if (!mlx4_is_mfunc(ibdev->dev))
+		return;
+
 	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
 	qp = ibdev->qp1_proxy[port - 1];
 	if (qp) {
 		int new_smac_index;
-		u64 old_smac = qp->pri.smac;
+		u64 old_smac;
 		struct mlx4_update_qp_params update_params;
 
+		mutex_lock(&qp->mutex);
+		old_smac = qp->pri.smac;
 		if (new_smac == old_smac)
 			goto unlock;
@@ -1655,17 +1686,20 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
 			release_mac = new_smac;
 			goto unlock;
 		}
-
+		/* if old port was zero, no mac was yet registered for this QP */
+		if (qp->pri.smac_port)
+			release_mac = old_smac;
 		qp->pri.smac = new_smac;
+		qp->pri.smac_port = port;
 		qp->pri.smac_index = new_smac_index;
-
-		release_mac = old_smac;
 	}
 
 unlock:
-	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 	if (release_mac != MLX4_IB_INVALID_MAC)
 		mlx4_unregister_mac(ibdev->dev, port, release_mac);
+	if (qp)
+		mutex_unlock(&qp->mutex);
+	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 }
 
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
@@ -1676,6 +1710,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 	struct inet6_dev *in6_dev;
 	union ib_gid  *pgid;
 	struct inet6_ifaddr *ifp;
+	union ib_gid default_gid;
 #endif
 	union ib_gid gid;
@@ -1696,12 +1731,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 		in_dev_put(in_dev);
 	}
 #if IS_ENABLED(CONFIG_IPV6)
+	mlx4_make_default_gid(dev, &default_gid);
 	/* IPv6 gids */
 	in6_dev = in6_dev_get(dev);
 	if (in6_dev) {
 		read_lock_bh(&in6_dev->lock);
 		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 			pgid = (union ib_gid *)&ifp->addr;
+			if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+				continue;
 			update_gid_table(ibdev, port, pgid, 0, 0);
 		}
 		read_unlock_bh(&in6_dev->lock);
@@ -1723,24 +1761,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 	struct net_device *dev;
 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
 	int i;
+	int err = 0;
 
-	for (i = 1; i <= ibdev->num_ports; ++i)
-		if (reset_gid_table(ibdev, i))
-			return -1;
+	for (i = 1; i <= ibdev->num_ports; ++i) {
+		if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
+		    IB_LINK_LAYER_ETHERNET) {
+			err = reset_gid_table(ibdev, i);
+			if (err)
+				goto out;
+		}
+	}
 
 	read_lock(&dev_base_lock);
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 
 	for_each_netdev(&init_net, dev) {
 		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
-		if (port)
+		/* port will be non-zero only for ETH ports */
+		if (port) {
+			mlx4_ib_set_default_gid(ibdev, dev, port);
 			mlx4_ib_get_dev_addr(dev, ibdev, port);
+		}
 	}
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 	read_unlock(&dev_base_lock);
-
-	return 0;
+out:
+	return err;
 }
@@ -1754,7 +1801,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 
 	iboe = &ibdev->iboe;
 
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
 		enum ib_port_state	port_state = IB_PORT_NOP;
 		struct net_device	*old_master = iboe->masters[port - 1];
@@ -1786,35 +1833,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 			port_state = (netif_running(curr_netdev) &&
 				      netif_carrier_ok(curr_netdev)) ?
 				IB_PORT_ACTIVE : IB_PORT_DOWN;
 
 			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-		} else {
-			reset_gid_table(ibdev, port);
-		}
-		/* if using bonding/team and a slave port is down, we don't the bond IP
-		 * based gids in the table since flows that select port by gid may get
-		 * the down port.
-		 */
-		if (curr_master && (port_state == IB_PORT_DOWN)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-		}
-		/* if bonding is used it is possible that we add it to masters
-		 * only after IP address is assigned to the net bonding
-		 * interface.
-		 */
-		if (curr_master && (old_master != curr_master)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
-		}
-		if (!curr_master && (old_master != curr_master)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-			mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+			if (curr_master) {
+				/* if using bonding/team and a slave port is down, we
+				 * don't want the bond IP based gids in the table since
+				 * flows that select port by gid may get the down port.
+				 */
+				if (port_state == IB_PORT_DOWN) {
+					reset_gid_table(ibdev, port);
+					mlx4_ib_set_default_gid(ibdev,
+								curr_netdev, port);
+				} else {
+					/* gids from the upper dev (bond/team)
+					 * should appear in port's gid table
+					 */
+					mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+				}
+			}
+			/* if bonding is used it is possible that we add it to
+			 * masters only after IP address is assigned to the
+			 * net bonding interface.
+			 */
+			if (curr_master && (old_master != curr_master)) {
+				reset_gid_table(ibdev, port);
+				mlx4_ib_set_default_gid(ibdev,
							curr_netdev, port);
+				mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+			}
+
+			if (!curr_master && (old_master != curr_master)) {
+				reset_gid_table(ibdev, port);
+				mlx4_ib_set_default_gid(ibdev,
							curr_netdev, port);
+				mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+			}
+		} else {
+			reset_gid_table(ibdev, port);
 		}
 	}
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 
 	if (update_qps_port > 0)
 		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
@@ -2156,6 +2215,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			goto err_steer_free_bitmap;
 	}
 
+	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
+
 	if (ib_register_device(&ibdev->ib_dev, NULL))
 		goto err_steer_free_bitmap;
@@ -2192,12 +2254,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			}
 		}
 #endif
-		for (i = 1 ; i <= ibdev->num_ports ; ++i)
-			reset_gid_table(ibdev, i);
-		rtnl_lock();
-		mlx4_ib_scan_netdevs(ibdev, NULL, 0);
-		rtnl_unlock();
-		mlx4_ib_init_gid_table(ibdev);
+		if (mlx4_ib_init_gid_table(ibdev))
+			goto err_notif;
 	}
 
 	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -2345,6 +2403,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
+	ibdev->ib_active = false;
+	flush_workqueue(wq);
+
 	mlx4_ib_close_sriov(ibdev);
 	mlx4_ib_mad_cleanup(ibdev);
 	ib_unregister_device(&ibdev->ib_dev);
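Three of the main.c changes form one fix for a device-removal race: the GID work handlers now return early when ib_active is false, and (in the last hunk) mlx4_ib_remove() clears ib_active before flush_workqueue(wq), so queued GID work can no longer touch a device that is mid-teardown. A generic sketch of this quiesce-then-flush pattern; all names here are illustrative, not the driver's:

struct mydev {
	bool active;			/* cleared first on removal */
	struct workqueue_struct *wq;
};

struct mywork {
	struct work_struct work;
	struct mydev *dev;
};

static void update_task(struct work_struct *work)
{
	struct mywork *gw = container_of(work, struct mywork, work);

	if (!gw->dev->active)		/* device is going away */
		return;
	/* ... only now is it safe to issue firmware commands ... */
}

static void mydev_remove(struct mydev *dev)
{
	dev->active = false;		/* 1: pending work becomes a no-op */
	flush_workqueue(dev->wq);	/* 2: drain work already queued    */
	/* 3: unregister and free resources */
}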
drivers/infiniband/hw/mlx4/mlx4_ib.h +1 −0

@@ -451,6 +451,7 @@ struct mlx4_ib_iboe {
 	spinlock_t		lock;
 	struct net_device      *netdevs[MLX4_MAX_PORTS];
 	struct net_device      *masters[MLX4_MAX_PORTS];
+	atomic64_t		mac[MLX4_MAX_PORTS];
 	struct notifier_block	nb;
 	struct notifier_block	nb_inet;
 	struct notifier_block	nb_inet6;
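The new mac[] array pairs with the main.c changes: writers store each port's current MAC with atomic64_set() (seeded at probe, refreshed from mlx4_ib_update_qps()), so data-path readers can fetch it without taking iboe->lock. A sketch of the read side, which is assumed here rather than shown in this diff:

/* Writer, as in the diff: refresh the cached MAC when the netdev
 * address changes. */
atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);

/* Assumed reader (e.g. while building a RoCE header): atomic64_read()
 * gives a tear-free 64-bit load even on 32-bit machines, so the hot
 * path needs no spinlock. */
u64 smac = atomic64_read(&ibdev->iboe.mac[port - 1]);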
drivers/infiniband/hw/mlx4/mr.c +5 −2

@@ -234,14 +234,13 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 				       0);
 		if (IS_ERR(mmr->umem)) {
 			err = PTR_ERR(mmr->umem);
+			/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
 			mmr->umem = NULL;
 			goto release_mpt_entry;
 		}
 		n = ib_umem_page_count(mmr->umem);
 		shift = ilog2(mmr->umem->page_size);
 
-		mmr->mmr.iova       = virt_addr;
-		mmr->mmr.size       = length;
 		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
 					      virt_addr, length, n, shift,
 					      *pmpt_entry);
@@ -249,6 +248,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 			ib_umem_release(mmr->umem);
 			goto release_mpt_entry;
 		}
+		mmr->mmr.iova       = virt_addr;
+		mmr->mmr.size       = length;
 
 		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
 		if (err) {
@@ -262,6 +263,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 	 * return a failure. But dereg_mr will free the resources.
 	 */
 	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
+	if (!err && flags & IB_MR_REREG_ACCESS)
+		mmr->mmr.access = mr_access_flags;
 
 release_mpt_entry:
 	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
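The mr.c hunks all enforce one rule: update the driver's bookkeeping (mmr->mmr.iova, .size, .access) only after the corresponding firmware write succeeds, and NULL out mmr->umem on failure so a later mlx4_ib_dereg_mr() cannot free a stale pointer. A compact sketch of the rule with hypothetical names (hw_commit() standing in for the mlx4_mr_*_write calls):

static int rereg(struct my_mr *mr, u64 new_iova, u64 new_size)
{
	int err;

	err = hw_commit(mr, new_iova, new_size);
	if (err)
		return err;	/* software state still matches hardware */

	/* Hardware accepted the change; only now mirror it into the
	 * driver's copy, as the patch does for iova/size/access. */
	mr->iova = new_iova;
	mr->size = new_size;
	return 0;
}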