drivers/infiniband/core/Makefile  +2 −1

@@ -10,7 +10,8 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
 ib_core-y :=			packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
 				device.o fmr_pool.o cache.o netlink.o \
 				roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
-				multicast.o mad.o smi.o agent.o mad_rmpp.o
+				multicast.o mad.o smi.o agent.o mad_rmpp.o \
+				security.o
 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
 ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
 ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o

drivers/infiniband/core/cache.c  +40 −3

@@ -53,6 +53,7 @@ struct ib_update_work {
 	struct work_struct work;
 	struct ib_device  *device;
 	u8                 port_num;
+	bool		   enforce_security;
 };
 
 union ib_gid zgid;

@@ -911,6 +912,26 @@ int ib_get_cached_pkey(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_get_cached_pkey);
 
+int ib_get_cached_subnet_prefix(struct ib_device *device,
+				u8                port_num,
+				u64              *sn_pfx)
+{
+	unsigned long flags;
+	int p;
+
+	if (port_num < rdma_start_port(device) ||
+	    port_num > rdma_end_port(device))
+		return -EINVAL;
+
+	p = port_num - rdma_start_port(device);
+	read_lock_irqsave(&device->cache.lock, flags);
+	*sn_pfx = device->cache.ports[p].subnet_prefix;
+	read_unlock_irqrestore(&device->cache.lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
+
 int ib_find_cached_pkey(struct ib_device *device,
 			u8                port_num,
 			u16               pkey,

@@ -1022,7 +1043,8 @@ int ib_get_cached_port_state(struct ib_device *device,
 EXPORT_SYMBOL(ib_get_cached_port_state);
 
 static void ib_cache_update(struct ib_device *device,
-			    u8                port)
+			    u8                port,
+			    bool	      enforce_security)
 {
 	struct ib_port_attr       *tprops = NULL;
 	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;

@@ -1108,8 +1130,15 @@ static void ib_cache_update(struct ib_device *device,
 	device->cache.ports[port - rdma_start_port(device)].port_state =
 		tprops->state;
 
+	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
+		tprops->subnet_prefix;
 	write_unlock_irq(&device->cache.lock);
 
+	if (enforce_security)
+		ib_security_cache_change(device,
+					 port,
+					 tprops->subnet_prefix);
+
 	kfree(gid_cache);
 	kfree(old_pkey_cache);
 	kfree(tprops);

@@ -1126,7 +1155,9 @@ static void ib_cache_task(struct work_struct *_work)
 	struct ib_update_work *work =
 		container_of(_work, struct ib_update_work, work);
 
-	ib_cache_update(work->device, work->port_num);
+	ib_cache_update(work->device,
+			work->port_num,
+			work->enforce_security);
 	kfree(work);
 }

@@ -1147,6 +1178,12 @@ static void ib_cache_event(struct ib_event_handler *handler,
 		INIT_WORK(&work->work, ib_cache_task);
 		work->device   = event->device;
 		work->port_num = event->element.port_num;
+		if (event->event == IB_EVENT_PKEY_CHANGE ||
+		    event->event == IB_EVENT_GID_CHANGE)
+			work->enforce_security = true;
+		else
+			work->enforce_security = false;
+
 		queue_work(ib_wq, &work->work);
 	}
 }

@@ -1172,7 +1209,7 @@ int ib_cache_setup_one(struct ib_device *device)
 		goto out;
 
 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
-		ib_cache_update(device, p + rdma_start_port(device));
+		ib_cache_update(device, p + rdma_start_port(device), true);
 
 	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
 			      device, ib_cache_event);
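The new ib_get_cached_subnet_prefix() helper reads the prefix under the cache read lock, so ib_core-internal callers can query it without re-reading port attributes from the hardware. A minimal usage sketch follows; dump_subnet_prefixes() is a hypothetical caller and not part of this patch, everything it calls exists in ib_core as changed above.

	/* Hypothetical ib_core-internal helper: print the cached subnet
	 * prefix of every port of a device (assumes "core_priv.h" and
	 * <rdma/ib_verbs.h> are already included). */
	static void dump_subnet_prefixes(struct ib_device *dev)
	{
		u8 port;

		for (port = rdma_start_port(dev);
		     port <= rdma_end_port(dev); port++) {
			u64 sn_pfx;

			/* Returns -EINVAL for an out-of-range port number. */
			if (ib_get_cached_subnet_prefix(dev, port, &sn_pfx))
				continue;

			pr_info("%s port %u: subnet prefix 0x%llx\n",
				dev->name, port,
				(unsigned long long)sn_pfx);
		}
	}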
drivers/infiniband/core/core_priv.h  +115 −0

@@ -38,6 +38,16 @@
 #include <linux/cgroup_rdma.h>
 
 #include <rdma/ib_verbs.h>
+#include <rdma/ib_mad.h>
+#include "mad_priv.h"
+
+struct pkey_index_qp_list {
+	struct list_head    pkey_index_list;
+	u16                 pkey_index;
+	/* Lock to hold while iterating the qp_list. */
+	spinlock_t          qp_list_lock;
+	struct list_head    qp_list;
+};
 
 #if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
 int cma_configfs_init(void);

@@ -176,4 +186,109 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
 int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
 			     struct netlink_callback *cb);
 
+int ib_get_cached_subnet_prefix(struct ib_device *device,
+				u8                port_num,
+				u64              *sn_pfx);
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+int ib_security_pkey_access(struct ib_device *dev,
+			    u8 port_num,
+			    u16 pkey_index,
+			    void *sec);
+
+void ib_security_destroy_port_pkey_list(struct ib_device *device);
+
+void ib_security_cache_change(struct ib_device *device,
+			      u8 port_num,
+			      u64 subnet_prefix);
+
+int ib_security_modify_qp(struct ib_qp *qp,
+			  struct ib_qp_attr *qp_attr,
+			  int qp_attr_mask,
+			  struct ib_udata *udata);
+
+int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev);
+void ib_destroy_qp_security_begin(struct ib_qp_security *sec);
+void ib_destroy_qp_security_abort(struct ib_qp_security *sec);
+void ib_destroy_qp_security_end(struct ib_qp_security *sec);
+int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev);
+void ib_close_shared_qp_security(struct ib_qp_security *sec);
+int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
+				enum ib_qp_type qp_type);
+void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
+int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
+#else
+static inline int ib_security_pkey_access(struct ib_device *dev,
+					  u8 port_num,
+					  u16 pkey_index,
+					  void *sec)
+{
+	return 0;
+}
+
+static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
+{
+}
+
+static inline void ib_security_cache_change(struct ib_device *device,
+					    u8 port_num,
+					    u64 subnet_prefix)
+{
+}
+
+static inline int ib_security_modify_qp(struct ib_qp *qp,
+					struct ib_qp_attr *qp_attr,
+					int qp_attr_mask,
+					struct ib_udata *udata)
+{
+	return qp->device->modify_qp(qp->real_qp,
+				     qp_attr,
+				     qp_attr_mask,
+				     udata);
+}
+
+static inline int ib_create_qp_security(struct ib_qp *qp,
+					struct ib_device *dev)
+{
+	return 0;
+}
+
+static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
+{
+}
+
+static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
+{
+}
+
+static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec)
+{
+}
+
+static inline int ib_open_shared_qp_security(struct ib_qp *qp,
+					     struct ib_device *dev)
+{
+	return 0;
+}
+
+static inline void ib_close_shared_qp_security(struct ib_qp_security *sec)
+{
+}
+
+static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
+					      enum ib_qp_type qp_type)
+{
+	return 0;
+}
+
+static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
+{
+}
+
+static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
+					  u16 pkey_index)
+{
+	return 0;
+}
+#endif
 #endif /* _CORE_PRIV_H */
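When CONFIG_SECURITY_INFINIBAND is disabled, the ib_security_modify_qp() stub above simply forwards to the driver's modify_qp hook, so common code can call it unconditionally; in the full series the QP-modify path in verbs.c (not shown in this diff) is switched over to this helper. A minimal call-site sketch, with the surrounding function being hypothetical:

	/* Hypothetical ib_core-internal wrapper illustrating the intended
	 * call pattern (assumes "core_priv.h" and <rdma/ib_verbs.h>). */
	static int example_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
				     int attr_mask, struct ib_udata *udata)
	{
		/*
		 * With CONFIG_SECURITY_INFINIBAND=y this runs the P_Key/LSM
		 * checks before touching the hardware; otherwise it reduces
		 * to qp->device->modify_qp(qp->real_qp, attr, attr_mask,
		 * udata), exactly as the stub shows.
		 */
		return ib_security_modify_qp(qp, attr, attr_mask, udata);
	}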
drivers/infiniband/core/device.c  +86 −0

@@ -39,6 +39,8 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
+#include <linux/security.h>
+#include <linux/notifier.h>
 #include <rdma/rdma_netlink.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>

@@ -82,6 +84,14 @@ static LIST_HEAD(client_list);
 static DEFINE_MUTEX(device_mutex);
 static DECLARE_RWSEM(lists_rwsem);
 
+static int ib_security_change(struct notifier_block *nb, unsigned long event,
+			      void *lsm_data);
+static void ib_policy_change_task(struct work_struct *work);
+static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
+
+static struct notifier_block ibdev_lsm_nb = {
+	.notifier_call = ib_security_change,
+};
 
 static int ib_device_check_mandatory(struct ib_device *device)
 {

@@ -325,6 +335,64 @@ void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len)
 }
 EXPORT_SYMBOL(ib_get_device_fw_str);
 
+static int setup_port_pkey_list(struct ib_device *device)
+{
+	int i;
+
+	/**
+	 * device->port_pkey_list is indexed directly by the port number,
+	 * Therefore it is declared as a 1 based array with potential empty
+	 * slots at the beginning.
+	 */
+	device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
+					 sizeof(*device->port_pkey_list),
+					 GFP_KERNEL);
+
+	if (!device->port_pkey_list)
+		return -ENOMEM;
+
+	for (i = 0; i < (rdma_end_port(device) + 1); i++) {
+		spin_lock_init(&device->port_pkey_list[i].list_lock);
+		INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
+	}
+
+	return 0;
+}
+
+static void ib_policy_change_task(struct work_struct *work)
+{
+	struct ib_device *dev;
+
+	down_read(&lists_rwsem);
+	list_for_each_entry(dev, &device_list, core_list) {
+		int i;
+
+		for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) {
+			u64 sp;
+			int ret = ib_get_cached_subnet_prefix(dev, i, &sp);
+
+			WARN_ONCE(ret,
+				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
+				  ret);
+			ib_security_cache_change(dev, i, sp);
+		}
+	}
+	up_read(&lists_rwsem);
+}
+
+static int ib_security_change(struct notifier_block *nb, unsigned long event,
+			      void *lsm_data)
+{
+	if (event != LSM_POLICY_CHANGE)
+		return NOTIFY_DONE;
+
+	schedule_work(&ib_policy_change_work);
+
+	return NOTIFY_OK;
+}
+
 /**
  * ib_register_device - Register an IB device with IB core
  * @device:Device to register

@@ -385,6 +453,12 @@ int ib_register_device(struct ib_device *device,
 		goto out;
 	}
 
+	ret = setup_port_pkey_list(device);
+	if (ret) {
+		pr_warn("Couldn't create per port_pkey_list\n");
+		goto out;
+	}
+
 	ret = ib_cache_setup_one(device);
 	if (ret) {
 		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");

@@ -468,6 +542,9 @@ void ib_unregister_device(struct ib_device *device)
 	ib_device_unregister_sysfs(device);
 	ib_cache_cleanup_one(device);
 
+	ib_security_destroy_port_pkey_list(device);
+	kfree(device->port_pkey_list);
+
 	down_write(&lists_rwsem);
 	spin_lock_irqsave(&device->client_data_lock, flags);
 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)

@@ -1082,10 +1159,18 @@ static int __init ib_core_init(void)
 		goto err_sa;
 	}
 
+	ret = register_lsm_notifier(&ibdev_lsm_nb);
+	if (ret) {
+		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
+		goto err_ibnl_clients;
+	}
+
 	ib_cache_setup();
 
 	return 0;
 
+err_ibnl_clients:
+	ib_remove_ibnl_clients();
 err_sa:
 	ib_sa_cleanup();
 err_mad:

@@ -1105,6 +1190,7 @@ static int __init ib_core_init(void)
 
 static void __exit ib_core_cleanup(void)
 {
+	unregister_lsm_notifier(&ibdev_lsm_nb);
 	ib_cache_cleanup();
 	ib_remove_ibnl_clients();
 	ib_sa_cleanup();
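The device.c changes deliberately keep the LSM notifier callback trivial: ib_security_change() only schedules ib_policy_change_work, and the expensive rescan of every device's cached subnet prefixes runs later in process context. A minimal sketch of the same pattern for a hypothetical out-of-core consumer, using only interfaces introduced or used by this series (register_lsm_notifier(), LSM_POLICY_CHANGE):

	/* Hypothetical module reacting to LSM policy reloads; all names with
	 * the my_ prefix are invented for illustration. */
	#include <linux/security.h>
	#include <linux/notifier.h>
	#include <linux/workqueue.h>

	static void my_policy_change_task(struct work_struct *work)
	{
		/* Revalidate any cached security state here, in process
		 * context, not in the notifier itself. */
	}
	static DECLARE_WORK(my_policy_change_work, my_policy_change_task);

	static int my_lsm_event(struct notifier_block *nb, unsigned long event,
				void *lsm_data)
	{
		if (event != LSM_POLICY_CHANGE)
			return NOTIFY_DONE;
		schedule_work(&my_policy_change_work);
		return NOTIFY_OK;
	}

	static struct notifier_block my_lsm_nb = {
		.notifier_call = my_lsm_event,
	};
	/* register_lsm_notifier(&my_lsm_nb) at init,
	 * unregister_lsm_notifier(&my_lsm_nb) at exit. */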
ret %d\n", ret); goto err_ibnl_clients; } ib_cache_setup(); return 0; err_ibnl_clients: ib_remove_ibnl_clients(); err_sa: ib_sa_cleanup(); err_mad: Loading @@ -1105,6 +1190,7 @@ static int __init ib_core_init(void) static void __exit ib_core_cleanup(void) { unregister_lsm_notifier(&ibdev_lsm_nb); ib_cache_cleanup(); ib_remove_ibnl_clients(); ib_sa_cleanup(); Loading drivers/infiniband/core/mad.c +44 −8 Original line number Diff line number Diff line Loading @@ -40,9 +40,11 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/security.h> #include <rdma/ib_cache.h> #include "mad_priv.h" #include "core_priv.h" #include "mad_rmpp.h" #include "smi.h" #include "opa_smi.h" Loading Loading @@ -369,6 +371,12 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, atomic_set(&mad_agent_priv->refcount, 1); init_completion(&mad_agent_priv->comp); ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type); if (ret2) { ret = ERR_PTR(ret2); goto error4; } spin_lock_irqsave(&port_priv->reg_lock, flags); mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; Loading @@ -386,7 +394,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, if (method) { if (method_in_use(&method, mad_reg_req)) goto error4; goto error5; } } ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, Loading @@ -402,14 +410,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, if (is_vendor_method_in_use( vendor_class, mad_reg_req)) goto error4; goto error5; } } ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); } if (ret2) { ret = ERR_PTR(ret2); goto error4; goto error5; } } Loading @@ -418,9 +426,10 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, spin_unlock_irqrestore(&port_priv->reg_lock, flags); return &mad_agent_priv->agent; error4: error5: spin_unlock_irqrestore(&port_priv->reg_lock, flags); ib_mad_agent_security_cleanup(&mad_agent_priv->agent); error4: kfree(reg_req); error3: kfree(mad_agent_priv); Loading Loading @@ -491,6 +500,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, struct ib_mad_agent *ret; struct ib_mad_snoop_private *mad_snoop_priv; int qpn; int err; /* Validate parameters */ if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) || Loading Loading @@ -525,17 +535,25 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, mad_snoop_priv->agent.port_num = port_num; mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; init_completion(&mad_snoop_priv->comp); err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type); if (err) { ret = ERR_PTR(err); goto error2; } mad_snoop_priv->snoop_index = register_snoop_agent( &port_priv->qp_info[qpn], mad_snoop_priv); if (mad_snoop_priv->snoop_index < 0) { ret = ERR_PTR(mad_snoop_priv->snoop_index); goto error2; goto error3; } atomic_set(&mad_snoop_priv->refcount, 1); return &mad_snoop_priv->agent; error3: ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); error2: kfree(mad_snoop_priv); error1: Loading Loading @@ -581,6 +599,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) deref_mad_agent(mad_agent_priv); wait_for_completion(&mad_agent_priv->comp); ib_mad_agent_security_cleanup(&mad_agent_priv->agent); kfree(mad_agent_priv->reg_req); kfree(mad_agent_priv); } Loading @@ -599,6 +619,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv) deref_snoop_agent(mad_snoop_priv); wait_for_completion(&mad_snoop_priv->comp); 
@@ -1215,12 +1237,16 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 
 	/* Walk list of send WRs and post each on send list */
 	for (; send_buf; send_buf = next_send_buf) {
-
 		mad_send_wr = container_of(send_buf,
 					   struct ib_mad_send_wr_private,
 					   send_buf);
 		mad_agent_priv = mad_send_wr->mad_agent_priv;
+
+		ret = ib_mad_enforce_security(mad_agent_priv,
+					      mad_send_wr->send_wr.pkey_index);
+		if (ret)
+			goto error;
+
 		if (!send_buf->mad_agent->send_handler ||
 		    (send_buf->timeout_ms &&
 		     !send_buf->mad_agent->recv_handler)) {

@@ -1946,6 +1972,14 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	struct ib_mad_send_wr_private *mad_send_wr;
 	struct ib_mad_send_wc mad_send_wc;
 	unsigned long flags;
+	int ret;
 
+	ret = ib_mad_enforce_security(mad_agent_priv,
+				      mad_recv_wc->wc->pkey_index);
+	if (ret) {
+		ib_free_recv_mad(mad_recv_wc);
+		deref_mad_agent(mad_agent_priv);
+	}
 
 	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
 	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);

@@ -2003,6 +2037,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 						   mad_recv_wc);
 		deref_mad_agent(mad_agent_priv);
 	}
+
+	return;
 }
 
 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
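With the security setup now performed inside ib_register_mad_agent() and ib_register_mad_snoop(), an LSM denial simply surfaces as an ERR_PTR return, which existing MAD consumers already check; the send and receive paths are then re-checked per packet via ib_mad_enforce_security(). A hedged registration sketch, where the example_ names and handler bodies are hypothetical and not part of this patch:

	/* Hypothetical MAD consumer (assumes <rdma/ib_mad.h>, <linux/err.h>). */
	static void example_send_handler(struct ib_mad_agent *agent,
					 struct ib_mad_send_wc *wc)
	{
	}

	static void example_recv_handler(struct ib_mad_agent *agent,
					 struct ib_mad_send_buf *send_buf,
					 struct ib_mad_recv_wc *wc)
	{
	}

	static struct ib_mad_agent *example_register(struct ib_device *device,
						     u8 port_num)
	{
		struct ib_mad_agent *agent;

		agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
					      NULL, 0, example_send_handler,
					      example_recv_handler, NULL, 0);
		/* A security denial is reported the same way as any other
		 * registration failure. */
		if (IS_ERR(agent))
			pr_warn("MAD agent registration failed: %ld\n",
				PTR_ERR(agent));
		return agent;
	}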