diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 64e6e2fa1e5de6022abfe3550aae58bf618f80ae..aa3859d9e97818b62d6fc6e0834c6b5b7c5519fc 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -351,6 +351,7 @@ struct fastrpc_mmap {
 	int uncached;
 	int secure;
 	uintptr_t attr;
+	bool is_filemap; /* flag to indicate map used in process init */
 };
 
 enum fastrpc_perfkeys {
@@ -406,6 +407,7 @@ struct fastrpc_file {
 	struct mutex perf_mutex;
 	struct pm_qos_request pm_qos_req;
 	int qos_request;
+	struct mutex pm_qos_mutex;
 	struct mutex map_mutex;
 	struct mutex fl_map_mutex;
 	int refcount;
@@ -687,9 +689,10 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
 
 	spin_lock(&me->hlock);
 	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
-		if (map->raddr == va &&
+		if (map->refs == 1 && map->raddr == va &&
 		    map->raddr + map->len == va + len &&
-		    map->refs == 1) {
+		    /* Remove map if not used in process initialization */
+		    !map->is_filemap) {
 			match = map;
 			hlist_del_init(&map->hn);
 			break;
@@ -701,9 +704,10 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
 		return 0;
 	}
 	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
-		if (map->raddr == va &&
+		if (map->refs == 1 && map->raddr == va &&
 		    map->raddr + map->len == va + len &&
-		    map->refs == 1) {
+		    /* Remove map if not used in process initialization */
+		    !map->is_filemap) {
 			match = map;
 			hlist_del_init(&map->hn);
 			break;
@@ -843,6 +847,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
 	map->fl = fl;
 	map->fd = fd;
 	map->attr = attr;
+	map->is_filemap = false;
 	if (mflags == ADSP_MMAP_HEAP_ADDR ||
 	    mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
 		unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
@@ -2205,6 +2210,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
 		mutex_lock(&fl->fl_map_mutex);
 		VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
 			init->file, init->filelen, mflags, &file));
+		if (file)
+			file->is_filemap = true;
 		mutex_unlock(&fl->fl_map_mutex);
 		if (err)
 			goto bail;
@@ -3060,6 +3067,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
 	mutex_destroy(&fl->perf_mutex);
 	mutex_destroy(&fl->fl_map_mutex);
 	mutex_destroy(&fl->map_mutex);
+	mutex_destroy(&fl->pm_qos_mutex);
 	kfree(fl);
 	return 0;
 }
@@ -3561,6 +3569,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
 	hlist_add_head(&fl->hn, &me->drivers);
 	spin_unlock(&me->hlock);
 	mutex_init(&fl->perf_mutex);
+	mutex_init(&fl->pm_qos_mutex);
 	return 0;
 }
 
@@ -3657,12 +3666,14 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
 		VERIFY(err, latency != 0);
 		if (err)
 			goto bail;
+		mutex_lock(&fl->pm_qos_mutex);
 		if (!fl->qos_request) {
 			pm_qos_add_request(&fl->pm_qos_req,
 				PM_QOS_CPU_DMA_LATENCY, latency);
 			fl->qos_request = 1;
 		} else
 			pm_qos_update_request(&fl->pm_qos_req, latency);
+		mutex_unlock(&fl->pm_qos_mutex);
 		break;
 	case FASTRPC_CONTROL_SMMU:
 		if (!me->legacy)
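Note on the adsprpc changes: FASTRPC_CONTROL_LATENCY performs a classic check-then-act on fl->qos_request, so two threads entering the ioctl concurrently could both observe qos_request == 0 and both call pm_qos_add_request() on the same handle; the new fl->pm_qos_mutex serializes that window. The is_filemap flag independently prevents fastrpc_mmap_remove() from tearing down the mapping created for process initialization. Below is a userspace analogue of the locking pattern only — pthreads stand in for the kernel mutex, and the qos_* globals are illustrative, not driver state:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t qos_mutex = PTHREAD_MUTEX_INITIALIZER;
	static int qos_request;	/* mirrors fl->qos_request */
	static int add_calls;	/* counts "pm_qos_add_request()" invocations */

	static void *set_latency(void *arg)
	{
		pthread_mutex_lock(&qos_mutex);
		if (!qos_request) {
			add_calls++;	/* pm_qos_add_request(): must happen once */
			qos_request = 1;
		}			/* else: pm_qos_update_request() */
		pthread_mutex_unlock(&qos_mutex);
		return arg;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, set_latency, NULL);
		pthread_create(&b, NULL, set_latency, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		printf("add_calls=%d (always 1 with the mutex held)\n", add_calls);
		return 0;
	}

Without the mutex, both threads can pass the !qos_request test before either sets the flag, which is exactly the double-add the patch closes.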
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 369869a29ebd4d2ead3fda4efcddeb1d745a6d5c..1654a1bc641413b391f3b206fe63d6c5ad52a33e 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -149,8 +149,6 @@ struct max3421_hcd {
 	 */
 	struct urb *curr_urb;
 	enum scheduling_pass sched_pass;
-	struct usb_device *loaded_dev;	/* dev that's loaded into the chip */
-	int loaded_epnum;		/* epnum whose toggles are loaded */
 	int urb_done;			/* > 0 -> no errors, < 0: errno */
 	size_t curr_len;
 	u8 hien;
@@ -488,39 +486,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
  * Caller must NOT hold HCD spinlock.
  */
 static void
-max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
-		    int force_toggles)
+max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum)
 {
-	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
-	int old_epnum, same_ep, rcvtog, sndtog;
-	struct usb_device *old_dev;
+	int rcvtog, sndtog;
 	u8 hctl;
 
-	old_dev = max3421_hcd->loaded_dev;
-	old_epnum = max3421_hcd->loaded_epnum;
-
-	same_ep = (dev == old_dev && epnum == old_epnum);
-	if (same_ep && !force_toggles)
-		return;
-
-	if (old_dev && !same_ep) {
-		/* save the old end-points toggles: */
-		u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
-
-		rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
-		sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
-
-		/* no locking: HCD (i.e., we) own toggles, don't we? */
-		usb_settoggle(old_dev, old_epnum, 0, rcvtog);
-		usb_settoggle(old_dev, old_epnum, 1, sndtog);
-	}
 
 	/* setup new endpoint's toggle bits: */
 	rcvtog = usb_gettoggle(dev, epnum, 0);
 	sndtog = usb_gettoggle(dev, epnum, 1);
 	hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
 		BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
-	max3421_hcd->loaded_epnum = epnum;
 	spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
 
 	/*
@@ -528,7 +504,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
 	 * address-assignment so it's best to just always load the
 	 * address whenever the end-point changed/was forced.
 	 */
-	max3421_hcd->loaded_dev = dev;
 	spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
 }
 
@@ -663,7 +638,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
 	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
 	struct urb *urb, *curr_urb = NULL;
 	struct max3421_ep *max3421_ep;
-	int epnum, force_toggles = 0;
+	int epnum;
 	struct usb_host_endpoint *ep;
 	struct list_head *pos;
 	unsigned long flags;
@@ -773,7 +748,6 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
 			usb_settoggle(urb->dev, epnum, 0, 1);
 			usb_settoggle(urb->dev, epnum, 1, 1);
 			max3421_ep->pkt_state = PKT_STATE_SETUP;
-			force_toggles = 1;
 		} else
 			max3421_ep->pkt_state = PKT_STATE_TRANSFER;
 	}
@@ -781,7 +755,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
 	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
 
 	max3421_ep->last_active = max3421_hcd->frame_number;
-	max3421_set_address(hcd, urb->dev, epnum, force_toggles);
+	max3421_set_address(hcd, urb->dev, epnum);
 	max3421_set_speed(hcd, urb->dev);
 	max3421_next_transfer(hcd, 0);
 	return 1;
@@ -1376,6 +1350,16 @@ max3421_urb_done(struct usb_hcd *hcd)
 	status = 0;
 	urb = max3421_hcd->curr_urb;
 	if (urb) {
+		/* save the old end-points toggles: */
+		u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+		int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
+		int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+		int epnum = usb_endpoint_num(&urb->ep->desc);
+
+		/* no locking: HCD (i.e., we) own toggles, don't we? */
+		usb_settoggle(urb->dev, epnum, 0, rcvtog);
+		usb_settoggle(urb->dev, epnum, 1, sndtog);
+
 		max3421_hcd->curr_urb = NULL;
 		spin_lock_irqsave(&max3421_hcd->lock, flags);
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
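Note on the max3421 changes: the cached loaded_dev/loaded_epnum pair could hold a stale usb_device pointer after a disconnect, and the force_toggles plumbing existed only to service that cache; saving the data-toggle state back at URB completion makes the cache, and the dangling pointer, unnecessary. A standalone sketch of the bit extraction that moved into max3421_urb_done() — the register value and bit positions here are assumed for illustration, not taken from the MAX3421E datasheet:

	#include <stdio.h>

	/* assumed bit positions, for illustration only */
	enum { HRSL_RCVTOGRD_BIT = 4, HRSL_SNDTOGRD_BIT = 5 };

	int main(void)
	{
		unsigned char hrsl = 0x30;	/* stands in for spi_rd8(hcd, MAX3421_REG_HRSL) */
		int rcvtog = (hrsl >> HRSL_RCVTOGRD_BIT) & 1;
		int sndtog = (hrsl >> HRSL_SNDTOGRD_BIT) & 1;

		/* in the driver these now feed usb_settoggle() at completion time */
		printf("rcvtog=%d sndtog=%d\n", rcvtog, sndtog);
		return 0;
	}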
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 4bcbab679afbab8d5ce2edcaf2fc959242ea6e8a..0824914e80a72044de0818b44b2ca3cf6f294d3f 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1333,6 +1333,22 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 		RCU_INIT_POINTER(epi->ws, NULL);
 	}
 
+	/* Add the current item to the list of active epoll hook for this file */
+	spin_lock(&tfile->f_lock);
+	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+	spin_unlock(&tfile->f_lock);
+
+	/*
+	 * Add the current item to the RB tree. All RB tree operations are
+	 * protected by "mtx", and ep_insert() is called with "mtx" held.
+	 */
+	ep_rbtree_insert(ep, epi);
+
+	/* now check if we've created too many backpaths */
+	error = -EINVAL;
+	if (full_check && reverse_path_check())
+		goto error_remove_epi;
+
 	/* Initialize the poll table using the queue callback */
 	epq.epi = epi;
 	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
@@ -1355,22 +1371,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	if (epi->nwait < 0)
 		goto error_unregister;
 
-	/* Add the current item to the list of active epoll hook for this file */
-	spin_lock(&tfile->f_lock);
-	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
-	spin_unlock(&tfile->f_lock);
-
-	/*
-	 * Add the current item to the RB tree. All RB tree operations are
-	 * protected by "mtx", and ep_insert() is called with "mtx" held.
-	 */
-	ep_rbtree_insert(ep, epi);
-
-	/* now check if we've created too many backpaths */
-	error = -EINVAL;
-	if (full_check && reverse_path_check())
-		goto error_remove_epi;
-
 	/* We have to drop the new item inside our item list to keep track of it */
 	spin_lock_irqsave(&ep->lock, flags);
 
@@ -1396,6 +1396,8 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 	return 0;
 
+error_unregister:
+	ep_unregister_pollwait(ep, epi);
 error_remove_epi:
 	spin_lock(&tfile->f_lock);
 	list_del_rcu(&epi->fllink);
@@ -1403,9 +1405,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 	rb_erase(&epi->rbn, &ep->rbr);
 
-error_unregister:
-	ep_unregister_pollwait(ep, epi);
-
 	/*
 	 * We need to do this because an event could have been arrived on some
 	 * allocated wait queue. Note that we don't care about the ep->ovflist
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 368bfb92b115c0e99ce4c654f6fdecb6ec5a2763..3ade39e02bb731492737ec432921983b8140e80a 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -28,6 +28,9 @@ static void *seq_buf_alloc(unsigned long size)
 	void *buf;
 	gfp_t gfp = GFP_KERNEL;
 
+	if (unlikely(size > MAX_RW_COUNT))
+		return NULL;
+
 	/*
 	 * For high order allocations, use __GFP_NORETRY to avoid oom-killing -
 	 * it's better to fall back to vmalloc() than to kill things. For small
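Note on the seq_file change: the read buffer doubles on every retry in seq_read(), so a single oversized record (the deeply nested mount path case) could push the allocation into multi-gigabyte territory; since no single read() can return more than MAX_RW_COUNT bytes anyway, refusing larger buffers costs nothing. A userspace model of the guard — MAX_RW_COUNT is derived here the way the kernel defines it, and the kmalloc/vmalloc fallback chain is reduced to malloc():

	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_MASK	(~4095UL)
	#define MAX_RW_COUNT	(INT_MAX & PAGE_MASK)	/* same derivation as the kernel */

	static void *seq_buf_alloc(unsigned long size)
	{
		if (size > MAX_RW_COUNT)
			return NULL;	/* doubling past this can never be read back */
		return malloc(size);
	}

	int main(void)
	{
		void *small = seq_buf_alloc(4096);
		void *huge = seq_buf_alloc(1UL << 40);

		printf("cap=%lu small=%s huge=%s\n", (unsigned long)MAX_RW_COUNT,
		       small ? "ok" : "NULL", huge ? "ok" : "NULL");
		free(small);
		return 0;
	}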
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 69111fa2e5780ad12407864d40730d59d22e9ddd..ee62bca5e0afb59a517e6dce20436e69248f8a0d 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -139,6 +139,7 @@ struct xt_match {
 
 	const char *table;
 	unsigned int matchsize;
+	unsigned int usersize;
#ifdef CONFIG_COMPAT
 	unsigned int compatsize;
#endif
@@ -179,6 +180,7 @@ struct xt_target {
 
 	const char *table;
 	unsigned int targetsize;
+	unsigned int usersize;
#ifdef CONFIG_COMPAT
 	unsigned int compatsize;
#endif
@@ -261,6 +263,13 @@ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
 int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
 		    bool inv_proto);
 
+int xt_match_to_user(const struct xt_entry_match *m,
+		     struct xt_entry_match __user *u);
+int xt_target_to_user(const struct xt_entry_target *t,
+		      struct xt_entry_target __user *u);
+int xt_data_to_user(void __user *dst, const void *src,
+		    int usersize, int size);
+
 void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
 				 struct xt_counters_info *info, bool compat);
 
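Note on the x_tables.h additions: matchsize/targetsize describe the kernel-side blob, which may carry pointers and private state appended after the user-supplied configuration; the new usersize field lets the dump path copy only the user-visible prefix back and zero the remainder, instead of leaking kernel addresses when rules are read back. A userspace analogue of the xt_data_to_user() contract — the struct layout and names are illustrative, with the master pointer mirroring the xt_quota2 hunk further down:

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	struct quota_info {
		unsigned long long quota;	/* user-visible configuration */
		void *master;			/* kernel-private, must not leak */
	};

	/* copy the user-visible prefix, zero the private tail */
	static int data_to_user(void *dst, const void *src, size_t usersize, size_t size)
	{
		if (!usersize)
			usersize = size;
		memcpy(dst, src, usersize);	/* copy_to_user() in the kernel */
		if (usersize != size)		/* clear_user() in the kernel */
			memset((char *)dst + usersize, 0, size - usersize);
		return 0;
	}

	int main(void)
	{
		struct quota_info k = { .quota = 1000 };
		struct quota_info u;

		k.master = &k;	/* pretend kernel pointer */
		data_to_user(&u, &k, offsetof(struct quota_info, master), sizeof(k));
		printf("quota=%llu master=%p\n", u.quota, u.master);	/* master is zeroed */
		return 0;
	}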
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 00329e03e57086d354823a9f82fae9c86263cfd2..fac621f1b29c2f7bdc0f909ef2d9af0803d27267 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1755,7 +1755,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	spinlock_t *ptl;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
-	bool was_locked = false;
+	bool do_unlock_page = false;
 	pmd_t _pmd;
 
 	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
@@ -1768,7 +1768,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	VM_BUG_ON(freeze && !page);
 	if (page) {
 		VM_WARN_ON_ONCE(!PageLocked(page));
-		was_locked = true;
 		if (page != pmd_page(*pmd))
 			goto out;
 	}
@@ -1777,19 +1776,29 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	if (pmd_trans_huge(*pmd)) {
 		if (!page) {
 			page = pmd_page(*pmd);
-			if (unlikely(!trylock_page(page))) {
-				get_page(page);
-				_pmd = *pmd;
-				spin_unlock(ptl);
-				lock_page(page);
-				spin_lock(ptl);
-				if (unlikely(!pmd_same(*pmd, _pmd))) {
-					unlock_page(page);
+			/*
+			 * An anonymous page must be locked, to ensure that a
+			 * concurrent reuse_swap_page() sees stable mapcount;
+			 * but reuse_swap_page() is not used on shmem or file,
+			 * and page lock must not be taken when zap_pmd_range()
+			 * calls __split_huge_pmd() while i_mmap_lock is held.
+			 */
+			if (PageAnon(page)) {
+				if (unlikely(!trylock_page(page))) {
+					get_page(page);
+					_pmd = *pmd;
+					spin_unlock(ptl);
+					lock_page(page);
+					spin_lock(ptl);
+					if (unlikely(!pmd_same(*pmd, _pmd))) {
+						unlock_page(page);
+						put_page(page);
+						page = NULL;
+						goto repeat;
+					}
 					put_page(page);
-					page = NULL;
-					goto repeat;
 				}
-				put_page(page);
+				do_unlock_page = true;
 			}
 		}
 		if (PageMlocked(page))
@@ -1799,7 +1808,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	__split_huge_pmd_locked(vma, pmd, haddr, freeze);
 out:
 	spin_unlock(ptl);
-	if (!was_locked && page)
+	if (do_unlock_page)
 		unlock_page(page);
 	mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
 }
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 9609ad71dd260a1162f6dd037010195c4f22f321..fe1801d9f05982eb28ccde2e1d228ef7845a355a 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -353,6 +353,8 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 
 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
+	if (csum && skb_checksum_start(skb) < skb->data)
+		return -EINVAL;
 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
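Note on the ip_gre change: after earlier header manipulation a packet can reach gre_handle_offloads() with its checksum start sitting below skb->data, and computing the GRE checksum from there would walk memory in front of the packet; rejecting such packets with -EINVAL is the only safe answer. A plain-C model of the pointer check, with the skb reduced to two offsets into a flat buffer (names and layout are illustrative):

	#include <stdio.h>

	struct pkt {
		unsigned int data_off;		/* models skb->data */
		unsigned int csum_start_off;	/* models skb_checksum_start(skb) */
	};

	static int gre_handle_offloads(const struct pkt *p, int csum)
	{
		if (csum && p->csum_start_off < p->data_off)
			return -1;	/* -EINVAL in the kernel */
		return 0;		/* would go on to iptunnel_handle_offloads() */
	}

	int main(void)
	{
		struct pkt ok = { .data_off = 14, .csum_start_off = 34 };
		struct pkt bad = { .data_off = 34, .csum_start_off = 14 };

		printf("ok=%d bad=%d\n", gre_handle_offloads(&ok, 1),
		       gre_handle_offloads(&bad, 1));
		return 0;
	}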
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index e065140d0c93b23ffa5e37e4ac06ec7524f6e3a2..1db69888cc14982516fb0cda5b4987ef0222c48f 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -267,6 +267,60 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
 }
 EXPORT_SYMBOL_GPL(xt_request_find_target);
 
+
+static int xt_obj_to_user(u16 __user *psize, u16 size,
+			  void __user *pname, const char *name,
+			  u8 __user *prev, u8 rev)
+{
+	if (put_user(size, psize))
+		return -EFAULT;
+	if (copy_to_user(pname, name, strlen(name) + 1))
+		return -EFAULT;
+	if (put_user(rev, prev))
+		return -EFAULT;
+
+	return 0;
+}
+
+#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
+	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
+		       U->u.user.name, K->u.kernel.TYPE->name,		\
+		       &U->u.user.revision, K->u.kernel.TYPE->revision)
+
+int xt_data_to_user(void __user *dst, const void *src,
+		    int usersize, int size)
+{
+	usersize = usersize ? : size;
+	if (copy_to_user(dst, src, usersize))
+		return -EFAULT;
+	if (usersize != size && clear_user(dst + usersize, size - usersize))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xt_data_to_user);
+
+#define XT_DATA_TO_USER(U, K, TYPE, C_SIZE)				\
+	xt_data_to_user(U->data, K->data,				\
+			K->u.kernel.TYPE->usersize,			\
+			C_SIZE ? : K->u.kernel.TYPE->TYPE##size)
+
+int xt_match_to_user(const struct xt_entry_match *m,
+		     struct xt_entry_match __user *u)
+{
+	return XT_OBJ_TO_USER(u, m, match, 0) ||
+	       XT_DATA_TO_USER(u, m, match, 0);
+}
+EXPORT_SYMBOL_GPL(xt_match_to_user);
+
+int xt_target_to_user(const struct xt_entry_target *t,
+		      struct xt_entry_target __user *u)
+{
+	return XT_OBJ_TO_USER(u, t, target, 0) ||
+	       XT_DATA_TO_USER(u, t, target, 0);
+}
+EXPORT_SYMBOL_GPL(xt_target_to_user);
+
 static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
 {
 	const struct xt_match *m;
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
index 8c56ff2aa2fa5f71f3230f7b258bc22a673f532e..7f9d9981a9e2834ddd07608eb7994a5c1a78d0e2 100644
--- a/net/netfilter/xt_quota2.c
+++ b/net/netfilter/xt_quota2.c
@@ -135,6 +135,8 @@ static ssize_t quota_proc_write(struct file *file, const char __user *input,
 	if (copy_from_user(buf, input, size) != 0)
 		return -EFAULT;
 	buf[sizeof(buf)-1] = '\0';
+	if (size < sizeof(buf))
+		buf[size] = '\0';
 
 	spin_lock_bh(&e->lock);
 	e->quota = simple_strtoull(buf, NULL, 0);
@@ -321,6 +323,7 @@ static struct xt_match quota_mt2_reg[] __read_mostly = {
 		.match      = quota_mt2,
 		.destroy    = quota_mt2_destroy,
 		.matchsize  = sizeof(struct xt_quota_mtinfo2),
+		.usersize   = offsetof(struct xt_quota_mtinfo2, master),
 		.me         = THIS_MODULE,
 	},
 	{
@@ -331,6 +334,7 @@ static struct xt_match quota_mt2_reg[] __read_mostly = {
 		.match      = quota_mt2,
 		.destroy    = quota_mt2_destroy,
 		.matchsize  = sizeof(struct xt_quota_mtinfo2),
+		.usersize   = offsetof(struct xt_quota_mtinfo2, master),
 		.me         = THIS_MODULE,
 	},
 };
diff --git a/net/socket.c b/net/socket.c
index be7831fd7d464ea1ac94f2b8ee590776ea2efed7..48ee54f2998379e3ecfa034b13c844d8b0f1d37b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1426,9 +1426,10 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
 						      (struct sockaddr *)
 						      &address, addrlen);
 		}
-		fput_light(sock->file, fput_needed);
 		if (!err)
 			sockev_notify(SOCKEV_BIND, sock);
+
+		fput_light(sock->file, fput_needed);
 	}
 	return err;
 }
@@ -1455,9 +1456,10 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
 		if (!err)
 			err = sock->ops->listen(sock, backlog);
 
-		fput_light(sock->file, fput_needed);
 		if (!err)
 			sockev_notify(SOCKEV_LISTEN, sock);
+
+		fput_light(sock->file, fput_needed);
 	}
 	return err;
 }
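Note on the socket.c reordering: sockev_notify() was previously called after fput_light() had already released the reference taken by sockfd_lookup_light(), so the notification could race with the final fput of the socket; notifying first and dropping the reference last closes that window. A userspace analogue of the discipline — the refcount is a plain counter, and notify()/put_ref() merely model sockev_notify()/fput_light():

	#include <assert.h>
	#include <stdio.h>

	struct obj { int refs; };

	static void get_ref(struct obj *o) { o->refs++; }	/* sockfd_lookup_light() */
	static void put_ref(struct obj *o) { o->refs--; }	/* fput_light() */

	static void notify(struct obj *o)			/* sockev_notify() */
	{
		assert(o->refs > 0);	/* caller must still hold its reference */
		printf("notify: refs=%d\n", o->refs);
	}

	int main(void)
	{
		struct obj sock = { .refs = 1 };

		get_ref(&sock);
		notify(&sock);	/* fixed order: use the object... */
		put_ref(&sock);	/* ...then drop the reference */
		return 0;
	}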