drivers/char/adsprpc.c  +15 −4

@@ -351,6 +351,7 @@ struct fastrpc_mmap {
 	int uncached;
 	int secure;
 	uintptr_t attr;
+	bool is_filemap; /*flag to indicate map used in process init*/
 };
 
 enum fastrpc_perfkeys {
@@ -406,6 +407,7 @@ struct fastrpc_file {
 	struct mutex perf_mutex;
 	struct pm_qos_request pm_qos_req;
 	int qos_request;
+	struct mutex pm_qos_mutex;
 	struct mutex map_mutex;
 	struct mutex fl_map_mutex;
 	int refcount;
@@ -687,9 +689,10 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
 	spin_lock(&me->hlock);
 	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
-		if (map->raddr == va &&
+		if (map->refs == 1 && map->raddr == va &&
 			map->raddr + map->len == va + len &&
-			map->refs == 1) {
+			/*Remove map if not used in process initialization*/
+			!map->is_filemap) {
 			match = map;
 			hlist_del_init(&map->hn);
 			break;
@@ -701,9 +704,10 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
 		return 0;
 	}
 	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
-		if (map->raddr == va &&
+		if (map->refs == 1 && map->raddr == va &&
 			map->raddr + map->len == va + len &&
-			map->refs == 1) {
+			/*Remove map if not used in process initialization*/
+			!map->is_filemap) {
 			match = map;
 			hlist_del_init(&map->hn);
 			break;
@@ -843,6 +847,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
 	map->fl = fl;
 	map->fd = fd;
 	map->attr = attr;
+	map->is_filemap = false;
 	if (mflags == ADSP_MMAP_HEAP_ADDR ||
 				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
 		unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
@@ -2205,6 +2210,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
 		mutex_lock(&fl->fl_map_mutex);
 		VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
 			init->file, init->filelen, mflags, &file));
+		if (file)
+			file->is_filemap = true;
 		mutex_unlock(&fl->fl_map_mutex);
 		if (err)
 			goto bail;
@@ -3060,6 +3067,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
 	mutex_destroy(&fl->perf_mutex);
 	mutex_destroy(&fl->fl_map_mutex);
 	mutex_destroy(&fl->map_mutex);
+	mutex_destroy(&fl->pm_qos_mutex);
 	kfree(fl);
 	return 0;
 }
@@ -3561,6 +3569,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
 	hlist_add_head(&fl->hn, &me->drivers);
 	spin_unlock(&me->hlock);
 	mutex_init(&fl->perf_mutex);
+	mutex_init(&fl->pm_qos_mutex);
 	return 0;
 }
@@ -3657,12 +3666,14 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
 		VERIFY(err, latency != 0);
 		if (err)
 			goto bail;
+		mutex_lock(&fl->pm_qos_mutex);
 		if (!fl->qos_request) {
 			pm_qos_add_request(&fl->pm_qos_req,
 				PM_QOS_CPU_DMA_LATENCY, latency);
 			fl->qos_request = 1;
 		} else
 			pm_qos_update_request(&fl->pm_qos_req, latency);
+		mutex_unlock(&fl->pm_qos_mutex);
 		break;
 	case FASTRPC_CONTROL_SMMU:
 		if (!me->legacy)
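As far as these hunks show, the adsprpc patch does two independent things: maps created during process initialization are flagged with is_filemap so fastrpc_mmap_remove() refuses to tear them down, and the FASTRPC_CONTROL_LATENCY handler now takes a new per-file pm_qos_mutex around the add-or-update of the PM QoS request. Below is a minimal userspace sketch (pthreads, invented names, not driver code) of the check-then-act race the mutex closes: without the lock, two concurrent callers can both observe qos_request == 0 and both take the pm_qos_add_request() path.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pm_qos_mutex = PTHREAD_MUTEX_INITIALIZER;
static int qos_request;        /* mirrors fl->qos_request */
static int add_calls;          /* how often the "add" path ran */

static void pm_qos_add_request(int latency)    { add_calls++; (void)latency; }
static void pm_qos_update_request(int latency) { (void)latency; }

static void *set_latency(void *arg)
{
	int latency = (int)(long)arg;

	pthread_mutex_lock(&pm_qos_mutex);   /* the fix: serialize add vs. update */
	if (!qos_request) {
		pm_qos_add_request(latency);
		qos_request = 1;
	} else {
		pm_qos_update_request(latency);
	}
	pthread_mutex_unlock(&pm_qos_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, set_latency, (void *)10L);
	pthread_create(&b, NULL, set_latency, (void *)20L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("add path ran %d time(s)\n", add_calls);  /* always 1 with the lock */
	return 0;
}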
drivers/usb/host/max3421-hcd.c  +14 −30

@@ -149,8 +149,6 @@ struct max3421_hcd {
 	 */
 	struct urb *curr_urb;
 	enum scheduling_pass sched_pass;
-	struct usb_device *loaded_dev;	/* dev that's loaded into the chip */
-	int loaded_epnum;		/* epnum whose toggles are loaded */
 	int urb_done;			/* > 0 -> no errors, < 0: errno */
 	size_t curr_len;
 	u8 hien;
@@ -488,39 +486,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
  * Caller must NOT hold HCD spinlock.
  */
 static void
-max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
-		    int force_toggles)
+max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum)
 {
 	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
-	int old_epnum, same_ep, rcvtog, sndtog;
-	struct usb_device *old_dev;
+	int rcvtog, sndtog;
 	u8 hctl;
 
-	old_dev = max3421_hcd->loaded_dev;
-	old_epnum = max3421_hcd->loaded_epnum;
-
-	same_ep = (dev == old_dev && epnum == old_epnum);
-	if (same_ep && !force_toggles)
-		return;
-
-	if (old_dev && !same_ep) {
-		/* save the old end-points toggles: */
-		u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
-
-		rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
-		sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
-
-		/* no locking: HCD (i.e., we) own toggles, don't we? */
-		usb_settoggle(old_dev, old_epnum, 0, rcvtog);
-		usb_settoggle(old_dev, old_epnum, 1, sndtog);
-	}
-
 	/* setup new endpoint's toggle bits: */
 	rcvtog = usb_gettoggle(dev, epnum, 0);
 	sndtog = usb_gettoggle(dev, epnum, 1);
 	hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
 		BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
 
-	max3421_hcd->loaded_epnum = epnum;
 	spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
 
 	/*
@@ -528,7 +504,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
 	 * address-assignment so it's best to just always load the
 	 * address whenever the end-point changed/was forced.
 	 */
-	max3421_hcd->loaded_dev = dev;
 	spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
 }
@@ -663,7 +638,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
 	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
 	struct urb *urb, *curr_urb = NULL;
 	struct max3421_ep *max3421_ep;
-	int epnum, force_toggles = 0;
+	int epnum;
 	struct usb_host_endpoint *ep;
 	struct list_head *pos;
 	unsigned long flags;
@@ -773,7 +748,6 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
 			usb_settoggle(urb->dev, epnum, 0, 1);
 			usb_settoggle(urb->dev, epnum, 1, 1);
 			max3421_ep->pkt_state = PKT_STATE_SETUP;
-			force_toggles = 1;
 		} else
 			max3421_ep->pkt_state = PKT_STATE_TRANSFER;
 	}
@@ -781,7 +755,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
 	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
 
 	max3421_ep->last_active = max3421_hcd->frame_number;
-	max3421_set_address(hcd, urb->dev, epnum, force_toggles);
+	max3421_set_address(hcd, urb->dev, epnum);
 	max3421_set_speed(hcd, urb->dev);
 	max3421_next_transfer(hcd, 0);
 	return 1;
@@ -1376,6 +1350,16 @@ max3421_urb_done(struct usb_hcd *hcd)
 	status = 0;
 	urb = max3421_hcd->curr_urb;
 	if (urb) {
+		/* save the old end-points toggles: */
+		u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+		int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
+		int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+		int epnum = usb_endpoint_num(&urb->ep->desc);
+
+		/* no locking: HCD (i.e., we) own toggles, don't we? */
+		usb_settoggle(urb->dev, epnum, 0, rcvtog);
+		usb_settoggle(urb->dev, epnum, 1, sndtog);
+
 		max3421_hcd->curr_urb = NULL;
 		spin_lock_irqsave(&max3421_hcd->lock, flags);
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
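These hunks drop the cached loaded_dev/loaded_epnum bookkeeping and the force_toggles argument; the data-toggle state is instead saved in max3421_urb_done(), read from the device that the completing URB itself references, presumably so the driver never dereferences a cached device pointer that may already be gone. A rough userspace sketch of the new placement (names and register layout simplified, purely illustrative):

#include <stdio.h>

#define RCVTOGRD_BIT 2
#define SNDTOGRD_BIT 3

struct usb_device { int toggle[2]; };                  /* [0]=rcv, [1]=snd */
struct urb        { struct usb_device *dev; int epnum; };

static unsigned char read_hrsl(void) { return 1u << SNDTOGRD_BIT; }  /* fake SPI read */

/* new-style completion: derive everything from the URB we still hold */
static void urb_done_like(struct urb *urb)
{
	unsigned char hrsl = read_hrsl();
	int rcvtog = (hrsl >> RCVTOGRD_BIT) & 1;
	int sndtog = (hrsl >> SNDTOGRD_BIT) & 1;

	/* the URB still owns a valid device pointer at this point */
	urb->dev->toggle[0] = rcvtog;
	urb->dev->toggle[1] = sndtog;
}

int main(void)
{
	struct usb_device dev = { { 0, 0 } };
	struct urb urb = { &dev, 1 };

	urb_done_like(&urb);
	printf("rcvtog=%d sndtog=%d\n", dev.toggle[0], dev.toggle[1]);
	return 0;
}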
fs/eventpoll.c  +18 −19

@@ -1333,6 +1333,22 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 		RCU_INIT_POINTER(epi->ws, NULL);
 	}
 
+	/* Add the current item to the list of active epoll hook for this file */
+	spin_lock(&tfile->f_lock);
+	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+	spin_unlock(&tfile->f_lock);
+
+	/*
+	 * Add the current item to the RB tree. All RB tree operations are
+	 * protected by "mtx", and ep_insert() is called with "mtx" held.
+	 */
+	ep_rbtree_insert(ep, epi);
+
+	/* now check if we've created too many backpaths */
+	error = -EINVAL;
+	if (full_check && reverse_path_check())
+		goto error_remove_epi;
+
 	/* Initialize the poll table using the queue callback */
 	epq.epi = epi;
 	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
@@ -1355,22 +1371,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	if (epi->nwait < 0)
 		goto error_unregister;
 
-	/* Add the current item to the list of active epoll hook for this file */
-	spin_lock(&tfile->f_lock);
-	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
-	spin_unlock(&tfile->f_lock);
-
-	/*
-	 * Add the current item to the RB tree. All RB tree operations are
-	 * protected by "mtx", and ep_insert() is called with "mtx" held.
-	 */
-	ep_rbtree_insert(ep, epi);
-
-	/* now check if we've created too many backpaths */
-	error = -EINVAL;
-	if (full_check && reverse_path_check())
-		goto error_remove_epi;
-
 	/* We have to drop the new item inside our item list to keep track of it */
 	spin_lock_irqsave(&ep->lock, flags);
@@ -1396,6 +1396,8 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 	return 0;
 
+error_unregister:
+	ep_unregister_pollwait(ep, epi);
 error_remove_epi:
 	spin_lock(&tfile->f_lock);
 	list_del_rcu(&epi->fllink);
@@ -1403,9 +1405,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 	rb_erase(&epi->rbn, &ep->rbr);
 
-error_unregister:
-	ep_unregister_pollwait(ep, epi);
-
 	/*
 	 * We need to do this because an event could have been arrived on some
 	 * allocated wait queue. Note that we don't care about the ep->ovflist
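ep_insert() now links the epitem into the file's hook list and the RB tree and runs reverse_path_check() before the poll-wait callbacks are registered, and the error labels are reordered so that teardown happens in strict reverse order of construction. A small userspace sketch of that ordering and its goto-based unwind (helper names are invented for the example, not the eventpoll API):

#include <errno.h>
#include <stdio.h>

static int  add_to_file_list(void)    { return 0; }
static void del_from_file_list(void)  { }
static int  insert_into_tree(void)    { return 0; }
static void erase_from_tree(void)     { }
static int  reverse_path_check(void)  { return 0; }   /* 0 == ok */
static int  register_pollwait(void)   { return 0; }   /* the step that used to come first */
static void unregister_pollwait(void) { }

static int ep_insert_like(void)
{
	int error;

	/* cheap, reversible bookkeeping and sanity checks first ... */
	error = add_to_file_list();
	if (error)
		goto out;
	error = insert_into_tree();
	if (error)
		goto error_remove_link;

	error = -EINVAL;
	if (reverse_path_check())
		goto error_remove_epi;

	/* ... and only then hook into the wait queues */
	error = register_pollwait();
	if (error)
		goto error_unregister;

	return 0;

error_unregister:
	unregister_pollwait();
error_remove_epi:
	erase_from_tree();
error_remove_link:
	del_from_file_list();
out:
	return error;
}

int main(void)
{
	printf("ep_insert_like() -> %d\n", ep_insert_like());
	return 0;
}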
fs/seq_file.c  +3 −0

@@ -28,6 +28,9 @@ static void *seq_buf_alloc(unsigned long size)
 	void *buf;
 	gfp_t gfp = GFP_KERNEL;
 
+	if (unlikely(size > MAX_RW_COUNT))
+		return NULL;
+
 	/*
 	 * For high order allocations, use __GFP_NORETRY to avoid oom-killing -
 	 * it's better to fall back to vmalloc() than to kill things. For small
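seq_buf_alloc() now rejects requests larger than MAX_RW_COUNT before trying to allocate anything, that is, larger than a single read() could ever return. A userspace sketch of the same guard (MAX_RW_COUNT is written out here as the kernel's INT_MAX rounded down to a page boundary; the helper name is made up):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE     4096UL
#define MAX_RW_COUNT  (INT_MAX & ~(PAGE_SIZE - 1))

static void *seq_buf_alloc_like(unsigned long size)
{
	if (size > MAX_RW_COUNT)	/* a read() can never consume more than this */
		return NULL;
	return malloc(size);
}

int main(void)
{
	void *ok  = seq_buf_alloc_like(1UL << 20);     /* 1 MiB: allowed  */
	void *bad = seq_buf_alloc_like(3UL << 30);     /* 3 GiB: rejected */

	printf("1 MiB -> %s, 3 GiB -> %s\n",
	       ok ? "allocated" : "NULL", bad ? "allocated" : "NULL");
	free(ok);
	return 0;
}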
include/linux/netfilter/x_tables.h  +9 −0

@@ -139,6 +139,7 @@ struct xt_match {
 	const char *table;
 	unsigned int matchsize;
+	unsigned int usersize;
 #ifdef CONFIG_COMPAT
 	unsigned int compatsize;
 #endif
@@ -179,6 +180,7 @@ struct xt_target {
 	const char *table;
 	unsigned int targetsize;
+	unsigned int usersize;
 #ifdef CONFIG_COMPAT
 	unsigned int compatsize;
 #endif
@@ -261,6 +263,13 @@ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
 int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
 		    bool inv_proto);
+int xt_match_to_user(const struct xt_entry_match *m,
+		     struct xt_entry_match __user *u);
+int xt_target_to_user(const struct xt_entry_target *t,
+		      struct xt_entry_target __user *u);
+int xt_data_to_user(void __user *dst, const void *src,
+		    int usersize, int size);
+
 void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
 				 struct xt_counters_info *info, bool compat);
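The header grows a usersize field for matches and targets plus xt_match_to_user()/xt_target_to_user()/xt_data_to_user() declarations, which suggests that only the user-visible part of a match or target's private data gets copied back to userspace when rules are dumped. A hedged userspace sketch of that copy-only-usersize-and-clear-the-rest idea (this mimics the intent only; it is not the kernel implementation):

#include <stdio.h>
#include <string.h>

struct kernel_priv {
	unsigned int user_value;     /* user-visible configuration          */
	void *kernel_only_ptr;       /* must never leak back to userspace   */
};

/* copy usersize bytes, zero the remainder of the destination buffer */
static int data_to_user_like(void *dst, const void *src,
			     size_t usersize, size_t size)
{
	memcpy(dst, src, usersize);
	memset((char *)dst + usersize, 0, size - usersize);
	return 0;
}

int main(void)
{
	struct kernel_priv k = { .user_value = 42, .kernel_only_ptr = &k };
	struct kernel_priv u;

	data_to_user_like(&u, &k, sizeof(k.user_value), sizeof(k));
	printf("user_value=%u kernel_only_ptr=%p\n",
	       u.user_value, u.kernel_only_ptr);      /* pointer comes back as NULL */
	return 0;
}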