
Commit 7c8e0181 authored by Christoph Lameter, committed by Linus Torvalds

mm: replace __get_cpu_var uses with this_cpu_ptr



Replace places where __get_cpu_var() is used for an address calculation
with this_cpu_ptr().
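
For reference, both spellings resolve to the address of the current CPU's instance of a per-CPU variable; only the way that address is formed changes. A minimal sketch of the conversion, reusing radix_tree_preloads from the diff below (the wrapper function is illustrative, not part of the patch):

/* Per-CPU preload pool, as declared in lib/radix-tree.c. */
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);

/* Illustrative helper showing old and new spellings side by side. */
static void percpu_ptr_example(void)
{
	struct radix_tree_preload *rtp;

	preempt_disable();

	/* Old: __get_cpu_var() evaluates to the current CPU's lvalue,
	 * so the caller takes its address explicitly. */
	rtp = &__get_cpu_var(radix_tree_preloads);

	/* New: this_cpu_ptr() takes the address of the per-CPU symbol
	 * and offsets it for the current CPU, yielding the same pointer.
	 * raw_cpu_ptr() (seen in the slub hunk) does the same without
	 * the preemption sanity check. */
	rtp = this_cpu_ptr(&radix_tree_preloads);

	preempt_enable();
}

The call sites are otherwise unchanged; each keeps whatever preemption handling it already had.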

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dc6f6c97
lib/radix-tree.c  +3 −3
@@ -194,7 +194,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		 * succeed in getting a node here (and never reach
 		 * kmem_cache_alloc)
 		 */
-		rtp = &__get_cpu_var(radix_tree_preloads);
+		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr) {
 			ret = rtp->nodes[rtp->nr - 1];
 			rtp->nodes[rtp->nr - 1] = NULL;
@@ -250,14 +250,14 @@ static int __radix_tree_preload(gfp_t gfp_mask)
 	int ret = -ENOMEM;
 
 	preempt_disable();
-	rtp = &__get_cpu_var(radix_tree_preloads);
+	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
 		preempt_enable();
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
 		preempt_disable();
-		rtp = &__get_cpu_var(radix_tree_preloads);
+		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr < ARRAY_SIZE(rtp->nodes))
 			rtp->nodes[rtp->nr++] = node;
 		else
mm/memcontrol.c  +1 −1
@@ -2436,7 +2436,7 @@ static void drain_stock(struct memcg_stock_pcp *stock)
  */
 static void drain_local_stock(struct work_struct *dummy)
 {
-	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
+	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
mm/memory-failure.c  +1 −1
@@ -1298,7 +1298,7 @@ static void memory_failure_work_func(struct work_struct *work)
 	unsigned long proc_flags;
 	int gotten;
 
-	mf_cpu = &__get_cpu_var(memory_failure_cpu);
+	mf_cpu = this_cpu_ptr(&memory_failure_cpu);
 	for (;;) {
 		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
 		gotten = kfifo_get(&mf_cpu->fifo, &entry);
mm/page-writeback.c  +2 −2
@@ -1623,7 +1623,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * 1000+ tasks, all of them start dirtying pages at exactly the same
 	 * time, hence all honoured too large initial task->nr_dirtied_pause.
 	 */
-	p =  &__get_cpu_var(bdp_ratelimits);
+	p =  this_cpu_ptr(&bdp_ratelimits);
 	if (unlikely(current->nr_dirtied >= ratelimit))
 		*p = 0;
 	else if (unlikely(*p >= ratelimit_pages)) {
@@ -1635,7 +1635,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
 	 * the dirty throttling and livelock other long-run dirtiers.
 	 */
-	p = &__get_cpu_var(dirty_throttle_leaks);
+	p = this_cpu_ptr(&dirty_throttle_leaks);
 	if (*p > 0 && current->nr_dirtied < ratelimit) {
 		unsigned long nr_pages_dirtied;
 		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
mm/slub.c  +3 −3
@@ -2209,7 +2209,7 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 
 	page = new_slab(s, flags, node);
 	if (page) {
-		c = __this_cpu_ptr(s->cpu_slab);
+		c = raw_cpu_ptr(s->cpu_slab);
 		if (c->page)
 			flush_slab(s, c);
 
@@ -2425,7 +2425,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	 * and the retrieval of the tid.
 	 */
 	preempt_disable();
-	c = __this_cpu_ptr(s->cpu_slab);
+	c = this_cpu_ptr(s->cpu_slab);
 
 	/*
 	 * The transaction ids are globally unique per cpu and per operation on
@@ -2681,7 +2681,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	 * during the cmpxchg then the free will succedd.
 	 */
 	preempt_disable();
-	c = __this_cpu_ptr(s->cpu_slab);
+	c = this_cpu_ptr(s->cpu_slab);
 
 	tid = c->tid;
 	preempt_enable();