Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fbf59bc9 authored by Tejun Heo
Browse files

percpu: implement new dynamic percpu allocator



Impact: new scalable dynamic percpu allocator which allows dynamic
        percpu areas to be accessed the same way as static ones

Implement scalable dynamic percpu allocator which can be used for both
static and dynamic percpu areas.  This will allow static and dynamic
areas to share faster direct access methods.  This feature is optional
and enabled only when CONFIG_HAVE_DYNAMIC_PER_CPU_AREA is defined by
arch.  Please read comment on top of mm/percpu.c for details.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
parent 8fc48985
Loading
Loading
Loading
Loading
+18 −4
Original line number Diff line number Diff line
@@ -76,23 +76,37 @@

#ifdef CONFIG_SMP

/*
 * Per-cpu pointer table for the legacy allocpercpu allocator.
 * ptrs[1] is the pre-C99 trailing-array trick: the real allocation is
 * over-sized to hold one pointer per possible CPU.
 * NOTE(review): this copy appears to be the diff's removed text; the same
 * definition reappears in the !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA branch.
 */
struct percpu_data {
	void *ptrs[1];
};
#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA

#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
/* Base address of the percpu area — presumably set during early init by
 * the dynamic percpu allocator; see mm/percpu.c (TODO confirm). */
extern void *pcpu_base_addr;

/* Arch-supplied callback that populates the page table entry backing @addr. */
typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);

/* Set up the static percpu area from @pages, @cpu_size bytes per CPU.
 * NOTE(review): return-value semantics not visible here — see mm/percpu.c. */
extern size_t __init pcpu_setup_static(pcpu_populate_pte_fn_t populate_pte_fn,
				       struct page **pages, size_t cpu_size);
/*
 * Use this to get to a cpu's version of the per-cpu object
 * dynamically allocated. Non-atomic access to the current CPU's
 * version should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))

#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

/*
 * Per-cpu pointer table for the legacy allocpercpu allocator.
 * ptrs[1] is the pre-C99 trailing-array trick: the real allocation is
 * over-sized to hold one pointer per possible CPU.
 */
struct percpu_data {
	void *ptrs[1];
};

/*
 * Bit-flip the bookkeeping pointer so that code which dereferences the
 * opaque percpu cookie directly (instead of going through per_cpu_ptr())
 * faults loudly rather than silently reading struct percpu_data.
 */
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)

/*
 * Legacy per_cpu_ptr(): undo the disguise, then index the per-CPU
 * pointer table by @cpu.  Non-atomic access to the current CPU's copy
 * should be bracketed with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)						\
({									\
        struct percpu_data *__p = __percpu_disguise(ptr);		\
        (__typeof__(ptr))__p->ptrs[(cpu)];				\
})

#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);

+31 −0
Original line number Diff line number Diff line
@@ -51,6 +51,7 @@
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>

#if 0
#define DEBUGP printk
@@ -366,6 +367,34 @@ static struct module *find_module(const char *name)
}

#ifdef CONFIG_SMP

#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA

/*
 * percpu_modalloc - allocate a module's per-cpu area via the dynamic
 * percpu allocator.
 * @size:  size of the per-cpu area in bytes
 * @align: requested alignment in bytes
 * @name:  module name, used only in warning messages
 *
 * Alignments larger than PAGE_SIZE are clamped (with a warning), since
 * the allocator cannot honor them.  Returns the percpu cookie from
 * __alloc_percpu(), or NULL on allocation failure (also warned).
 */
static void *percpu_modalloc(unsigned long size, unsigned long align,
			     const char *name)
{
	void *ptr;

	if (align > PAGE_SIZE) {
		/* %lu, not %li: align and PAGE_SIZE are unsigned long */
		printk(KERN_WARNING "%s: per-cpu alignment %lu > %lu\n",
		       name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	ptr = __alloc_percpu(size, align);
	if (!ptr)
		printk(KERN_WARNING
		       "Could not allocate %lu bytes percpu data\n", size);
	return ptr;
}

/* Release a module's per-cpu area previously returned by percpu_modalloc(). */
static void percpu_modfree(void *freeme)
{
	free_percpu(freeme);
}

#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

/* Number of blocks used and allocated. */
static unsigned int pcpu_num_used, pcpu_num_allocated;
/* Size of each block.  -ve means used. */
@@ -499,6 +528,8 @@ static int percpu_modinit(void)
}
__initcall(percpu_modinit);

#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

static unsigned int find_pcpusec(Elf_Ehdr *hdr,
				 Elf_Shdr *sechdrs,
				 const char *secstrings)
+4 −0
Original line number Diff line number Diff line
@@ -30,6 +30,10 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_FS_XIP) += filemap_xip.o
obj-$(CONFIG_MIGRATION) += migrate.o
# Select the percpu allocator implementation: archs that define
# CONFIG_HAVE_DYNAMIC_PER_CPU_AREA get the new dynamic allocator
# (mm/percpu.c); everyone else keeps the legacy mm/allocpercpu.c.
ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
obj-$(CONFIG_SMP) += percpu.o
else
obj-$(CONFIG_SMP) += allocpercpu.o
endif
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o

mm/percpu.c

0 → 100644
+890 −0

File added.

Preview size limit exceeded, changes collapsed.