
Commit 8633322c authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  sched: move rq_weight data array out of .percpu
  percpu: allow pcpu_alloc() to be called with IRQs off
parents 9532faeb 4a6cc4bd
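
The first patch converts a static DEFINE_PER_CPU array whose size scales with NR_CPUS into a plain pointer that sched_init() fills once with __alloc_percpu(), so each CPU's slice holds only nr_cpu_ids entries instead of NR_CPUS. A minimal sketch of that conversion pattern, using a hypothetical variable name rather than anything from the patch itself:

/* before: NR_CPUS entries per CPU sit in the static .percpu section */
static DEFINE_PER_CPU(unsigned long [NR_CPUS], weights);

/* after: a plain pointer, populated once during early init */
static __read_mostly unsigned long *weights;

static void __init weights_init(void)	/* hypothetical */
{
	/* nr_cpu_ids <= NR_CPUS, so small machines allocate far less */
	weights = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
				 __alignof__(unsigned long));
}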
kernel/sched.c +11 −11
@@ -1564,11 +1564,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)

#ifdef CONFIG_FAIR_GROUP_SCHED

-struct update_shares_data {
-	unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;

static void __set_se_shares(struct sched_entity *se, unsigned long shares);

@@ -1578,12 +1574,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
static void update_group_shares_cpu(struct task_group *tg, int cpu,
				    unsigned long sd_shares,
				    unsigned long sd_rq_weight,
-				    struct update_shares_data *usd)
+				    unsigned long *usd_rq_weight)
{
	unsigned long shares, rq_weight;
	int boost = 0;

-	rq_weight = usd->rq_weight[cpu];
+	rq_weight = usd_rq_weight[cpu];
	if (!rq_weight) {
		boost = 1;
		rq_weight = NICE_0_LOAD;
@@ -1618,7 +1614,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
static int tg_shares_up(struct task_group *tg, void *data)
{
	unsigned long weight, rq_weight = 0, shares = 0;
-	struct update_shares_data *usd;
+	unsigned long *usd_rq_weight;
	struct sched_domain *sd = data;
	unsigned long flags;
	int i;
@@ -1627,11 +1623,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
		return 0;

	local_irq_save(flags);
-	usd = &__get_cpu_var(update_shares_data);
+	usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());

	for_each_cpu(i, sched_domain_span(sd)) {
		weight = tg->cfs_rq[i]->load.weight;
-		usd->rq_weight[i] = weight;
+		usd_rq_weight[i] = weight;

		/*
		 * If there are currently no tasks on the cpu pretend there
@@ -1652,7 +1648,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
		shares = tg->shares;

	for_each_cpu(i, sched_domain_span(sd))
-		update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+		update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);

	local_irq_restore(flags);

@@ -9407,6 +9403,10 @@ void __init sched_init(void)
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_GROUP_SCHED */

+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+					    __alignof__(unsigned long));
+#endif
	for_each_possible_cpu(i) {
		struct rq *rq;

mm/percpu.c +17 −13
@@ -153,7 +153,10 @@ static int pcpu_reserved_chunk_limit;
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
- * allocations are done using GFP_KERNEL with pcpu_lock released.
+ * allocations are done using GFP_KERNEL with pcpu_lock released.  In
+ * general, percpu memory can't be allocated with irq off but
+ * irqsave/restore are still used in alloc path so that it can be used
+ * from early init path - sched_init() specifically.
 *
 * Free path accesses and alters only the index data structures, so it
 * can be safely called from atomic context.  When memory needs to be
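
The updated comment describes the idiom the pcpu_extend_area_map() changes below implement: because the helper drops and retakes pcpu_lock around a GFP_KERNEL allocation, the caller's saved IRQ flags must travel down by pointer and be re-saved on relock. A minimal standalone sketch of that idiom, with hypothetical names throughout:

static DEFINE_SPINLOCK(demo_lock);		/* hypothetical */

static int demo_extend(unsigned long *flags)	/* hypothetical helper */
{
	void *p;

	/* drop the lock and restore IRQ state before a sleeping allocation */
	spin_unlock_irqrestore(&demo_lock, *flags);
	p = kzalloc(64, GFP_KERNEL);
	/* retake the lock; *flags is refreshed for the caller */
	spin_lock_irqsave(&demo_lock, *flags);
	if (!p)
		return -ENOMEM;
	kfree(p);
	return 0;
}

static int demo_caller(void)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&demo_lock, flags);
	ret = demo_extend(&flags);	/* flags passed by pointer */
	spin_unlock_irqrestore(&demo_lock, flags);
	return ret;
}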
@@ -366,7 +369,7 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
-static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk, unsigned long *flags)
{
	int new_alloc;
	int *new;
@@ -376,7 +379,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, *flags);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
@@ -384,7 +387,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk)

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
-		spin_lock_irq(&pcpu_lock);
+		spin_lock_irqsave(&pcpu_lock, *flags);
		return -ENOMEM;
	}

@@ -393,7 +396,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
	 * could have happened inbetween, so map_used couldn't have
	 * grown.
	 */
-	spin_lock_irq(&pcpu_lock);
+	spin_lock_irqsave(&pcpu_lock, *flags);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
@@ -1047,6 +1050,7 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off;
+	unsigned long flags;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -1055,13 +1059,13 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
	}

	mutex_lock(&pcpu_alloc_mutex);
-	spin_lock_irq(&pcpu_lock);
+	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
-		    pcpu_extend_area_map(chunk) < 0) {
+		    pcpu_extend_area_map(chunk, &flags) < 0) {
			err = "failed to extend area map of reserved chunk";
			goto fail_unlock;
		}
@@ -1079,7 +1083,7 @@ restart:
			if (size > chunk->contig_hint)
				continue;

-			switch (pcpu_extend_area_map(chunk)) {
+			switch (pcpu_extend_area_map(chunk, &flags)) {
			case 0:
				break;
			case 1:
@@ -1096,7 +1100,7 @@ restart:
	}

	/* hmmm... no space left, create a new chunk */
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);

	chunk = alloc_pcpu_chunk();
	if (!chunk) {
@@ -1104,16 +1108,16 @@ restart:
		goto fail_unlock_mutex;
	}

-	spin_lock_irq(&pcpu_lock);
+	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
-		spin_lock_irq(&pcpu_lock);
+		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_free_area(chunk, off);
		err = "failed to populate";
		goto fail_unlock;
@@ -1125,7 +1129,7 @@ area_found:
	return __addr_to_pcpu_ptr(chunk->base_addr + off);

fail_unlock:
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	if (warn_limit) {
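
Taken together, the spin_lock_irq() to spin_lock_irqsave() conversions are what let sched_init() call __alloc_percpu() while early boot still has IRQs disabled: the allocator now preserves the caller's IRQ state instead of unconditionally reenabling interrupts on unlock. A hedged sketch of such an early caller (names hypothetical, not from the patch):

static unsigned long *early_stats;		/* hypothetical */

static void __init early_stats_init(void)	/* may run with IRQs off */
{
	/* safe now: pcpu_alloc() uses irqsave/irqrestore internally */
	early_stats = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
				     __alignof__(unsigned long));
	BUG_ON(!early_stats);
}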