
Commit 1d5844ba authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "sched: hmp: Optimize cycle counter reads"

parents 2d08b001 259636e7
+39 −7
@@ -2521,10 +2521,42 @@ static inline u32 predict_and_update_buckets(struct rq *rq,
 	return pred_demand;
 }
 
-static void update_task_cpu_cycles(struct task_struct *p, int cpu)
+#define THRESH_CC_UPDATE (2 * NSEC_PER_USEC)
+
+/*
+ * Assumes rq_lock is held and wallclock was recorded in the same critical
+ * section as this function's invocation.
+ */
+static inline u64 read_cycle_counter(int cpu, u64 wallclock)
+{
+	struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
+	u64 delta;
+
+	if (unlikely(!cluster))
+		return cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+
+	/*
+	 * Why don't we need locking here? Let's say that delta is negative
+	 * because some other CPU happened to update last_cc_update with a
+	 * more recent timestamp. We simply read the counter again in that case
+	 * with no harmful side effects. This can happen if there is an FIQ
+	 * between when we read the wallclock and when we use it here.
+	 */
+	delta = wallclock - atomic64_read(&cluster->last_cc_update);
+	if (delta > THRESH_CC_UPDATE) {
+		atomic64_set(&cluster->cycles,
+			     cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu));
+		atomic64_set(&cluster->last_cc_update, wallclock);
+	}
+
+	return atomic64_read(&cluster->cycles);
+}
+
+static void update_task_cpu_cycles(struct task_struct *p, int cpu,
+				   u64 wallclock)
 {
 	if (use_cycle_counter)
-		p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+		p->cpu_cycles = read_cycle_counter(cpu, wallclock);
 }
 
 static void
@@ -2542,7 +2574,7 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 		return;
 	}
 
-	cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+	cur_cycles = read_cycle_counter(cpu, wallclock);
 
 	/*
 	 * If current task is idle task and irqtime == 0 CPU was
@@ -2834,7 +2866,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 	update_window_start(rq, wallclock);
 
 	if (!p->ravg.mark_start) {
-		update_task_cpu_cycles(p, cpu_of(rq));
+		update_task_cpu_cycles(p, cpu_of(rq), wallclock);
 		goto done;
 	}
 
@@ -2902,7 +2934,7 @@ void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
 	if (is_idle_task(curr)) {
 		/* We're here without rq->lock held, IRQ disabled */
 		raw_spin_lock(&rq->lock);
-		update_task_cpu_cycles(curr, cpu);
+		update_task_cpu_cycles(curr, cpu, sched_ktime_clock());
 		raw_spin_unlock(&rq->lock);
 	}
 }
@@ -2947,7 +2979,7 @@ void mark_task_starting(struct task_struct *p)
 	p->ravg.mark_start = p->last_wake_ts = wallclock;
 	p->last_cpu_selected_ts = wallclock;
 	p->last_switch_out_ts = 0;
-	update_task_cpu_cycles(p, cpu_of(rq));
+	update_task_cpu_cycles(p, cpu_of(rq), wallclock);
 }
 
 void set_window_start(struct rq *rq)
@@ -3560,7 +3592,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
 	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
 			 wallclock, 0);
 
-	update_task_cpu_cycles(p, new_cpu);
+	update_task_cpu_cycles(p, new_cpu, wallclock);
 
 	new_task = is_new_task(p);
 	/* Protected by rq_lock */
+2 −0
@@ -397,6 +397,8 @@ struct sched_cluster {
 	unsigned int static_cluster_pwr_cost;
 	int notifier_sent;
 	bool wake_up_idle;
+	atomic64_t last_cc_update;
+	atomic64_t cycles;
 };
 
 extern unsigned long all_cluster_ids[];
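
For illustration only, here is a minimal userspace C sketch of the caching pattern that read_cycle_counter() above implements: refresh the expensive counter read at most once per threshold interval and otherwise return the cached value. The names in this sketch (cached_counter, cached_read, raw_read_counter, now_ns, THRESH_NS) are hypothetical stand-ins, not part of this patch or of the kernel API; the kernel version keeps the cache per sched_cluster and uses atomic64_read/atomic64_set under rq->lock, while this sketch uses plain C11 atomics.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define THRESH_NS (2 * 1000ULL)		/* analogue of THRESH_CC_UPDATE: 2 usec in ns */

struct cached_counter {
	_Atomic uint64_t last_update;	/* timestamp (ns) of the last refresh */
	_Atomic uint64_t value;		/* most recently read counter value */
};

/* Hypothetical stand-in for the expensive counter read (a register or
 * MMIO access in the kernel case); here it just returns a growing
 * dummy value. */
static uint64_t raw_read_counter(void)
{
	static _Atomic uint64_t fake;

	return atomic_fetch_add(&fake, 1000) + 1000;
}

static uint64_t cached_read(struct cached_counter *c, uint64_t now)
{
	/*
	 * No lock: if another thread already stored a newer last_update,
	 * the unsigned subtraction wraps to a huge value, the threshold
	 * test passes and the counter is simply read again, which is
	 * harmless. This mirrors the reasoning in the patch's comment.
	 */
	uint64_t delta = now - atomic_load(&c->last_update);

	if (delta > THRESH_NS) {
		atomic_store(&c->value, raw_read_counter());
		atomic_store(&c->last_update, now);
	}

	return atomic_load(&c->value);
}

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	struct cached_counter cc = { 0 };

	/* Back-to-back calls within 2 usec reuse the cached value. */
	printf("%llu\n", (unsigned long long)cached_read(&cc, now_ns()));
	printf("%llu\n", (unsigned long long)cached_read(&cc, now_ns()));
	return 0;
}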