Commit 3493e84d authored by Linus Torvalds

Merge branch 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf_counter: Report the cloning task as parent on perf_counter_fork()
  perf_counter: Fix an ipi-deadlock
  perf: Rework/fix the whole read vs group stuff
  perf_counter: Fix swcounter context invariance
  perf report: Don't show unresolved DSOs and symbols when -S/-d is used
  perf tools: Add a general option to enable raw sample records
  perf tools: Add a per tracepoint counter attribute to get raw sample
  perf_counter: Provide hw_perf_counter_setup_online() APIs
  perf list: Fix large list output by using the pager
  perf_counter, x86: Fix/improve apic fallback
  perf record: Add missing -C option support for specifying profile cpu
  perf tools: Fix dso__new handle() to handle deleted DSOs
  perf tools: Fix fallback to cplus_demangle() when bfd_demangle() is not available
  perf report: Show the tid too in -D
  perf record: Fix .tid and .pid fill-in when synthesizing events
  perf_counter, x86: Fix generic cache events on P6-mobile CPUs
  perf_counter, x86: Fix lapic printk message
parents 919aa96a 94d5d1b2
arch/x86/Kconfig (+1 −1)

@@ -24,6 +24,7 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select HAVE_PERF_COUNTERS if (!M386 && !M486)
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -742,7 +743,6 @@ config X86_UP_IOAPIC
 config X86_LOCAL_APIC
 	def_bool y
 	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
-	select HAVE_PERF_COUNTERS if (!M386 && !M486)
 
 config X86_IO_APIC
 	def_bool y
arch/x86/kernel/cpu/perf_counter.c (+33 −7)

@@ -55,6 +55,7 @@ struct x86_pmu {
 	int		num_counters_fixed;
 	int		counter_bits;
 	u64		counter_mask;
+	int		apic;
 	u64		max_period;
 	u64		intel_ctrl;
 };
@@ -72,8 +73,8 @@ static const u64 p6_perfmon_event_map[] =
 {
   [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
   [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0000,
-  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0000,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
   [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
   [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
@@ -613,6 +614,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
 
 static bool reserve_pmc_hardware(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	int i;
 
 	if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -627,9 +629,11 @@ static bool reserve_pmc_hardware(void)
 		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
 			goto eventsel_fail;
 	}
+#endif
 
 	return true;
 
+#ifdef CONFIG_X86_LOCAL_APIC
 eventsel_fail:
 	for (i--; i >= 0; i--)
 		release_evntsel_nmi(x86_pmu.eventsel + i);
@@ -644,10 +648,12 @@ static bool reserve_pmc_hardware(void)
 		enable_lapic_nmi_watchdog();
 
 	return false;
+#endif
 }
 
 static void release_pmc_hardware(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	int i;
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
@@ -657,6 +663,7 @@ static void release_pmc_hardware(void)
 
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		enable_lapic_nmi_watchdog();
+#endif
 }
 
 static void hw_perf_counter_destroy(struct perf_counter *counter)
@@ -748,6 +755,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 		hwc->sample_period = x86_pmu.max_period;
 		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
+	} else {
+		/*
+		 * If we have a PMU initialized but no APIC
+		 * interrupts, we cannot sample hardware
+		 * counters (user-space has to fall back and
+		 * sample via a hrtimer based software counter):
+		 */
+		if (!x86_pmu.apic)
+			return -EOPNOTSUPP;
 	}
 
 	counter->destroy = hw_perf_counter_destroy;
@@ -1449,18 +1465,22 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
 
 void set_perf_counter_pending(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
+#endif
 }
 
 void perf_counters_lapic_init(void)
 {
-	if (!x86_pmu_initialized())
+#ifdef CONFIG_X86_LOCAL_APIC
+	if (!x86_pmu.apic || !x86_pmu_initialized())
 		return;
 
 	/*
 	 * Always use NMI for PMU
 	 */
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
 }
 
 static int __kprobes
@@ -1484,7 +1504,9 @@ perf_counter_nmi_handler(struct notifier_block *self,
 
 	regs = args->regs;
 
+#ifdef CONFIG_X86_LOCAL_APIC
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
 	/*
 	 * Can't rely on the handled return value to say it was our NMI, two
 	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
@@ -1515,6 +1537,7 @@ static struct x86_pmu p6_pmu = {
 	.event_map		= p6_pmu_event_map,
 	.raw_event		= p6_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
+	.apic			= 1,
 	.max_period		= (1ULL << 31) - 1,
 	.version		= 0,
 	.num_counters		= 2,
@@ -1541,6 +1564,7 @@ static struct x86_pmu intel_pmu = {
 	.event_map		= intel_pmu_event_map,
 	.raw_event		= intel_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
+	.apic			= 1,
 	/*
 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
 	 * so we install an artificial 1<<31 period regardless of
@@ -1564,6 +1588,7 @@ static struct x86_pmu amd_pmu = {
 	.num_counters		= 4,
 	.counter_bits		= 48,
 	.counter_mask		= (1ULL << 48) - 1,
+	.apic			= 1,
 	/* use highest bit to detect overflow */
 	.max_period		= (1ULL << 47) - 1,
 };
@@ -1589,13 +1614,14 @@ static int p6_pmu_init(void)
 		return -ENODEV;
 	}
 
+	x86_pmu = p6_pmu;
+
 	if (!cpu_has_apic) {
-		pr_info("no Local APIC, try rebooting with lapic");
-		return -ENODEV;
+		pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
+		pr_info("no hardware sampling interrupt available.\n");
+		x86_pmu.apic = 0;
 	}
 
-	x86_pmu				= p6_pmu;
-
 	return 0;
 }
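
The apic fallback above changes the failure mode: a PMU without a working local APIC can still count, but sampling now fails with -EOPNOTSUPP instead of perf counters being disabled entirely. Below is a minimal user-space sketch of the fallback that the comment in __hw_perf_counter_init() asks for, assuming the 2.6.31-era perf_counter ABI; the syscall wrapper and the fallback policy are illustrative, not part of this commit:

	#include <stdio.h>
	#include <string.h>
	#include <errno.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_counter.h>

	/* Hypothetical wrapper; __NR_perf_counter_open comes from the kernel headers. */
	static int perf_counter_open(struct perf_counter_attr *attr,
				     pid_t pid, int cpu, int group_fd,
				     unsigned long flags)
	{
		return syscall(__NR_perf_counter_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_counter_attr attr;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.sample_period = 100000;	/* sampling needs PMU interrupts */

		fd = perf_counter_open(&attr, 0, -1, -1, 0);
		if (fd < 0 && errno == EOPNOTSUPP) {
			/* PMU present but no APIC: sample via an hrtimer instead */
			attr.type = PERF_TYPE_SOFTWARE;
			attr.config = PERF_COUNT_SW_CPU_CLOCK;
			fd = perf_counter_open(&attr, 0, -1, -1, 0);
		}
		if (fd < 0)
			perror("perf_counter_open");
		return fd < 0;
	}
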
include/linux/perf_counter.h (+38 −11)

@@ -115,7 +115,7 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_TID				= 1U << 1,
 	PERF_SAMPLE_TIME			= 1U << 2,
 	PERF_SAMPLE_ADDR			= 1U << 3,
-	PERF_SAMPLE_GROUP			= 1U << 4,
+	PERF_SAMPLE_READ			= 1U << 4,
 	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
 	PERF_SAMPLE_ID				= 1U << 6,
 	PERF_SAMPLE_CPU				= 1U << 7,
@@ -127,16 +127,32 @@ enum perf_counter_sample_format {
 };
 
 /*
- * Bits that can be set in attr.read_format to request that
- * reads on the counter should return the indicated quantities,
- * in increasing order of bit value, after the counter value.
+ * The format of the data returned by read() on a perf counter fd,
+ * as specified by attr.read_format:
+ *
+ * struct read_format {
+ * 	{ u64		value;
+ * 	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
+ * 	  { u64		time_running; } && PERF_FORMAT_RUNNING
+ * 	  { u64		id;           } && PERF_FORMAT_ID
+ * 	} && !PERF_FORMAT_GROUP
+ *
+ * 	{ u64		nr;
+ * 	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
+ * 	  { u64		time_running; } && PERF_FORMAT_RUNNING
+ * 	  { u64		value;
+ * 	    { u64	id;           } && PERF_FORMAT_ID
+ * 	  }		cntr[nr];
+ * 	} && PERF_FORMAT_GROUP
+ * };
  */
 enum perf_counter_read_format {
 	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
 	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
 	PERF_FORMAT_ID				= 1U << 2,
+	PERF_FORMAT_GROUP			= 1U << 3,
 
-	PERF_FORMAT_MAX = 1U << 3, 		/* non-ABI */
+	PERF_FORMAT_MAX = 1U << 4, 		/* non-ABI */
 };
 
 #define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
@@ -343,10 +359,8 @@ enum perf_event_type {
 	 * struct {
 	 * 	struct perf_event_header	header;
 	 * 	u32				pid, tid;
-	 * 	u64				value;
-	 * 	{ u64		time_enabled; 	} && PERF_FORMAT_ENABLED
-	 * 	{ u64		time_running; 	} && PERF_FORMAT_RUNNING
-	 * 	{ u64		parent_id;	} && PERF_FORMAT_ID
+	 *
+	 * 	struct read_format		values;
 	 * };
 	 */
 	PERF_EVENT_READ			= 8,
@@ -364,11 +378,22 @@ enum perf_event_type {
 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
 	 * 	{ u64			period;   } && PERF_SAMPLE_PERIOD
 	 *
-	 *	{ u64			nr;
-	 *	  { u64 id, val; }	cnt[nr];  } && PERF_SAMPLE_GROUP
+	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
 	 *
 	 *	{ u64			nr,
 	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
 	 *
+	 * 	#
+	 * 	# The RAW record below is opaque data wrt the ABI
+	 * 	#
+	 * 	# That is, the ABI doesn't make any promises wrt to
+	 * 	# the stability of its content, it may vary depending
+	 * 	# on event, hardware, kernel version and phase of
+	 * 	# the moon.
+	 * 	#
+	 * 	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
+	 * 	#
+	 *
 	 *	{ u32			size;
 	 *	  char                  data[size];}&& PERF_SAMPLE_RAW
 	 * };
@@ -694,6 +719,8 @@ struct perf_sample_data {
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
 				 struct perf_sample_data *data);
+extern void perf_counter_output(struct perf_counter *counter, int nmi,
+				struct perf_sample_data *data);
 
 /*
  * Return 1 for a software counter, 0 for a hardware counter
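
For reference, the PERF_FORMAT_GROUP layout documented in the new comment can be mirrored in user space roughly as follows. This is a sketch assuming read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID | PERF_FORMAT_GROUP; the struct names are made up for illustration, only the sequence of u64 values is ABI:

	#include <stdint.h>

	/* One entry per group member: value, then id because PERF_FORMAT_ID is set. */
	struct read_entry {
		uint64_t	value;
		uint64_t	id;
	};

	/* Mirrors the "&& PERF_FORMAT_GROUP" branch of the comment above. */
	struct group_read {
		uint64_t		nr;		/* number of entries that follow */
		uint64_t		time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
		uint64_t		time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
		struct read_entry	cntr[];		/* cntr[nr]: leader first, then siblings */
	};

A read() on the group leader's fd then fills one struct group_read of (3 + 2 * nr) * 8 bytes; the kernel side below rejects smaller buffers outright.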
kernel/perf_counter.c (+236 −102)

@@ -88,6 +88,7 @@ void __weak hw_perf_disable(void) { barrier(); }
 void __weak hw_perf_enable(void)		{ barrier(); }
 
 void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
+void __weak hw_perf_counter_setup_online(int cpu)	{ barrier(); }
 
 int __weak
 hw_perf_group_sched_in(struct perf_counter *group_leader,
@@ -306,6 +307,10 @@ counter_sched_out(struct perf_counter *counter,
 		return;
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
+	if (counter->pending_disable) {
+		counter->pending_disable = 0;
+		counter->state = PERF_COUNTER_STATE_OFF;
+	}
 	counter->tstamp_stopped = ctx->time;
 	counter->pmu->disable(counter);
 	counter->oncpu = -1;
@@ -1691,7 +1696,32 @@ static int perf_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static u64 perf_counter_read_tree(struct perf_counter *counter)
+static int perf_counter_read_size(struct perf_counter *counter)
+{
+	int entry = sizeof(u64); /* value */
+	int size = 0;
+	int nr = 1;
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_ID)
+		entry += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_GROUP) {
+		nr += counter->group_leader->nr_siblings;
+		size += sizeof(u64);
+	}
+
+	size += entry * nr;
+
+	return size;
+}
+
+static u64 perf_counter_read_value(struct perf_counter *counter)
 {
 	struct perf_counter *child;
 	u64 total = 0;
@@ -1703,14 +1733,96 @@ static u64 perf_counter_read_tree(struct perf_counter *counter)
 	return total;
 }
 
+static int perf_counter_read_entry(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	int n = 0, count = 0;
+	u64 values[2];
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	count = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, count))
+		return -EFAULT;
+
+	return count;
+}
+
+static int perf_counter_read_group(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	int n = 0, size = 0, err = -EFAULT;
+	u64 values[3];
+
+	values[n++] = 1 + leader->nr_siblings;
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = leader->total_time_enabled +
+			atomic64_read(&leader->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = leader->total_time_running +
+			atomic64_read(&leader->child_total_time_running);
+	}
+
+	size = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, size))
+		return -EFAULT;
+
+	err = perf_counter_read_entry(leader, read_format, buf + size);
+	if (err < 0)
+		return err;
+
+	size += err;
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		err = perf_counter_read_entry(counter, read_format,
+				buf + size);
+		if (err < 0)
+			return err;
+
+		size += err;
+	}
+
+	return size;
+}
+
+static int perf_counter_read_one(struct perf_counter *counter,
+				 u64 read_format, char __user *buf)
+{
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	if (copy_to_user(buf, values, n * sizeof(u64)))
+		return -EFAULT;
+
+	return n * sizeof(u64);
+}
+
 /*
  * Read the performance counter - simple non blocking version for now
  */
 static ssize_t
 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 {
-	u64 values[4];
-	int n;
+	u64 read_format = counter->attr.read_format;
+	int ret;
 
 	/*
 	 * Return end-of-file for a read on a counter that is in
@@ -1720,28 +1832,18 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	if (counter->state == PERF_COUNTER_STATE_ERROR)
 		return 0;
 
+	if (count < perf_counter_read_size(counter))
+		return -ENOSPC;
+
 	WARN_ON_ONCE(counter->ctx->parent_ctx);
 	mutex_lock(&counter->child_mutex);
-	values[0] = perf_counter_read_tree(counter);
-	n = 1;
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = counter->total_time_enabled +
-			atomic64_read(&counter->child_total_time_enabled);
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = counter->total_time_running +
-			atomic64_read(&counter->child_total_time_running);
-	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = primary_counter_id(counter);
+	if (read_format & PERF_FORMAT_GROUP)
+		ret = perf_counter_read_group(counter, read_format, buf);
+	else
+		ret = perf_counter_read_one(counter, read_format, buf);
 	mutex_unlock(&counter->child_mutex);
 
-	if (count < n * sizeof(u64))
-		return -EINVAL;
-	count = n * sizeof(u64);
-
-	if (copy_to_user(buf, values, count))
-		return -EFAULT;
-
-	return count;
+	return ret;
 }
 
 static ssize_t
@@ -2245,7 +2347,7 @@ static void perf_pending_counter(struct perf_pending_entry *entry)
 
 	if (counter->pending_disable) {
 		counter->pending_disable = 0;
-		perf_counter_disable(counter);
+		__perf_counter_disable(counter);
 	}
 
 	if (counter->pending_wakeup) {
@@ -2630,7 +2732,80 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
 	return task_pid_nr_ns(p, counter->ns);
 }
 
-static void perf_counter_output(struct perf_counter *counter, int nmi,
+static void perf_output_read_one(struct perf_output_handle *handle,
+				 struct perf_counter *counter)
+{
+	u64 read_format = counter->attr.read_format;
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = atomic64_read(&counter->count);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+}
+
+/*
+ * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
+ */
+static void perf_output_read_group(struct perf_output_handle *handle,
+			    struct perf_counter *counter)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	u64 read_format = counter->attr.read_format;
+	u64 values[5];
+	int n = 0;
+
+	values[n++] = 1 + leader->nr_siblings;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		values[n++] = leader->total_time_enabled;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		values[n++] = leader->total_time_running;
+
+	if (leader != counter)
+		leader->pmu->read(leader);
+
+	values[n++] = atomic64_read(&leader->count);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(leader);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		n = 0;
+
+		if (sub != counter)
+			sub->pmu->read(sub);
+
+		values[n++] = atomic64_read(&sub->count);
+		if (read_format & PERF_FORMAT_ID)
+			values[n++] = primary_counter_id(sub);
+
+		perf_output_copy(handle, values, n * sizeof(u64));
+	}
+}
+
+static void perf_output_read(struct perf_output_handle *handle,
+			     struct perf_counter *counter)
+{
+	if (counter->attr.read_format & PERF_FORMAT_GROUP)
+		perf_output_read_group(handle, counter);
+	else
+		perf_output_read_one(handle, counter);
+}
+
+void perf_counter_output(struct perf_counter *counter, int nmi,
 				struct perf_sample_data *data)
 {
 	int ret;
@@ -2641,10 +2816,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	struct {
 		u32 pid, tid;
 	} tid_entry;
-	struct {
-		u64 id;
-		u64 counter;
-	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
 	int callchain_size = 0;
 	u64 time;
@@ -2699,10 +2870,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		header.size += sizeof(u64);
 
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		header.size += sizeof(u64) +
-			counter->nr_siblings * sizeof(group_entry);
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		header.size += perf_counter_read_size(counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		callchain = perf_callchain(data->regs);
@@ -2759,26 +2928,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		perf_output_put(&handle, data->period);
 
-	/*
-	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
-	 */
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		struct perf_counter *leader, *sub;
-		u64 nr = counter->nr_siblings;
-
-		perf_output_put(&handle, nr);
-
-		leader = counter->group_leader;
-		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-			if (sub != counter)
-				sub->pmu->read(sub);
-
-			group_entry.id = primary_counter_id(sub);
-			group_entry.counter = atomic64_read(&sub->count);
-
-			perf_output_put(&handle, group_entry);
-		}
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		perf_output_read(&handle, counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		if (callchain)
@@ -2817,8 +2968,6 @@ struct perf_read_event {
 
 	u32				pid;
 	u32				tid;
-	u64				value;
-	u64				format[3];
 };
 
 static void
@@ -2830,34 +2979,20 @@ perf_counter_read_event(struct perf_counter *counter,
 		.header = {
 			.type = PERF_EVENT_READ,
 			.misc = 0,
-			.size = sizeof(event) - sizeof(event.format),
+			.size = sizeof(event) + perf_counter_read_size(counter),
 		},
 		.pid = perf_counter_pid(counter, task),
 		.tid = perf_counter_tid(counter, task),
-		.value = atomic64_read(&counter->count),
 	};
-	int ret, i = 0;
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_enabled;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_running;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = primary_counter_id(counter);
-	}
+	int ret;
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
 	if (ret)
 		return;
 
-	perf_output_copy(&handle, &event, event.header.size);
+	perf_output_put(&handle, event);
+	perf_output_read(&handle, counter);
 
 	perf_output_end(&handle);
 }
@@ -2893,10 +3028,10 @@ static void perf_counter_task_output(struct perf_counter *counter,
 		return;
 
 	task_event->event.pid = perf_counter_pid(counter, task);
-	task_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+	task_event->event.ppid = perf_counter_pid(counter, current);
 
 	task_event->event.tid = perf_counter_tid(counter, task);
-	task_event->event.ptid = perf_counter_tid(counter, task->real_parent);
+	task_event->event.ptid = perf_counter_tid(counter, current);
 
 	perf_output_put(&handle, task_event->event);
 	perf_output_end(&handle);
@@ -3443,40 +3578,32 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
 {
-	struct perf_counter_context *ctx;
-	unsigned long flags;
-	int count;
-
+	/*
+	 * The counter is active, we're good!
+	 */
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		return 1;
 
+	/*
+	 * The counter is off/error, not counting.
+	 */
 	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
 		return 0;
 
 	/*
-	 * If the counter is inactive, it could be just because
-	 * its task is scheduled out, or because it's in a group
-	 * which could not go on the PMU.  We want to count in
-	 * the first case but not the second.  If the context is
-	 * currently active then an inactive software counter must
-	 * be the second case.  If it's not currently active then
-	 * we need to know whether the counter was active when the
-	 * context was last active, which we can determine by
-	 * comparing counter->tstamp_stopped with ctx->time.
-	 *
-	 * We are within an RCU read-side critical section,
-	 * which protects the existence of *ctx.
+	 * The counter is inactive, if the context is active
+	 * we're part of a group that didn't make it on the 'pmu',
+	 * not counting.
 	 */
-	ctx = counter->ctx;
-	spin_lock_irqsave(&ctx->lock, flags);
-	count = 1;
-	/* Re-check state now we have the lock */
-	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
-	    counter->ctx->is_active ||
-	    counter->tstamp_stopped < ctx->time)
-		count = 0;
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	if (counter->ctx->is_active)
+		return 0;
 
-	return count;
+	/*
+	 * We're inactive and the context is too, this means the
+	 * task is scheduled out, we're counting events that happen
+	 * to us, like migration events.
	 */
+	return 1;
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
@@ -3928,9 +4055,9 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	atomic64_set(&hwc->period_left, hwc->sample_period);
 
 	/*
-	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
+	 * we currently do not support PERF_FORMAT_GROUP on inherited counters
 	 */
-	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
+	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
 		goto done;
 
 	switch (attr->type) {
@@ -4592,6 +4719,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 		perf_counter_init_cpu(cpu);
 		break;
 
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		hw_perf_counter_setup_online(cpu);
+		break;
+
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		perf_counter_exit_cpu(cpu);
@@ -4616,6 +4748,8 @@ void __init perf_counter_init(void)
 {
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
+	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
+			(void *)(long)smp_processor_id());
 	register_cpu_notifier(&perf_cpu_nb);
 }
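
To make the new read-side sizing concrete, here is the perf_counter_read_size() arithmetic worked through for a hypothetical group leader with two siblings and read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID | PERF_FORMAT_GROUP:

	/*
	 * entry = sizeof(u64);	   8 bytes: the value itself
	 * entry += sizeof(u64);	  16 bytes: + id       (PERF_FORMAT_ID)
	 * size  = sizeof(u64);	   8 bytes: time_enabled (PERF_FORMAT_TOTAL_TIME_ENABLED)
	 * size += sizeof(u64);	  16 bytes: + nr header  (PERF_FORMAT_GROUP)
	 * nr    = 1 + 2;		   3 entries: leader plus two siblings
	 * size += entry * nr;	  16 + 16 * 3 = 64 bytes total
	 */

perf_read_hw() now fails a read() whose buffer is smaller than this up front (-ENOSPC rather than the old -EINVAL), and perf_counter_output() uses the same function to size the PERF_SAMPLE_READ payload, so the two paths cannot drift apart.
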
tools/perf/Makefile (+18 −11)

@@ -382,24 +382,31 @@ endif
 ifdef NO_DEMANGLE
 	BASIC_CFLAGS += -DNO_DEMANGLE
 else
-
 	has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd > /dev/null 2>&1 && echo y")
 
-	has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty > /dev/null 2>&1 && echo y")
-
-	has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty -lz > /dev/null 2>&1 && echo y")
-
 	ifeq ($(has_bfd),y)
 		EXTLIBS += -lbfd
-	else ifeq ($(has_bfd_iberty),y)
-		EXTLIBS += -lbfd -liberty
-	else ifeq ($(has_bfd_iberty_z),y)
-		EXTLIBS += -lbfd -liberty -lz
 	else
-		msg := $(warning No bfd.h/libbfd found, install binutils-dev[el] to gain symbol demangling)
-		BASIC_CFLAGS += -DNO_DEMANGLE
+		has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty > /dev/null 2>&1 && echo y")
+		ifeq ($(has_bfd_iberty),y)
+			EXTLIBS += -lbfd -liberty
+		else
+			has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty -lz > /dev/null 2>&1 && echo y")
+			ifeq ($(has_bfd_iberty_z),y)
+				EXTLIBS += -lbfd -liberty -lz
+			else
+				has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -liberty > /dev/null 2>&1 && echo y")
+				ifeq ($(has_cplus_demangle),y)
+					EXTLIBS += -liberty
+					BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
+				else
+					msg := $(warning No bfd.h/libbfd found, install binutils-dev[el] to gain symbol demangling)
+					BASIC_CFLAGS += -DNO_DEMANGLE
+				endif
+			endif
+		endif
 	endif
 endif
 
 ifndef CC_LD_DYNPATH
 	ifdef NO_R_TO_GCC_LINKER
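
Each probe in the cascade above compiles a throwaway C program against one candidate library mix; the new final fallback tests for cplus_demangle() in libiberty alone. Written out as a standalone file, that added probe is equivalent to the following (assuming a link line along the lines of "cc probe.c -liberty", per the $(shell ...) one-liner above):

	/*
	 * cplus_demangle() lives in libiberty; like the Makefile probe, this
	 * uses a forward declaration instead of a header, so only the link
	 * step is being tested.
	 */
	extern char *cplus_demangle(const char *mangled, int options);

	int main(void)
	{
		cplus_demangle(0, 0);
		return 0;
	}

If it links, the build defines HAVE_CPLUS_DEMANGLE and adds -liberty to EXTLIBS; otherwise demangling is compiled out with -DNO_DEMANGLE, exactly as before.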