
Commit 891c3970 authored by Oleg Nesterov, committed by Ingo Molnar

uprobes: Teach build_probe_list() to consider the range



Currently build_probe_list() builds the list of all uprobes
attached to the given inode, and the caller has to filter out
those that do not fall into the [start,end) range; this is
sub-optimal.

This patch turns find_least_offset_node() into
find_node_in_range(), which returns the first node inside the
[min,max] range, and changes build_probe_list() to use this node
as a starting point for rb_prev() and rb_next() walks that find
all the other nodes the caller needs. The resulting list is no
longer sorted, but we do not care.
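
The walk is easy to picture with a small user-space sketch: a plain
sorted array stands in for the rbtree, a binary search plays the role
of find_node_in_range(), and the two scans around the hit mirror the
rb_prev()/rb_next() loops. This is an illustration of the pattern
only, not the kernel code; all names below are made up.

/*
 * Binary-search to *any* element inside [min, max], then scan left and
 * right from the hit to collect the rest of the range.  Like the
 * kernel walk, the collected order is unsorted (left walk first,
 * descending; then right walk, ascending), which is fine here too.
 */
#include <stdio.h>

/* Return the index of some element in [min, max], or -1 if none. */
static int find_index_in_range(const long *off, int nr, long min, long max)
{
	int lo = 0, hi = nr - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (max < off[mid])
			hi = mid - 1;
		else if (min > off[mid])
			lo = mid + 1;
		else
			return mid;	/* min <= off[mid] <= max */
	}
	return -1;
}

int main(void)
{
	long off[] = { 3, 7, 10, 15, 22, 40 };	/* sorted "offsets" */
	int nr = sizeof(off) / sizeof(off[0]);
	long min = 8, max = 23;
	int n, t;

	n = find_index_in_range(off, nr, min, max);
	if (n >= 0) {
		/* walk left from the hit, like the rb_prev() loop */
		for (t = n; t >= 0 && off[t] >= min; t--)
			printf("collect %ld\n", off[t]);
		/* walk right from the hit, like the rb_next() loop */
		for (t = n + 1; t < nr && off[t] <= max; t++)
			printf("collect %ld\n", off[t]);
	}
	return 0;	/* prints 10, 15, 22 */
}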

This can speed up both build_probe_list() and its callers, but
there is another reason to introduce find_node_in_range(): it
can be used to figure out whether the given vma has uprobes or
not; this will be needed soon.

While at it, shift INIT_LIST_HEAD(tmp_list) into
build_probe_list().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Anton Arapov <anton@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20120729182240.GA20352@redhat.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 89133786
kernel/events/uprobes.c  +50 −53
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -939,59 +939,66 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 		put_uprobe(uprobe);
 }
 
-/*
- * Of all the nodes that correspond to the given inode, return the node
- * with the least offset.
- */
-static struct rb_node *find_least_offset_node(struct inode *inode)
+static struct rb_node *
+find_node_in_range(struct inode *inode, loff_t min, loff_t max)
 {
-	struct uprobe u = { .inode = inode, .offset = 0};
 	struct rb_node *n = uprobes_tree.rb_node;
-	struct rb_node *close_node = NULL;
-	struct uprobe *uprobe;
-	int match;
 
 	while (n) {
-		uprobe = rb_entry(n, struct uprobe, rb_node);
-		match = match_uprobe(&u, uprobe);
+		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
 
-		if (uprobe->inode == inode)
-			close_node = n;
-
-		if (!match)
-			return close_node;
-
-		if (match < 0)
+		if (inode < u->inode) {
 			n = n->rb_left;
-		else
+		} else if (inode > u->inode) {
 			n = n->rb_right;
+		} else {
+			if (max < u->offset)
+				n = n->rb_left;
+			else if (min > u->offset)
+				n = n->rb_right;
+			else
+				break;
+		}
 	}
 
-	return close_node;
+	return n;
 }
 
 /*
- * For a given inode, build a list of probes that need to be inserted.
+ * For a given range in vma, build a list of probes that need to be inserted.
  */
-static void build_probe_list(struct inode *inode, struct list_head *head)
+static void build_probe_list(struct inode *inode,
+				struct vm_area_struct *vma,
+				unsigned long start, unsigned long end,
+				struct list_head *head)
 {
-	struct uprobe *uprobe;
+	loff_t min, max;
 	unsigned long flags;
-	struct rb_node *n;
+	struct rb_node *n, *t;
+	struct uprobe *u;
 
-	spin_lock_irqsave(&uprobes_treelock, flags);
-
-	n = find_least_offset_node(inode);
+	INIT_LIST_HEAD(head);
+	min = ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + start - vma->vm_start;
+	max = min + (end - start) - 1;
 
-	for (; n; n = rb_next(n)) {
-		uprobe = rb_entry(n, struct uprobe, rb_node);
-		if (uprobe->inode != inode)
-			break;
-
-		list_add(&uprobe->pending_list, head);
-		atomic_inc(&uprobe->ref);
+	spin_lock_irqsave(&uprobes_treelock, flags);
+	n = find_node_in_range(inode, min, max);
+	if (n) {
+		for (t = n; t; t = rb_prev(t)) {
+			u = rb_entry(t, struct uprobe, rb_node);
+			if (u->inode != inode || u->offset < min)
+				break;
+			list_add(&u->pending_list, head);
+			atomic_inc(&u->ref);
+		}
+		for (t = n; (t = rb_next(t)); ) {
+			u = rb_entry(t, struct uprobe, rb_node);
+			if (u->inode != inode || u->offset > max)
+				break;
+			list_add(&u->pending_list, head);
+			atomic_inc(&u->ref);
+		}
 	}
-
 	spin_unlock_irqrestore(&uprobes_treelock, flags);
 }
 
@@ -1021,9 +1028,8 @@ int uprobe_mmap(struct vm_area_struct *vma)
 	if (!inode)
 		return 0;
 
-	INIT_LIST_HEAD(&tmp_list);
 	mutex_lock(uprobes_mmap_hash(inode));
-	build_probe_list(inode, &tmp_list);
+	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
 
 	ret = 0;
 	count = 0;
@@ -1032,11 +1038,6 @@ int uprobe_mmap(struct vm_area_struct *vma)
 		if (!ret) {
 			loff_t vaddr = vma_address(vma, uprobe->offset);
 
-			if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
-				put_uprobe(uprobe);
-				continue;
-			}
-
 			ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
 			/*
 			 * We can race against uprobe_register(), see the
@@ -1092,21 +1093,17 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 	if (!inode)
 		return;
 
-	INIT_LIST_HEAD(&tmp_list);
 	mutex_lock(uprobes_mmap_hash(inode));
-	build_probe_list(inode, &tmp_list);
+	build_probe_list(inode, vma, start, end, &tmp_list);
 
 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
 		loff_t vaddr = vma_address(vma, uprobe->offset);
-
-		if (vaddr >= start && vaddr < end) {
-			/*
-			 * An unregister could have removed the probe before
-			 * unmap. So check before we decrement the count.
-			 */
-			if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
-				atomic_dec(&vma->vm_mm->uprobes_state.count);
-		}
+		/*
+		 * An unregister could have removed the probe before
+		 * unmap. So check before we decrement the count.
+		 */
+		if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
+			atomic_dec(&vma->vm_mm->uprobes_state.count);
 		put_uprobe(uprobe);
 	}
 	mutex_unlock(uprobes_mmap_hash(inode));
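
Note that the min/max values handed to find_node_in_range() are file
offsets, not virtual addresses. The arithmetic in the new
build_probe_list() can be checked with a stand-alone sketch; this is a
user-space illustration only, it assumes 4K pages (PAGE_SHIFT == 12),
and the mapping values are made up.

/*
 * Map a virtual sub-range [start, end) of a file-backed vma to the
 * inclusive file-offset range [min, max], exactly as the new
 * build_probe_list() does.
 */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4K pages */

int main(void)
{
	/* hypothetical mapping: file offset 0x2000 (2 pages) mapped at 0x400000 */
	unsigned long vm_pgoff = 2;		/* in pages */
	unsigned long vm_start = 0x400000;

	/* sub-range of the vma being mapped or unmapped */
	unsigned long start = 0x401000, end = 0x403000;

	long long min = ((long long)vm_pgoff << PAGE_SHIFT) + start - vm_start;
	long long max = min + (end - start) - 1;

	/* prints min=0x3000 max=0x4fff: the file offsets backing [start, end) */
	printf("min=%#llx max=%#llx\n", min, max);
	return 0;
}

Since vm_pgoff records where the mapping starts in the file (in pages),
[min, max] is exactly the set of file offsets backed by [start, end),
so any uprobe whose offset falls in that range is relevant to the vma.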