
Commit a6b9b4d5 authored by Ingo Molnar

Merge branch 'rcu/next' of...

parents e36c886a 28457ee6
+4 −10
@@ -1645,7 +1645,9 @@ the amount of locking which needs to be done.
       all the readers who were traversing the list when we deleted the
       element are finished.  We use <function>call_rcu()</function> to
       register a callback which will actually destroy the object once
-      the readers are finished.
+      all pre-existing readers are finished.  Alternatively,
+      <function>synchronize_rcu()</function> may be used to block until
+      all pre-existing readers are finished.
     </para>
     <para>
       But how does Read Copy Update know when the readers are
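
(For context on the paragraph above: the two styles of deferred destruction it describes look roughly like the sketch below. This is illustrative only, not code from the patch; struct obj and the function names are invented.)

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	struct rcu_head rcu;
	int data;
};

/* call_rcu() callback: runs once all pre-existing readers have finished. */
static void free_obj_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu));
}

/* Asynchronous style: queue the callback and return immediately. */
static void obj_destroy_async(struct obj *p)
{
	call_rcu(&p->rcu, free_obj_rcu);
}

/* Synchronous style: block for a grace period, then free directly.
 * Assumes p has already been unlinked from the RCU-protected structure. */
static void obj_destroy_sync(struct obj *p)
{
	synchronize_rcu();
	kfree(p);
}
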
@@ -1714,7 +1716,7 @@ the amount of locking which needs to be done.
 -        object_put(obj);
 +        list_del_rcu(&amp;obj-&gt;list);
          cache_num--;
-+        call_rcu(&amp;obj-&gt;rcu, cache_delete_rcu, obj);
++        call_rcu(&amp;obj-&gt;rcu, cache_delete_rcu);
  }
 
  /* Must be holding cache_lock */
@@ -1725,14 +1727,6 @@ the amount of locking which needs to be done.
          if (++cache_num > MAX_CACHE_SIZE) {
                  struct object *i, *outcast = NULL;
                  list_for_each_entry(i, &amp;cache, list) {
-@@ -85,6 +94,7 @@
-        obj-&gt;popularity = 0;
-        atomic_set(&amp;obj-&gt;refcnt, 1); /* The cache holds a reference */
-        spin_lock_init(&amp;obj-&gt;lock);
-+        INIT_RCU_HEAD(&amp;obj-&gt;rcu);
-
-        spin_lock_irqsave(&amp;cache_lock, flags);
-        __cache_add(obj);
 @@ -104,12 +114,11 @@
  struct object *cache_find(int id)
  {
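
(The hunks above switch the documentation's cache example to the two-argument call_rcu() and drop the INIT_RCU_HEAD() call, which is no longer needed before queueing an rcu_head. The cache_delete_rcu() callback itself is not visible in this excerpt; a plausible sketch of its two-argument form is shown below. The struct object layout is assumed, and object_put() is the example's existing helper.)

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>

struct object {
	struct list_head list;
	struct rcu_head rcu;		/* no INIT_RCU_HEAD() required */
	int id;
	int popularity;
};

/* Invoked once every reader that might still see the object has finished. */
static void cache_delete_rcu(struct rcu_head *head)
{
	struct object *obj = container_of(head, struct object, rcu);

	object_put(obj);		/* drop the reference the cache held */
}
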
+39 −7
@@ -218,13 +218,22 @@ over a rather long period of time, but improvements are always welcome!
 	include:
 
 	a.	Keeping a count of the number of data-structure elements
-		used by the RCU-protected data structure, including those
-		waiting for a grace period to elapse.  Enforce a limit
-		on this number, stalling updates as needed to allow
-		previously deferred frees to complete.
-
-		Alternatively, limit only the number awaiting deferred
-		free rather than the total number of elements.
+		used by the RCU-protected data structure, including
+		those waiting for a grace period to elapse.  Enforce a
+		limit on this number, stalling updates as needed to allow
+		previously deferred frees to complete.	Alternatively,
+		limit only the number awaiting deferred free rather than
+		the total number of elements.
+
+		One way to stall the updates is to acquire the update-side
+		mutex.	(Don't try this with a spinlock -- other CPUs
+		spinning on the lock could prevent the grace period
+		from ever ending.)  Another way to stall the updates
+		is for the updates to use a wrapper function around
+		the memory allocator, so that this wrapper function
+		simulates OOM when there is too much memory awaiting an
+		RCU grace period.  There are of course many other
+		variations on this theme.
 
 	b.	Limiting update rate.  For example, if updates occur only
 		once per hour, then no explicit rate limiting is required,
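
(As a rough illustration of the allocator-wrapper idea in item a. above: the wrapper refuses allocations while too many objects are waiting for a grace period. Everything here -- my_obj, my_cache_alloc, nr_deferred, the limit -- is an invented sketch, not code from the patch.)

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#define MY_DEFERRED_LIMIT	10000	/* arbitrary example limit */

static atomic_t nr_deferred;		/* objects awaiting a grace period */

struct my_obj {
	struct rcu_head rcu;
	/* payload ... */
};

static struct my_obj *my_cache_alloc(gfp_t gfp)
{
	/* Simulate OOM while too much memory awaits an RCU grace period. */
	if (atomic_read(&nr_deferred) > MY_DEFERRED_LIMIT)
		return NULL;
	return kzalloc(sizeof(struct my_obj), gfp);
}

static void my_obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_obj, rcu));
	atomic_dec(&nr_deferred);
}

static void my_obj_defer_free(struct my_obj *p)
{
	atomic_inc(&nr_deferred);
	call_rcu(&p->rcu, my_obj_free_rcu);
}
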
@@ -365,3 +374,26 @@ over a rather long period of time, but improvements are always welcome!
 	and the compiler to freely reorder code into and out of RCU
 	read-side critical sections.  It is the responsibility of the
 	RCU update-side primitives to deal with this.
+
+17.	Use CONFIG_PROVE_RCU, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and
+	the __rcu sparse checks to validate your RCU code.  These
+	can help find problems as follows:
+
+	CONFIG_PROVE_RCU: check that accesses to RCU-protected data
+		structures are carried out under the proper RCU
+		read-side critical section, while holding the right
+		combination of locks, or whatever other conditions
+		are appropriate.
+
+	CONFIG_DEBUG_OBJECTS_RCU_HEAD: check that you don't pass the
+		same object to call_rcu() (or friends) before an RCU
+		grace period has elapsed since the last time that you
+		passed that same object to call_rcu() (or friends).
+
+	__rcu sparse checks: tag the pointer to the RCU-protected data
+		structure with __rcu, and sparse will warn you if you
+		access that pointer without the services of one of the
+		variants of rcu_dereference().
+
+	These debugging aids can help you find problems that are
+	otherwise extremely difficult to spot.
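
(The __rcu annotation named in the new item 17 is the same mechanism applied to the vhost and evdev pointers later in this merge. A minimal made-up example of what sparse then enforces -- gp, gp_lock, struct foo and the two functions are invented, not code from the patch.)

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct foo {
	int a;
};

static struct foo __rcu *gp;		/* sparse now treats gp as RCU-protected */
static DEFINE_SPINLOCK(gp_lock);

static int my_read(void)
{
	struct foo *p;
	int ret = 0;

	rcu_read_lock();
	p = rcu_dereference(gp);	/* a plain "p = gp" would draw a sparse warning */
	if (p)
		ret = p->a;
	rcu_read_unlock();
	return ret;
}

static void my_update(struct foo *newp)
{
	spin_lock(&gp_lock);
	rcu_assign_pointer(gp, newp);	/* publish with the required memory barrier */
	spin_unlock(&gp_lock);
}
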
+1 −1
@@ -28,7 +28,7 @@ struct evdev {
 	int minor;
 	struct input_handle handle;
 	wait_queue_head_t wait;
-	struct evdev_client *grab;
+	struct evdev_client __rcu *grab;
 	struct list_head client_list;
 	spinlock_t client_lock; /* protects client_list */
 	struct mutex mutex;
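
(The only change to struct evdev is the __rcu annotation on grab, documenting -- and letting sparse plus CONFIG_PROVE_RCU check -- that the pointer is read under RCU and written with the RCU publish primitives. The usual pattern for such a field looks roughly like the sketch below; these are not the driver's real functions, and the locking shown is an assumption.)

/* Reader side: runs under rcu_read_lock(). */
static bool evdev_example_is_grabbed(struct evdev *evdev)
{
	struct evdev_client *grab;
	bool grabbed;

	rcu_read_lock();
	grab = rcu_dereference(evdev->grab);
	grabbed = grab != NULL;
	rcu_read_unlock();
	return grabbed;
}

/* Update side: assumed to run with evdev->mutex held. */
static void evdev_example_ungrab(struct evdev *evdev)
{
	rcu_assign_pointer(evdev->grab, NULL);
	synchronize_rcu();	/* wait until no reader can still see the old client */
}
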
+12 −4
@@ -127,7 +127,10 @@ static void handle_tx(struct vhost_net *net)
 	size_t len, total_len = 0;
 	int err, wmem;
 	size_t hdr_size;
-	struct socket *sock = rcu_dereference(vq->private_data);
+	struct socket *sock;
+
+	sock = rcu_dereference_check(vq->private_data,
+				     lockdep_is_held(&vq->mutex));
 	if (!sock)
 		return;
 
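
(For context on the primitive introduced here: rcu_dereference_check() takes an extra condition which, in addition to an RCU read-side critical section, may justify the access; with CONFIG_PROVE_RCU, lockdep complains at runtime if neither holds. Assuming a pointer declared as "static struct foo __rcu *gp;" and an update-side lock my_lock -- both invented names -- a reader-or-lock-holder access would be written roughly as:)

	p = rcu_dereference_check(gp, lockdep_is_held(&my_lock));
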
@@ -582,7 +585,10 @@ static void vhost_net_disable_vq(struct vhost_net *n,
 static void vhost_net_enable_vq(struct vhost_net *n,
 				struct vhost_virtqueue *vq)
 {
-	struct socket *sock = vq->private_data;
+	struct socket *sock;
+
+	sock = rcu_dereference_protected(vq->private_data,
+					 lockdep_is_held(&vq->mutex));
 	if (!sock)
 		return;
 	if (vq == n->vqs + VHOST_NET_VQ_TX) {
@@ -598,7 +604,8 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
 	struct socket *sock;
 
 	mutex_lock(&vq->mutex);
-	sock = vq->private_data;
+	sock = rcu_dereference_protected(vq->private_data,
+					 lockdep_is_held(&vq->mutex));
 	vhost_net_disable_vq(n, vq);
 	rcu_assign_pointer(vq->private_data, NULL);
 	mutex_unlock(&vq->mutex);
@@ -736,7 +743,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	}
 
 	/* start polling new socket */
-	oldsock = vq->private_data;
+	oldsock = rcu_dereference_protected(vq->private_data,
+					    lockdep_is_held(&vq->mutex));
 	if (sock != oldsock) {
                 vhost_net_disable_vq(n, vq);
                 rcu_assign_pointer(vq->private_data, sock);
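
(The pattern repeated through these vhost_net hunks is the update-side one: the pointer is read here only while vq->mutex is held, so rcu_dereference_protected() documents that claim -- no barrier, lockdep-verifiable -- and rcu_assign_pointer() publishes the replacement. A condensed sketch of the same pattern on an invented field follows; my_dev and set_backend_example are not vhost names.)

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/rcupdate.h>

struct my_dev {
	struct mutex mutex;
	struct socket __rcu *private_data;
};

static void set_backend_example(struct my_dev *d, struct socket *newsock)
{
	struct socket *old;

	mutex_lock(&d->mutex);
	/* Update-side read, justified by holding d->mutex rather than rcu_read_lock(). */
	old = rcu_dereference_protected(d->private_data,
					lockdep_is_held(&d->mutex));
	if (old != newsock)
		rcu_assign_pointer(d->private_data, newsock);
	mutex_unlock(&d->mutex);
	/* A real user would wait for a grace period before releasing "old". */
}
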
+16 −6
@@ -284,7 +284,7 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
 	vhost_dev_cleanup(dev);
 
 	memory->nregions = 0;
-	dev->memory = memory;
+	RCU_INIT_POINTER(dev->memory, memory);
 	return 0;
 }
 
@@ -316,8 +316,9 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 		fput(dev->log_file);
 	dev->log_file = NULL;
 	/* No one will access memory at this point */
-	kfree(dev->memory);
-	dev->memory = NULL;
+	kfree(rcu_dereference_protected(dev->memory,
+					lockdep_is_held(&dev->mutex)));
+	RCU_INIT_POINTER(dev->memory, NULL);
 	if (dev->mm)
 		mmput(dev->mm);
 	dev->mm = NULL;
@@ -401,14 +402,22 @@ static int vq_access_ok(unsigned int num,
 /* Caller should have device mutex but not vq mutex */
 int vhost_log_access_ok(struct vhost_dev *dev)
 {
-	return memory_access_ok(dev, dev->memory, 1);
+	struct vhost_memory *mp;
+
+	mp = rcu_dereference_protected(dev->memory,
+				       lockdep_is_held(&dev->mutex));
+	return memory_access_ok(dev, mp, 1);
 }
 
 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
 static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
 {
-	return vq_memory_access_ok(log_base, vq->dev->memory,
+	struct vhost_memory *mp;
+
+	mp = rcu_dereference_protected(vq->dev->memory,
+				       lockdep_is_held(&vq->mutex));
+	return vq_memory_access_ok(log_base, mp,
 			    vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
 		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
 					sizeof *vq->used +
@@ -448,7 +457,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 		kfree(newmem);
 		return -EFAULT;
 	}
-	oldmem = d->memory;
+	oldmem = rcu_dereference_protected(d->memory,
+					   lockdep_is_held(&d->mutex));
 	rcu_assign_pointer(d->memory, newmem);
 	synchronize_rcu();
 	kfree(oldmem);
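
(Two details in the vhost.c hunks are worth noting: RCU_INIT_POINTER() is used on paths where no readers can be running, so rcu_assign_pointer()'s memory barrier is unnecessary, while vhost_set_memory() keeps the full publish / synchronize_rcu() / kfree() sequence for the live-replacement case. A generic sketch of that replace-and-free sequence follows; my_vhost_dev, my_replace_memory and the locking shown are illustrative assumptions, not the vhost code.)

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_vhost_dev {
	struct mutex mutex;
	struct vhost_memory __rcu *memory;
};

static void my_replace_memory(struct my_vhost_dev *d, struct vhost_memory *newmem)
{
	struct vhost_memory *oldmem;

	mutex_lock(&d->mutex);
	/* Update-side read of the current table, justified by d->mutex. */
	oldmem = rcu_dereference_protected(d->memory,
					   lockdep_is_held(&d->mutex));
	rcu_assign_pointer(d->memory, newmem);		/* publish the new table */
	mutex_unlock(&d->mutex);

	synchronize_rcu();				/* wait out readers of oldmem */
	kfree(oldmem);					/* now safe to free */
}
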