
Commit b922df73 authored by Linus Torvalds

Merge branch 'rcu-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'rcu-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (21 commits)
  rcu: RCU-based detection of stalled CPUs for Classic RCU, fix
  rcu: RCU-based detection of stalled CPUs for Classic RCU
  rcu: add rcu_read_lock_sched() / rcu_read_unlock_sched()
  rcu: fix sparse shadowed variable warning
  doc/RCU: fix pseudocode in rcuref.txt
  rcuclassic: fix compiler warning
  rcu: use irq-safe locks
  rcuclassic: fix compilation NG
  rcu: fix locking cleanup fallout
  rcu: remove redundant ACCESS_ONCE definition from rcupreempt.c
  rcu: fix classic RCU locking cleanup lockdep problem
  rcu: trace fix possible mem-leak
  rcu: just rename call_rcu_bh instead of making it a macro
  rcu: remove list_for_each_rcu()
  rcu: fixes to include/linux/rcupreempt.h
  rcu: classic RCU locking and memory-barrier cleanups
  rcu: prevent console flood when one CPU sees another AWOL via RCU
  rcu, debug: detect stalled grace periods, cleanups
  rcu, debug: detect stalled grace periods
  rcu classic: new algorithm for callbacks-processing(v2)
  ...
parents c54dcd8e cdbb92b3
Documentation/RCU/checklist.txt  +1 −1
@@ -210,7 +210,7 @@ over a rather long period of time, but improvements are always welcome!
 		number of updates per grace period.
 
 9.	All RCU list-traversal primitives, which include
-	rcu_dereference(), list_for_each_rcu(), list_for_each_entry_rcu(),
+	rcu_dereference(), list_for_each_entry_rcu(),
 	list_for_each_continue_rcu(), and list_for_each_safe_rcu(),
 	must be either within an RCU read-side critical section or
 	must be protected by appropriate update-side locks.  RCU
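
A minimal reader-side sketch of rule 9, for concreteness; it is not part of the patch, and struct foo, foo_list and foo_present() are made-up names. The point is simply that the traversal runs entirely between rcu_read_lock() and rcu_read_unlock():

#include <linux/list.h>
#include <linux/rcupdate.h>

struct foo {
	struct list_head list;
	int key;
};

static LIST_HEAD(foo_list);

/* Reader side: the list_for_each_entry_rcu() walk is legal only
 * because it sits inside an RCU read-side critical section. */
static int foo_present(int key)
{
	struct foo *p;
	int found = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(p, &foo_list, list) {
		if (p->key == key) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

Updaters may instead walk the list while holding the update-side lock, since other writers are already excluded there.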
Documentation/RCU/rcuref.txt  +8 −8
@@ -29,9 +29,9 @@ release_referenced() delete()
 					}
 
 If this list/array is made lock free using RCU as in changing the
-write_lock() in add() and delete() to spin_lock and changing read_lock
-in search_and_reference to rcu_read_lock(), the atomic_get in
-search_and_reference could potentially hold reference to an element which
+write_lock() in add() and delete() to spin_lock() and changing read_lock()
+in search_and_reference() to rcu_read_lock(), the atomic_inc() in
+search_and_reference() could potentially hold reference to an element which
 has already been deleted from the list/array.  Use atomic_inc_not_zero()
 in this scenario as follows:
 
@@ -40,20 +40,20 @@ add() search_and_reference()
 {					{
     alloc_object			    rcu_read_lock();
     ...					    search_for_element
-    atomic_set(&el->rc, 1);		    if (atomic_inc_not_zero(&el->rc)) {
-    write_lock(&list_lock);		        rcu_read_unlock();
+    atomic_set(&el->rc, 1);		    if (!atomic_inc_not_zero(&el->rc)) {
+    spin_lock(&list_lock);		        rcu_read_unlock();
 					        return FAIL;
     add_element				    }
     ...					    ...
-    write_unlock(&list_lock);		    rcu_read_unlock();
+    spin_unlock(&list_lock);		    rcu_read_unlock();
 }					}
 3.					4.
 release_referenced()			delete()
 {					{
-    ...					    write_lock(&list_lock);
+    ...					    spin_lock(&list_lock);
     if (atomic_dec_and_test(&el->rc))       ...
         call_rcu(&el->head, el_free);       delete_element
-    ...                                     write_unlock(&list_lock);
+    ...                                     spin_unlock(&list_lock);
 } 					    ...
 					    if (atomic_dec_and_test(&el->rc))
 					        call_rcu(&el->head, el_free);
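
To make the corrected pseudocode concrete, here is one way columns 2 and 3 might look as self-contained kernel C of this era. This is a sketch, not part of the patch: struct element, the key field and el_free() are illustrative names.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct element {
	struct list_head list;
	atomic_t rc;			/* el->rc in the pseudocode */
	struct rcu_head head;
	int key;
};

static void el_free(struct rcu_head *rh)
{
	kfree(container_of(rh, struct element, head));
}

/* Column 2: take a reference only if the element is still live.
 * atomic_inc_not_zero() fails once rc has reached zero, i.e. once
 * the element is already on its way to el_free(). */
static struct element *search_and_reference(struct list_head *head, int key)
{
	struct element *el;

	rcu_read_lock();
	list_for_each_entry_rcu(el, head, list) {
		if (el->key == key && atomic_inc_not_zero(&el->rc)) {
			rcu_read_unlock();
			return el;
		}
	}
	rcu_read_unlock();
	return NULL;			/* FAIL in the pseudocode */
}

/* Column 3: the last reference queues the element for freeing
 * after a grace period. */
static void release_referenced(struct element *el)
{
	if (atomic_dec_and_test(&el->rc))
		call_rcu(&el->head, el_free);
}

The property the fixed text relies on is that atomic_inc_not_zero() refuses to resurrect a count that has already hit zero, so a reader can never end up holding a reference to memory that delete() has already handed to call_rcu().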
Documentation/RCU/whatisRCU.txt  +0 −2
@@ -786,8 +786,6 @@ RCU pointer/list traversal:
 	list_for_each_entry_rcu
 	hlist_for_each_entry_rcu
 
-	list_for_each_rcu		(to be deprecated in favor of
-					 list_for_each_entry_rcu)
 	list_for_each_continue_rcu	(to be deprecated in favor of new
 					 list_for_each_entry_continue_rcu)
 
include/linux/compiler.h  +3 −1
@@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
  * ACCESS_ONCE() in different C statements.
  *
  * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time.
+ * merging, or refetching absolutely anything at any time.  Its main intended
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
  */
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
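
A tiny sketch of the usage pattern the added comment describes; it is not from the patch, and wakeup_flag and both functions are hypothetical. The volatile cast forces exactly one load or store per reference, so the process-level loop cannot cache the flag in a register while the handler updates it:

#include <linux/compiler.h>
#include <asm/processor.h>	/* cpu_relax() */

static int wakeup_flag;

/* Runs in irq context on this CPU. */
static void handler_side(void)
{
	ACCESS_ONCE(wakeup_flag) = 1;
}

/* Runs at process level on the same CPU. */
static void process_side(void)
{
	while (!ACCESS_ONCE(wakeup_flag))
		cpu_relax();
}

Note that the comment's caveat still applies: ACCESS_ONCE() constrains only the compiler, which suffices here precisely because both sides run on the same CPU.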

include/linux/rcuclassic.h  +27 −10
@@ -40,12 +40,21 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+#define RCU_SECONDS_TILL_STALL_CHECK	( 3 * HZ) /* for rcp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ) /* for rcp->jiffies_stall */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
 /* Global control variables for rcupdate callback mechanism. */
 struct rcu_ctrlblk {
 	long	cur;		/* Current batch number.                      */
 	long	completed;	/* Number of the last completed batch         */
-	int	next_pending;	/* Is the next batch already waiting?         */
+	long	pending;	/* Number of the last pending batch           */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+	unsigned long gp_start;	/* Time at which GP started in jiffies. */
+	unsigned long jiffies_stall;
+				/* Time at which to check for CPU stalls. */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 	int	signaled;
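
The code that consumes gp_start and jiffies_stall lives in kernel/rcuclassic.c and is not part of this hunk. As a sketch of how the fields and the CHECK/RECHECK intervals plausibly fit together (both function names here are hypothetical): arm a deadline when a grace period starts, complain once it passes, then re-arm with the longer interval so a single stall cannot flood the console, per the "prevent console flood" commit above:

#include <linux/jiffies.h>
#include <linux/kernel.h>

static void record_gp_check_time(struct rcu_ctrlblk *rcp)
{
	rcp->gp_start = jiffies;
	rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
}

static void check_for_stall(struct rcu_ctrlblk *rcp)
{
	if (time_after(jiffies, rcp->jiffies_stall)) {
		printk(KERN_ERR "RCU grace period stalled for %lu jiffies\n",
		       jiffies - rcp->gp_start);
		/* Back off before complaining again. */
		rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	}
}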

@@ -66,11 +75,7 @@ static inline int rcu_batch_after(long a, long b)
 	return (a - b) > 0;
 }
 
-/*
- * Per-CPU data for Read-Copy UPdate.
- * nxtlist - new callbacks are added here
- * curlist - current batch for which quiescent cycle started if any
- */
+/* Per-CPU data for Read-Copy UPdate. */
 struct rcu_data {
 	/* 1) quiescent state handling : */
 	long		quiescbatch;     /* Batch # for grace period */
@@ -78,12 +83,24 @@ struct rcu_data {
 	int		qs_pending;	 /* core waits for quiesc state */
 
 	/* 2) batch handling */
-	long  	       	batch;           /* Batch # for current RCU batch */
+	/*
+	 * if nxtlist is not NULL, then:
+	 * batch:
+	 *	The batch # for the last entry of nxtlist
+	 * [*nxttail[1], NULL = *nxttail[2]):
+	 *	Entries that batch # <= batch
+	 * [*nxttail[0], *nxttail[1]):
+	 *	Entries that batch # <= batch - 1
+	 * [nxtlist, *nxttail[0]):
+	 *	Entries that batch # <= batch - 2
+	 *	The grace period for these entries has completed, and
+	 *	the other grace-period-completed entries may be moved
+	 *	here temporarily in rcu_process_callbacks().
+	 */
+	long  	       	batch;
 	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail;
+	struct rcu_head **nxttail[3];
 	long            qlen; 	 	 /* # of queued callbacks */
-	struct rcu_head *curlist;
-	struct rcu_head **curtail;
 	struct rcu_head *donelist;
 	struct rcu_head **donetail;
 	long		blimit;		 /* Upper limit on a processed batch */
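
The comment added above describes a single linked list carved into three batch segments by tail pointers. A sketch of the two operations that layout enables, assuming the struct rcu_data above; the helper names are hypothetical, and the real logic is in kernel/rcuclassic.c:

/* Append a new callback to the newest segment,
 * [*nxttail[1], *nxttail[2]). */
static void rcu_enqueue(struct rcu_data *rdp, struct rcu_head *head)
{
	head->next = NULL;
	*rdp->nxttail[2] = head;
	rdp->nxttail[2] = &head->next;
}

/* When a grace period ends, each segment ages by one batch: slide
 * the older tail pointers forward.  Entries now in
 * [nxtlist, *nxttail[0]) have had their grace period complete and
 * may be invoked. */
static void rcu_age_segments(struct rcu_data *rdp)
{
	rdp->nxttail[0] = rdp->nxttail[1];
	rdp->nxttail[1] = rdp->nxttail[2];
}

Because the segments share one list, advancing callbacks from batch to batch is a couple of pointer assignments rather than the list splicing the removed curlist stage used to require.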