
Commit 8b6e511e authored by Cliff Wickman, committed by Ingo Molnar

x86/uv: Work around UV2 BAU hangs



On SGI's UV2 the BAU (Broadcast Assist Unit) driver can hang
under a heavy load. To cure this:

- Disable the UV2 extended status mode (see UV2_EXT_SHFT), as
  this mode changes BAU behavior in more ways than just delivering
  an extra bit of status.  Revert status to just two meaningful bits,
  like UV1.

- Use no IPI-style resets on UV2.  Just give up the request for
  whatever reason it failed and let it be accomplished with
  the legacy IPI method.

- Use no alternate sending descriptor (the former UV2 workaround
  bcp->using_desc and handle_uv2_busy() stuff).  Just disable the
  use of the BAU for a period of time in favor of the legacy IPI
  method when the h/w bug leaves a descriptor busy.

  -- new tunable: giveup_limit determines the threshold at which a hub is
     so plugged that it should do all requests with the legacy IPI method for
     a period of time (see the sketch after this list)
  -- generalize disable_for_congestion() (renamed disable_for_period()) for
     use whenever a hub should avoid using the BAU for a period of time
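
  For illustration only: a minimal C sketch of the giveup accounting this
  introduces. The fields, tunables and disable_for_period() are the patch's
  own (taken from record_send_stats() in the tlb_uv.c diff below); the helper
  name count_giveup() is hypothetical:

	/* count FLUSH_GIVEUPs within a window of disabled_period cycles */
	static void count_giveup(struct bau_control *bcp, struct ptc_stats *stat)
	{
		if (get_cycles() > bcp->period_end)	/* window expired */
			bcp->period_giveups = 0;
		bcp->period_giveups++;
		if (bcp->period_giveups == 1)		/* first giveup opens a window */
			bcp->period_end = get_cycles() + bcp->disabled_period;
		if (bcp->period_giveups > bcp->giveup_limit) {
			/* hub is too plugged; use legacy IPIs for a period */
			disable_for_period(bcp, stat);
			stat->s_giveuplimit++;
		}
	}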

Also:

 - Fix find_another_by_swack(), which is part of the UV2 bug workaround

 - Correct and clarify the statistics (new stats s_overipilimit, s_giveuplimit,
   s_enters, s_ipifordisabled, s_plugged, s_congested)

Signed-off-by: Cliff Wickman <cpw@sgi.com>
Link: http://lkml.kernel.org/r/20120622131459.GC31884@sgi.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
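
Note on the extended-status change: with UV2_EXT_SHFT cleared in the misc
control MMR, ACTIVATION_STATUS_2 no longer contributes a third status bit, so
a status read collapses to the same two meaningful bits as UV1.  A sketch of
the resulting read, mirroring the new uv2_read_status() in the tlb_uv.c diff
below (types and MMR helpers assumed from the driver):

	static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
	{
		/* 'desc' is unused once the ACTIVATION_STATUS_2 bit is ignored;
		   the << 1 keeps the value aligned with the 3-bit UV2H_DESC_*
		   constants, whose low (extended) bit is now always 0 */
		return ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
	}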
parent 26ef8577
arch/x86/include/asm/uv/uv_bau.h +18 −10
@@ -140,6 +140,9 @@
 #define IPI_RESET_LIMIT			1
 /* after this # consecutive successes, bump up the throttle if it was lowered */
 #define COMPLETE_THRESHOLD		5
+/* after this # of giveups (fall back to kernel IPI's) disable the use of
+   the BAU for a period of time */
+#define GIVEUP_LIMIT			100
 
 #define UV_LB_SUBNODEID			0x10
 
@@ -166,7 +169,6 @@
 #define FLUSH_RETRY_TIMEOUT		2
 #define FLUSH_GIVEUP			3
 #define FLUSH_COMPLETE			4
-#define FLUSH_RETRY_BUSYBUG		5
 
 /*
  * tuning the action when the numalink network is extremely delayed
@@ -175,7 +177,7 @@
 						   microseconds */
 #define CONGESTED_REPS			10	/* long delays averaged over
 						   this many broadcasts */
-#define CONGESTED_PERIOD		30	/* time for the bau to be
+#define DISABLED_PERIOD			10	/* time for the bau to be
 						   disabled, in seconds */
 /* see msg_type: */
 #define MSG_NOOP			0
@@ -520,7 +522,12 @@ struct ptc_stats {
 	unsigned long	s_uv2_wars;		/* uv2 workaround, perm. busy */
 	unsigned long	s_uv2_wars_hw;		/* uv2 workaround, hiwater */
 	unsigned long	s_uv2_war_waits;	/* uv2 workaround, long waits */
+	unsigned long	s_overipilimit;		/* over the ipi reset limit */
+	unsigned long	s_giveuplimit;		/* disables, over giveup limit*/
 	unsigned long	s_enters;		/* entries to the driver */
+	unsigned long	s_ipifordisabled;	/* fall back to IPI; disabled */
+	unsigned long	s_plugged;		/* plugged by h/w bug*/
+	unsigned long	s_congested;		/* giveup on long wait */
 	/* destination statistics */
 	unsigned long	d_alltlb;		/* times all tlb's on this
 						   cpu were flushed */
@@ -588,8 +595,7 @@ struct bau_control {
 	int			ipi_attempts;
 	int			conseccompletes;
 	short			nobau;
-	int			baudisabled;
-	int			set_bau_off;
+	short			baudisabled;
 	short			cpu;
 	short			osnode;
 	short			uvhub_cpu;
@@ -598,14 +604,16 @@ struct bau_control {
 	short			cpus_in_socket;
 	short			cpus_in_uvhub;
 	short			partition_base_pnode;
-	short			using_desc; /* an index, like uvhub_cpu */
-	unsigned int		inuse_map;
+	short			busy;       /* all were busy (war) */
 	unsigned short		message_number;
 	unsigned short		uvhub_quiesce;
 	short			socket_acknowledge_count[DEST_Q_SIZE];
 	cycles_t		send_message;
+	cycles_t		period_end;
+	cycles_t		period_time;
 	spinlock_t		uvhub_lock;
 	spinlock_t		queue_lock;
+	spinlock_t		disable_lock;
 	/* tunables */
 	int			max_concurr;
 	int			max_concurr_const;
@@ -616,9 +624,9 @@ struct bau_control {
 	int			complete_threshold;
 	int			cong_response_us;
 	int			cong_reps;
-	int			cong_period;
-	unsigned long		clocks_per_100_usec;
-	cycles_t		period_time;
+	cycles_t		disabled_period;
+	int			period_giveups;
+	int			giveup_limit;
 	long			period_requests;
 	struct hub_and_pnode	*thp;
 };
arch/x86/platform/uv/tlb_uv.c +182 −205
@@ -1,7 +1,7 @@
 /*
  *	SGI UltraViolet TLB flush routines.
  *
- *	(c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI.
+ *	(c) 2008-2012 Cliff Wickman <cpw@sgi.com>, SGI.
  *
  *	This code is released under the GNU General Public License version 2 or
  *	later.
@@ -39,8 +39,6 @@ static int timeout_base_ns[] = {
 static int timeout_us;
 static int nobau;
 static int nobau_perm;
-static int baudisabled;
-static spinlock_t disable_lock;
 static cycles_t congested_cycles;
 
 /* tunables: */
@@ -48,12 +46,13 @@ static int max_concurr = MAX_BAU_CONCURRENT;
 static int max_concurr_const	= MAX_BAU_CONCURRENT;
 static int plugged_delay	= PLUGGED_DELAY;
 static int plugsb4reset		= PLUGSB4RESET;
+static int giveup_limit		= GIVEUP_LIMIT;
 static int timeoutsb4reset	= TIMEOUTSB4RESET;
 static int ipi_reset_limit	= IPI_RESET_LIMIT;
 static int complete_threshold	= COMPLETE_THRESHOLD;
 static int congested_respns_us	= CONGESTED_RESPONSE_US;
 static int congested_reps	= CONGESTED_REPS;
-static int congested_period	= CONGESTED_PERIOD;
+static int disabled_period	= DISABLED_PERIOD;
 
 static struct tunables tunables[] = {
 	{&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
@@ -64,7 +63,8 @@ static struct tunables tunables[] = {
 	{&complete_threshold, COMPLETE_THRESHOLD},
 	{&congested_respns_us, CONGESTED_RESPONSE_US},
 	{&congested_reps, CONGESTED_REPS},
-	{&congested_period, CONGESTED_PERIOD}
+	{&disabled_period, DISABLED_PERIOD},
+	{&giveup_limit, GIVEUP_LIMIT}
 };
 
 static struct dentry *tunables_dir;
@@ -313,7 +313,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
 		 * Both sockets dump their completed count total into
 		 * the message's count.
 		 */
-		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
+		*sp = 0;
 		asp = (struct atomic_short *)&msg->acknowledge_count;
 		msg_ack_count = atom_asr(socket_ack_count, asp);
 
@@ -526,16 +526,15 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
 }
 
 /*
- * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
+ * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
+ * But not currently used.
  */
 static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
 {
 	unsigned long descriptor_status;
-	unsigned long descriptor_status2;
 
-	descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
-	descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
-	descriptor_status = (descriptor_status << 1) | descriptor_status2;
+	descriptor_status =
+		((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
 	return descriptor_status;
 }
 
@@ -566,87 +565,11 @@ int normal_busy(struct bau_control *bcp)
  */
 int handle_uv2_busy(struct bau_control *bcp)
 {
-	int busy_one = bcp->using_desc;
-	int normal = bcp->uvhub_cpu;
-	int selected = -1;
-	int i;
-	unsigned long descriptor_status;
-	unsigned long status;
-	int mmr_offset;
-	struct bau_desc *bau_desc_old;
-	struct bau_desc *bau_desc_new;
-	struct bau_control *hmaster = bcp->uvhub_master;
 	struct ptc_stats *stat = bcp->statp;
-	cycles_t ttm;
 
 	stat->s_uv2_wars++;
-	spin_lock(&hmaster->uvhub_lock);
-	/* try for the original first */
-	if (busy_one != normal) {
-		if (!normal_busy(bcp))
-			selected = normal;
-	}
-	if (selected < 0) {
-		/* can't use the normal, select an alternate */
-		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
-		descriptor_status = read_lmmr(mmr_offset);
-
-		/* scan available descriptors 32-63 */
-		for (i = 0; i < UV_CPUS_PER_AS; i++) {
-			if ((hmaster->inuse_map & (1 << i)) == 0) {
-				status = ((descriptor_status >>
-						(i * UV_ACT_STATUS_SIZE)) &
-						UV_ACT_STATUS_MASK) << 1;
-				if (status != UV2H_DESC_BUSY) {
-					selected = i + UV_CPUS_PER_AS;
-					break;
-				}
-			}
-		}
-	}
-
-	if (busy_one != normal)
-		/* mark the busy alternate as not in-use */
-		hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
-
-	if (selected >= 0) {
-		/* switch to the selected descriptor */
-		if (selected != normal) {
-			/* set the selected alternate as in-use */
-			hmaster->inuse_map |=
-					(1 << (selected - UV_CPUS_PER_AS));
-			if (selected > stat->s_uv2_wars_hw)
-				stat->s_uv2_wars_hw = selected;
-		}
-		bau_desc_old = bcp->descriptor_base;
-		bau_desc_old += (ITEMS_PER_DESC * busy_one);
-		bcp->using_desc = selected;
-		bau_desc_new = bcp->descriptor_base;
-		bau_desc_new += (ITEMS_PER_DESC * selected);
-		*bau_desc_new = *bau_desc_old;
-	} else {
-		/*
-		 * All are busy. Wait for the normal one for this cpu to
-		 * free up.
-		 */
-		stat->s_uv2_war_waits++;
-		spin_unlock(&hmaster->uvhub_lock);
-		ttm = get_cycles();
-		do {
-			cpu_relax();
-		} while (normal_busy(bcp));
-		spin_lock(&hmaster->uvhub_lock);
-		/* switch to the original descriptor */
-		bcp->using_desc = normal;
-		bau_desc_old = bcp->descriptor_base;
-		bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
-		bcp->using_desc = (ITEMS_PER_DESC * normal);
-		bau_desc_new = bcp->descriptor_base;
-		bau_desc_new += (ITEMS_PER_DESC * normal);
-		*bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
-	}
-	spin_unlock(&hmaster->uvhub_lock);
-	return FLUSH_RETRY_BUSYBUG;
+	bcp->busy = 1;
+	return FLUSH_GIVEUP;
 }
 
 static int uv2_wait_completion(struct bau_desc *bau_desc,
@@ -655,7 +578,7 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
 {
 	unsigned long descriptor_stat;
 	cycles_t ttm;
-	int desc = bcp->using_desc;
+	int desc = bcp->uvhub_cpu;
 	long busy_reps = 0;
 	struct ptc_stats *stat = bcp->statp;
 
@@ -663,24 +586,38 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
 
 	/* spin on the status MMR, waiting for it to go idle */
 	while (descriptor_stat != UV2H_DESC_IDLE) {
-		/*
-		 * Our software ack messages may be blocked because
-		 * there are no swack resources available.  As long
-		 * as none of them has timed out hardware will NACK
-		 * our message and its state will stay IDLE.
-		 */
-		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
-		    (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
+		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
+			/*
+			 * A h/w bug on the destination side may
+			 * have prevented the message being marked
+			 * pending, thus it doesn't get replied to
+			 * and gets continually nacked until it times
+			 * out with a SOURCE_TIMEOUT.
+			 */
 			stat->s_stimeout++;
 			return FLUSH_GIVEUP;
-		} else if (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) {
-			stat->s_strongnacks++;
-			bcp->conseccompletes = 0;
-			return FLUSH_GIVEUP;
 		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
+			ttm = get_cycles();
+
+			/*
+			 * Our retries may be blocked by all destination
+			 * swack resources being consumed, and a timeout
+			 * pending.  In that case hardware returns the
+			 * ERROR that looks like a destination timeout.
+			 * Without using the extended status we have to
+			 * deduce from the short time that this was a
+			 * strong nack.
+			 */
+			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
+				bcp->conseccompletes = 0;
+				stat->s_plugged++;
+				/* FLUSH_RETRY_PLUGGED causes hang on boot */
+				return FLUSH_GIVEUP;
+			}
 			stat->s_dtimeout++;
 			bcp->conseccompletes = 0;
-			return FLUSH_RETRY_TIMEOUT;
+			/* FLUSH_RETRY_TIMEOUT causes hang on boot */
+			return FLUSH_GIVEUP;
 		} else {
 			busy_reps++;
 			if (busy_reps > 1000000) {
@@ -688,10 +625,9 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
 				busy_reps = 0;
 				ttm = get_cycles();
 				if ((ttm - bcp->send_message) >
-					(bcp->clocks_per_100_usec)) {
+						bcp->timeout_interval)
 					return handle_uv2_busy(bcp);
-				}
 			}
 			/*
 			 * descriptor_stat is still BUSY
 			 */
@@ -714,7 +650,7 @@ static int wait_completion(struct bau_desc *bau_desc,
 {
 	int right_shift;
 	unsigned long mmr_offset;
-	int desc = bcp->using_desc;
+	int desc = bcp->uvhub_cpu;
 
 	if (desc < UV_CPUS_PER_AS) {
 		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
@@ -793,33 +729,31 @@ static void destination_timeout(struct bau_desc *bau_desc,
 }
 
 /*
- * Completions are taking a very long time due to a congested numalink
- * network.
+ * Stop all cpus on a uvhub from using the BAU for a period of time.
+ * This is reversed by check_enable.
  */
-static void disable_for_congestion(struct bau_control *bcp,
-					struct ptc_stats *stat)
+static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
 {
-	/* let only one cpu do this disabling */
-	spin_lock(&disable_lock);
-
-	if (!baudisabled && bcp->period_requests &&
-	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
-		int tcpu;
-		struct bau_control *tbcp;
-		/* it becomes this cpu's job to turn on the use of the
-		   BAU again */
-		baudisabled = 1;
-		bcp->set_bau_off = 1;
-		bcp->set_bau_on_time = get_cycles();
-		bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
+	int tcpu;
+	struct bau_control *tbcp;
+	struct bau_control *hmaster;
+	cycles_t tm1;
+
+	hmaster = bcp->uvhub_master;
+	spin_lock(&hmaster->disable_lock);
+	if (!bcp->baudisabled) {
 		stat->s_bau_disabled++;
+		tm1 = get_cycles();
 		for_each_present_cpu(tcpu) {
 			tbcp = &per_cpu(bau_control, tcpu);
-			tbcp->baudisabled = 1;
+			if (tbcp->uvhub_master == hmaster) {
+				tbcp->baudisabled = 1;
+				tbcp->set_bau_on_time =
+					tm1 + bcp->disabled_period;
+			}
 		}
 	}
-
-	spin_unlock(&disable_lock);
+	spin_unlock(&hmaster->disable_lock);
 }
 
 static void count_max_concurr(int stat, struct bau_control *bcp,
@@ -850,16 +784,30 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
 			bcp->period_requests++;
 			bcp->period_time += elapsed;
 			if ((elapsed > congested_cycles) &&
-			    (bcp->period_requests > bcp->cong_reps))
-				disable_for_congestion(bcp, stat);
+			    (bcp->period_requests > bcp->cong_reps) &&
+			    ((bcp->period_time / bcp->period_requests) >
+							congested_cycles)) {
+				stat->s_congested++;
+				disable_for_period(bcp, stat);
+			}
 		}
 	} else
 		stat->s_requestor--;
 
 	if (completion_status == FLUSH_COMPLETE && try > 1)
 		stat->s_retriesok++;
-	else if (completion_status == FLUSH_GIVEUP)
+	else if (completion_status == FLUSH_GIVEUP) {
 		stat->s_giveup++;
+		if (get_cycles() > bcp->period_end)
+			bcp->period_giveups = 0;
+		bcp->period_giveups++;
+		if (bcp->period_giveups == 1)
+			bcp->period_end = get_cycles() + bcp->disabled_period;
+		if (bcp->period_giveups > bcp->giveup_limit) {
+			disable_for_period(bcp, stat);
+			stat->s_giveuplimit++;
+		}
+	}
 }
 
 /*
@@ -903,7 +851,8 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
  * Returns 1 if it gives up entirely and the original cpu mask is to be
  * returned to the kernel.
  */
-int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
+int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
+	struct bau_desc *bau_desc)
 {
 	int seq_number = 0;
 	int completion_stat = 0;
@@ -916,24 +865,23 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
 	struct bau_control *hmaster = bcp->uvhub_master;
 	struct uv1_bau_msg_header *uv1_hdr = NULL;
 	struct uv2_bau_msg_header *uv2_hdr = NULL;
-	struct bau_desc *bau_desc;
 
-	if (bcp->uvhub_version == 1)
+	if (bcp->uvhub_version == 1) {
+		uv1 = 1;
 		uv1_throttle(hmaster, stat);
+	}
 
 	while (hmaster->uvhub_quiesce)
 		cpu_relax();
 
 	time1 = get_cycles();
+	if (uv1)
+		uv1_hdr = &bau_desc->header.uv1_hdr;
+	else
+		uv2_hdr = &bau_desc->header.uv2_hdr;
+
 	do {
-		bau_desc = bcp->descriptor_base;
-		bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
-		if (bcp->uvhub_version == 1) {
-			uv1 = 1;
-			uv1_hdr = &bau_desc->header.uv1_hdr;
-		} else
-			uv2_hdr = &bau_desc->header.uv2_hdr;
-		if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
+		if (try == 0) {
 			if (uv1)
 				uv1_hdr->msg_type = MSG_REGULAR;
 			else
@@ -951,25 +899,24 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
 			uv1_hdr->sequence = seq_number;
 		else
 			uv2_hdr->sequence = seq_number;
-		index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
+		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
 		bcp->send_message = get_cycles();
 
 		write_mmr_activation(index);
 
 		try++;
 		completion_stat = wait_completion(bau_desc, bcp, try);
-		/* UV2: wait_completion() may change the bcp->using_desc */
 
 		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
 
 		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
 			bcp->ipi_attempts = 0;
+			stat->s_overipilimit++;
 			completion_stat = FLUSH_GIVEUP;
 			break;
 		}
 		cpu_relax();
 	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
-		 (completion_stat == FLUSH_RETRY_BUSYBUG) ||
 		 (completion_stat == FLUSH_RETRY_TIMEOUT));
 
 	time2 = get_cycles();
@@ -990,28 +937,33 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
 }
 
 /*
- * The BAU is disabled. When the disabled time period has expired, the cpu
- * that disabled it must re-enable it.
- * Return 0 if it is re-enabled for all cpus.
+ * The BAU is disabled for this uvhub. When the disabled time period has
+ * expired re-enable it.
+ * Return 0 if it is re-enabled for all cpus on this uvhub.
  */
 static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
 {
 	int tcpu;
 	struct bau_control *tbcp;
+	struct bau_control *hmaster;
 
-	if (bcp->set_bau_off) {
-		if (get_cycles() >= bcp->set_bau_on_time) {
-			stat->s_bau_reenabled++;
-			baudisabled = 0;
-			for_each_present_cpu(tcpu) {
-				tbcp = &per_cpu(bau_control, tcpu);
+	hmaster = bcp->uvhub_master;
+	spin_lock(&hmaster->disable_lock);
+	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
+		stat->s_bau_reenabled++;
+		for_each_present_cpu(tcpu) {
+			tbcp = &per_cpu(bau_control, tcpu);
+			if (tbcp->uvhub_master == hmaster) {
 				tbcp->baudisabled = 0;
 				tbcp->period_requests = 0;
 				tbcp->period_time = 0;
+				tbcp->period_giveups = 0;
 			}
-			return 0;
 		}
+		spin_unlock(&hmaster->disable_lock);
+		return 0;
 	}
+	spin_unlock(&hmaster->disable_lock);
 	return -1;
 }
 
@@ -1113,6 +1065,8 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	struct cpumask *flush_mask;
 	struct ptc_stats *stat;
 	struct bau_control *bcp;
+	unsigned long descriptor_status;
+	unsigned long status;
 
 	bcp = &per_cpu(bau_control, cpu);
 	stat = bcp->statp;
@@ -1121,11 +1075,23 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	if (bcp->nobau)
 		return cpumask;
 
+	if (bcp->busy) {
+		descriptor_status =
+			read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
+		status = ((descriptor_status >> (bcp->uvhub_cpu *
+			UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
+		if (status == UV2H_DESC_BUSY)
+			return cpumask;
+		bcp->busy = 0;
+	}
+
 	/* bau was disabled due to slow response */
 	if (bcp->baudisabled) {
-		if (check_enable(bcp, stat))
+		if (check_enable(bcp, stat)) {
+			stat->s_ipifordisabled++;
 			return cpumask;
+		}
 	}
 
 	/*
 	 * Each sending cpu has a per-cpu mask which it fills from the caller's
@@ -1140,7 +1106,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 		stat->s_ntargself++;
 
 	bau_desc = bcp->descriptor_base;
-	bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
+	bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
 	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
 		return NULL;
@@ -1153,25 +1119,27 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
 	 * or 1 if it gave up and the original cpumask should be returned.
 	 */
-	if (!uv_flush_send_and_wait(flush_mask, bcp))
+	if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
 		return NULL;
 	else
 		return cpumask;
 }
 
 /*
- * Search the message queue for any 'other' message with the same software
- * acknowledge resource bit vector.
+ * Search the message queue for any 'other' unprocessed message with the
+ * same software acknowledge resource bit vector as the 'msg' message.
  */
 struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
-			struct bau_control *bcp, unsigned char swack_vec)
+					   struct bau_control *bcp)
 {
 	struct bau_pq_entry *msg_next = msg + 1;
+	unsigned char swack_vec = msg->swack_vec;
 
 	if (msg_next > bcp->queue_last)
 		msg_next = bcp->queue_first;
-	while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
-		if (msg_next->swack_vec == swack_vec)
+	while (msg_next != msg) {
+		if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
+				(msg_next->swack_vec == swack_vec))
 			return msg_next;
 		msg_next++;
 		if (msg_next > bcp->queue_last)
@@ -1200,32 +1168,30 @@ void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
 		 * This message was assigned a swack resource, but no
 		 * reserved acknowlegment is pending.
 		 * The bug has prevented this message from setting the MMR.
-		 * And no other message has used the same sw_ack resource.
-		 * Do the requested shootdown but do not reply to the msg.
-		 * (the 0 means make no acknowledge)
 		 */
-		bau_process_message(mdp, bcp, 0);
-		return;
-	}
-
-	/*
-	 * Some message has set the MMR 'pending' bit; it might have been
-	 * another message.  Look for that message.
-	 */
-	other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
-	if (other_msg) {
-		/* There is another.  Do not ack the current one. */
-		bau_process_message(mdp, bcp, 0);
-		/*
-		 * Let the natural processing of that message acknowledge
-		 * it. Don't get the processing of sw_ack's out of order.
-		 */
-		return;
-	}
+		/*
+		 * Some message has set the MMR 'pending' bit; it might have
+		 * been another message.  Look for that message.
+		 */
+		other_msg = find_another_by_swack(msg, bcp);
+		if (other_msg) {
+			/*
+			 * There is another. Process this one but do not
+			 * ack it.
+			 */
+			bau_process_message(mdp, bcp, 0);
+			/*
+			 * Let the natural processing of that other message
+			 * acknowledge it. Don't get the processing of sw_ack's
+			 * out of order.
+			 */
+			return;
+		}
+	}
 
 	/*
-	 * There is no other message using this sw_ack, so it is safe to
-	 * acknowledge it.
+	 * Either the MMR shows this one pending a reply or there is no
+	 * other message using this sw_ack, so it is safe to acknowledge it.
 	 */
 	bau_process_message(mdp, bcp, 1);
 
@@ -1330,7 +1296,8 @@ static void __init enable_timeouts(void)
 		 */
 		mmr_image |= (1L << SOFTACK_MSHIFT);
 		if (is_uv2_hub()) {
-			mmr_image |= (1L << UV2_EXT_SHFT);
+			/* hw bug workaround; do not use extended status */
+			mmr_image &= ~(1L << UV2_EXT_SHFT);
 		}
 		write_mmr_misc_control(pnode, mmr_image);
 	}
@@ -1383,13 +1350,15 @@ static int ptc_seq_show(struct seq_file *file, void *data)
 		seq_printf(file,
 			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
 		seq_printf(file,
-		    "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries rok ");
+			"numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
 		seq_printf(file,
-			"resetp resett giveup sto bz throt enters swack recv rtime ");
+			"rok resetp resett giveup sto bz throt disable ");
 		seq_printf(file,
-			"all one mult none retry canc nocan reset rcan ");
+			"enable wars warshw warwaits enters ipidis plugged ");
 		seq_printf(file,
-			"disable enable wars warshw warwaits\n");
+			"ipiover glim cong swack recv rtime all one mult ");
+		seq_printf(file,
+			"none retry canc nocan reset rcan\n");
 	}
 	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
 		bcp = &per_cpu(bau_control, cpu);
@@ -1407,25 +1376,28 @@ static int ptc_seq_show(struct seq_file *file, void *data)
 			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
 			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
 			   stat->s_dtimeout, stat->s_strongnacks);
-		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld ",
+		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
 			   stat->s_retry_messages, stat->s_retriesok,
 			   stat->s_resets_plug, stat->s_resets_timeout,
 			   stat->s_giveup, stat->s_stimeout,
-			   stat->s_busy, stat->s_throttles, stat->s_enters);
+			   stat->s_busy, stat->s_throttles);
+		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
+			   stat->s_bau_disabled, stat->s_bau_reenabled,
+			   stat->s_uv2_wars, stat->s_uv2_wars_hw,
+			   stat->s_uv2_war_waits, stat->s_enters,
+			   stat->s_ipifordisabled, stat->s_plugged,
+			   stat->s_overipilimit, stat->s_giveuplimit,
+			   stat->s_congested);
 
 		/* destination side statistics */
 		seq_printf(file,
-			   "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
+			"%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
 			   read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
 			   stat->d_requestee, cycles_2_us(stat->d_time),
 			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
 			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
 			   stat->d_nocanceled, stat->d_resets,
 			   stat->d_rcanceled);
-		seq_printf(file, "%ld %ld %ld %ld %ld\n",
-			stat->s_bau_disabled, stat->s_bau_reenabled,
-			stat->s_uv2_wars, stat->s_uv2_wars_hw,
-			stat->s_uv2_war_waits);
 	}
 	return 0;
 }
@@ -1439,13 +1411,14 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf,
 	char *buf;
 	int ret;
 
-	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
-		"max_concur plugged_delay plugsb4reset",
-		"timeoutsb4reset ipi_reset_limit complete_threshold",
-		"congested_response_us congested_reps congested_period",
+	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
+		"max_concur plugged_delay plugsb4reset timeoutsb4reset",
+		"ipi_reset_limit complete_threshold congested_response_us",
+		"congested_reps disabled_period giveup_limit",
 		max_concurr, plugged_delay, plugsb4reset,
 		timeoutsb4reset, ipi_reset_limit, complete_threshold,
-		congested_respns_us, congested_reps, congested_period);
+		congested_respns_us, congested_reps, disabled_period,
+		giveup_limit);
 
 	if (!buf)
 		return -ENOMEM;
@@ -1616,7 +1589,8 @@ static ssize_t tunables_write(struct file *file, const char __user *user,
 		bcp->complete_threshold =	complete_threshold;
 		bcp->cong_response_us =		congested_respns_us;
 		bcp->cong_reps =		congested_reps;
-		bcp->cong_period =		congested_period;
+		bcp->disabled_period =		sec_2_cycles(disabled_period);
+		bcp->giveup_limit =		giveup_limit;
 	}
 	return count;
 }
@@ -1745,6 +1719,10 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
 			 *   fairness chaining multilevel count replied_to
 			 */
 		} else {
+			/*
+			 * BIOS uses legacy mode, but UV2 hardware always
+			 * uses native mode for selective broadcasts.
+			 */
 			uv2_hdr = &bd2->header.uv2_hdr;
 			uv2_hdr->swack_flag =	1;
 			uv2_hdr->base_dest_nasid =
@@ -1896,10 +1874,11 @@ static void __init init_per_cpu_tunables(void)
 		bcp->complete_threshold		= complete_threshold;
 		bcp->cong_response_us		= congested_respns_us;
 		bcp->cong_reps			= congested_reps;
-		bcp->cong_period		= congested_period;
-		bcp->clocks_per_100_usec =	usec_2_cycles(100);
+		bcp->disabled_period =		sec_2_cycles(disabled_period);
+		bcp->giveup_limit =		giveup_limit;
 		spin_lock_init(&bcp->queue_lock);
 		spin_lock_init(&bcp->uvhub_lock);
+		spin_lock_init(&bcp->disable_lock);
 	}
 }
 
@@ -2020,7 +1999,6 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
 		}
 		bcp->uvhub_master = *hmasterp;
 		bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
-		bcp->using_desc = bcp->uvhub_cpu;
 		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
 			printk(KERN_EMERG "%d cpus per uvhub invalid\n",
 				bcp->uvhub_cpu);
@@ -2123,7 +2101,6 @@ static int __init uv_bau_init(void)
 	}
 
 	nuvhubs = uv_num_possible_blades();
-	spin_lock_init(&disable_lock);
 	congested_cycles = usec_2_cycles(congested_respns_us);
 
 	uv_base_pnode = 0x7fffffff;