Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bea248fb authored by Michael Ellerman, committed by Paul Mackerras
Browse files

[PATCH] ppc64: Remove lpqueue pointer from the paca on iSeries



The iSeries code keeps a pointer to the ItLpQueue in its paca struct. But
all these pointers end up pointing to the one place, ie. xItLpQueue.

So remove the pointer from the paca struct and just refer to xItLpQueue
directly where needed.

The only complication is that the spread_lpevents logic was implemented by
having a NULL lpqueue pointer in the paca on CPUs that weren't supposed to
process events. Instead we just compare the spread_lpevents value to the
processor id to get the same behaviour.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent b1bdfbd0
Loading
Loading
Loading
Loading
+9 −7
Original line number Diff line number Diff line
@@ -69,15 +69,17 @@ struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
	return nextLpEvent;
}

unsigned long spread_lpevents = 1;

int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
{
	int retval = 0;
	struct HvLpEvent * nextLpEvent;
	if ( lpQueue ) {
		nextLpEvent = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
		retval = nextLpEvent->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
	}
	return retval;
	struct HvLpEvent *next_event;

	if (smp_processor_id() >= spread_lpevents)
		return 0;

	next_event = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
	return next_event->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
}

void ItLpQueue_clearValid( struct HvLpEvent * event )
+2 −4
Original line number Diff line number Diff line
@@ -855,17 +855,15 @@ late_initcall(iSeries_src_init);

static int set_spread_lpevents(char *str)
{
	unsigned long i;
	unsigned long val = simple_strtoul(str, NULL, 0);
	extern unsigned long spread_lpevents;

	/*
	 * The parameter is the number of processors to share in processing
	 * lp events.
	 */
	if (( val > 0) && (val <= NR_CPUS)) {
		for (i = 1; i < val; ++i)
			paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;

		spread_lpevents = val;
		printk("lpevent processing spread over %ld processors\n", val);
	} else {
		printk("invalid spread_lpevents %ld\n", val);
+2 −2
Original line number Diff line number Diff line
@@ -88,7 +88,7 @@ static int iSeries_idle(void)

	while (1) {
		if (lpaca->lppaca.shared_proc) {
			if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
			if (ItLpQueue_isLpIntPending(&xItLpQueue))
				process_iSeries_events();
			if (!need_resched())
				yield_shared_processor();
@@ -100,7 +100,7 @@ static int iSeries_idle(void)

				while (!need_resched()) {
					HMT_medium();
					if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
					if (ItLpQueue_isLpIntPending(&xItLpQueue))
						process_iSeries_events();
					HMT_low();
				}
+2 −4
Original line number Diff line number Diff line
@@ -269,7 +269,6 @@ out:
void do_IRQ(struct pt_regs *regs)
{
	struct paca_struct *lpaca;
	struct ItLpQueue *lpq;

	irq_enter();

@@ -295,9 +294,8 @@ void do_IRQ(struct pt_regs *regs)
		iSeries_smp_message_recv(regs);
	}
#endif /* CONFIG_SMP */
	lpq = lpaca->lpqueue_ptr;
	if (lpq && ItLpQueue_isLpIntPending(lpq))
		lpevent_count += ItLpQueue_process(lpq, regs);
	if (ItLpQueue_isLpIntPending(&xItLpQueue))
		lpevent_count += ItLpQueue_process(&xItLpQueue, regs);

	irq_exit();

+2 −3
Original line number Diff line number Diff line
@@ -802,9 +802,8 @@ int mf_get_boot_rtc(struct rtc_time *tm)
	/* We need to poll here as we are not yet taking interrupts */
	while (rtc_data.busy) {
		extern unsigned long lpevent_count;
		struct ItLpQueue *lpq = get_paca()->lpqueue_ptr;
		if (lpq && ItLpQueue_isLpIntPending(lpq))
			lpevent_count += ItLpQueue_process(lpq, NULL);
		if (ItLpQueue_isLpIntPending(&xItLpQueue))
			lpevent_count += ItLpQueue_process(&xItLpQueue, NULL);
	}
	return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
}
Loading