Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d517bb79 authored by Linus Torvalds
Browse files
Pull alpha fixes from Matt Turner:
 "A build fix and a regression fix"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha:
  alpha/PCI: Fix noname IRQ level detection
  alpha: extend memset16 to EV6 optimised routines
parents 91cfc88c 86be8993
Loading
Loading
Loading
Loading
+29 −6
Original line number Original line Diff line number Diff line
@@ -102,6 +102,15 @@ sio_pci_route(void)
				   alpha_mv.sys.sio.route_tab);
				   alpha_mv.sys.sio.route_tab);
}
}


/*
 * Decide whether this PCI device's IRQ should be programmed as
 * level-triggered.  Every device needs a level IRQ except bridge-class
 * devices (PCI_BASE_CLASS_BRIDGE) that are not PCMCIA bridges.
 */
static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev)
{
	bool is_bridge = (dev->class >> 16) == PCI_BASE_CLASS_BRIDGE;
	bool is_pcmcia = (dev->class >> 8) == PCI_CLASS_BRIDGE_PCMCIA;

	/* Non-PCMCIA bridges are the only devices exempt from level IRQs. */
	return !is_bridge || is_pcmcia;
}

static unsigned int __init
static unsigned int __init
sio_collect_irq_levels(void)
sio_collect_irq_levels(void)
{
{
@@ -110,8 +119,7 @@ sio_collect_irq_levels(void)


	/* Iterate through the devices, collecting IRQ levels.  */
	/* Iterate through the devices, collecting IRQ levels.  */
	for_each_pci_dev(dev) {
	for_each_pci_dev(dev) {
		if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
		if (!sio_pci_dev_irq_needs_level(dev))
		    (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
			continue;
			continue;


		if (dev->irq)
		if (dev->irq)
@@ -120,8 +128,7 @@ sio_collect_irq_levels(void)
	return level_bits;
	return level_bits;
}
}


static void __init
static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset)
sio_fixup_irq_levels(unsigned int level_bits)
{
{
	unsigned int old_level_bits;
	unsigned int old_level_bits;


@@ -139,12 +146,21 @@ sio_fixup_irq_levels(unsigned int level_bits)
	 */
	 */
	old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);
	old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);


	level_bits |= (old_level_bits & 0x71ff);
	if (reset)
		old_level_bits &= 0x71ff;

	level_bits |= old_level_bits;


	outb((level_bits >> 0) & 0xff, 0x4d0);
	outb((level_bits >> 0) & 0xff, 0x4d0);
	outb((level_bits >> 8) & 0xff, 0x4d1);
	outb((level_bits >> 8) & 0xff, 0x4d1);
}
}


/*
 * Legacy entry point kept for existing callers: program the given level
 * bits, passing reset=true so that __sio_fixup_irq_levels masks the
 * previously-read level bits with 0x71ff before merging them in.
 */
static inline void
sio_fixup_irq_levels(unsigned int level_bits)
{
	__sio_fixup_irq_levels(level_bits, true);
}

static inline int
static inline int
noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
{
@@ -181,7 +197,14 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
	const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
	const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
	int irq = COMMON_TABLE_LOOKUP, tmp;
	int irq = COMMON_TABLE_LOOKUP, tmp;
	tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
	tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
	return irq >= 0 ? tmp : -1;

	irq = irq >= 0 ? tmp : -1;

	/* Fixup IRQ level if an actual IRQ mapping is detected */
	if (sio_pci_dev_irq_needs_level(dev) && irq >= 0)
		__sio_fixup_irq_levels(1 << irq, false);

	return irq;
}
}


static inline int
static inline int
+6 −6
Original line number Original line Diff line number Diff line
@@ -18,7 +18,7 @@
 * The algorithm for the leading and trailing quadwords remains the same,
 * The algorithm for the leading and trailing quadwords remains the same,
 * however the loop has been unrolled to enable better memory throughput,
 * however the loop has been unrolled to enable better memory throughput,
 * and the code has been replicated for each of the entry points: __memset
 * and the code has been replicated for each of the entry points: __memset
 * and __memsetw to permit better scheduling to eliminate the stalling
 * and __memset16 to permit better scheduling to eliminate the stalling
 * encountered during the mask replication.
 * encountered during the mask replication.
 * A future enhancement might be to put in a byte store loop for really
 * A future enhancement might be to put in a byte store loop for really
 * small (say < 32 bytes) memset()s.  Whether or not that change would be
 * small (say < 32 bytes) memset()s.  Whether or not that change would be
@@ -34,7 +34,7 @@
	.globl memset
	.globl memset
	.globl __memset
	.globl __memset
	.globl ___memset
	.globl ___memset
	.globl __memsetw
	.globl __memset16
	.globl __constant_c_memset
	.globl __constant_c_memset


	.ent ___memset
	.ent ___memset
@@ -415,9 +415,9 @@ end:
	 * to mask stalls.  Note that entry point names also had to change
	 * to mask stalls.  Note that entry point names also had to change
	 */
	 */
	.align 5
	.align 5
	.ent __memsetw
	.ent __memset16


__memsetw:
__memset16:
	.frame $30,0,$26,0
	.frame $30,0,$26,0
	.prologue 0
	.prologue 0


@@ -596,8 +596,8 @@ end_w:
	nop
	nop
	ret $31,($26),1		# L0 :
	ret $31,($26),1		# L0 :


	.end __memsetw
	.end __memset16
	EXPORT_SYMBOL(__memsetw)
	EXPORT_SYMBOL(__memset16)


memset = ___memset
memset = ___memset
__memset = ___memset
__memset = ___memset