Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dde00137 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
* 'for-2.6.25' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] Add arch-specific walk_memory_remove() for 64-bit powerpc
  [POWERPC] Enable hotplug memory remove for 64-bit powerpc
  [POWERPC] Add remove_memory() for 64-bit powerpc
  [POWERPC] Make cell IOMMU fixed mapping printk more useful
  [POWERPC] Fix potential cell IOMMU bug when switching back to default DMA ops
  [POWERPC] Don't enable cell IOMMU fixed mapping if there are no dma-ranges
  [POWERPC] Fix cell IOMMU null pointer explosion on old firmwares
  [POWERPC] spufs: Fix timing dependent false return from spufs_run_spu
  [POWERPC] spufs: No need to have a runnable SPU for libassist update
  [POWERPC] spufs: Update SPU_Status[CISHP] in backing runcntl write
  [POWERPC] spufs: Fix state_mutex leaks
  [POWERPC] Disable G5 NAP mode during SMU commands on U3
parents f3aafa6c a99824f3
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -272,6 +272,12 @@ config HOTPLUG_CPU
config ARCH_ENABLE_MEMORY_HOTPLUG
	def_bool y

config ARCH_HAS_WALK_MEMORY
	def_bool y

config ARCH_ENABLE_MEMORY_HOTREMOVE
	def_bool y

config KEXEC
	bool "kexec system call (EXPERIMENTAL)"
	depends on (PPC_PRPMC2800 || PPC_MULTIPLATFORM) && EXPERIMENTAL
+33 −0
Original line number Diff line number Diff line
@@ -129,6 +129,39 @@ int __devinit arch_add_memory(int nid, u64 start, u64 size)
	return __add_pages(zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * remove_memory - offline the pages covering the byte range
 * [start, start + size).
 *
 * Converts the range to page-frame numbers and hands it to the generic
 * hotplug code via offline_pages(), passing 120 * HZ as the third
 * argument (presumably a timeout in jiffies — confirm against the
 * offline_pages() prototype).  Returns 0 on success or the error code
 * from offline_pages().
 */
int remove_memory(u64 start, u64 size)
{
	unsigned long first_pfn = start >> PAGE_SHIFT;
	unsigned long last_pfn = first_pfn + (size >> PAGE_SHIFT);
	int rc;

	rc = offline_pages(first_pfn, last_pfn, 120 * HZ);
	if (rc)
		return rc;
	/* Arch-specific calls go here - next patch */
	return 0;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/*
 * walk_memory_resource() relies on the given memory range containing no
 * holes.  On PPC64 the range comes from /sysfs, so it is guaranteed to
 * be valid, non-overlapping and hole-free.  By the time we get here
 * (memory add or remove), /proc/device-tree has been updated and is
 * correct.  The only reason to cross-check against the device tree
 * would be if user space could hand us an arbitrary range through a
 * system call/ioctl instead of going through the /sysfs offline/online
 * interface.  So simply invoke the callback on the whole range.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
			int (*func)(unsigned long, unsigned long, void *))
{
	return func(start_pfn, nr_pages, arg);
}

#endif /* CONFIG_MEMORY_HOTPLUG */

void show_mem(void)
+28 −20
Original line number Diff line number Diff line
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>

#include <asm/prom.h>
@@ -789,18 +790,16 @@ static int __init cell_iommu_init_disabled(void)
static u64 cell_iommu_get_fixed_address(struct device *dev)
{
	u64 cpu_addr, size, best_size, pci_addr = OF_BAD_ADDR;
	struct device_node *tmp, *np;
	struct device_node *np;
	const u32 *ranges = NULL;
	int i, len, best;

	np = dev->archdata.of_node;
	of_node_get(np);
	ranges = of_get_property(np, "dma-ranges", &len);
	while (!ranges && np) {
		tmp = of_get_parent(np);
		of_node_put(np);
		np = tmp;
	np = of_node_get(dev->archdata.of_node);
	while (np) {
		ranges = of_get_property(np, "dma-ranges", &len);
		if (ranges)
			break;
		np = of_get_next_parent(np);
	}

	if (!ranges) {
@@ -842,19 +841,18 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	if (dma_mask == DMA_BIT_MASK(64)) {
		if (cell_iommu_get_fixed_address(dev) == OF_BAD_ADDR)
			dev_dbg(dev, "iommu: 64-bit OK, but bad addr\n");
		else {
	if (dma_mask == DMA_BIT_MASK(64) &&
		cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
	{
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		set_dma_ops(dev, &dma_iommu_fixed_ops);
			cell_dma_dev_setup(dev);
		}
	} else {
		dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
		set_dma_ops(dev, get_pci_dma_ops());
	}

	cell_dma_dev_setup(dev);

	*dev->dma_mask = dma_mask;

	return 0;
@@ -918,6 +916,18 @@ static int __init cell_iommu_fixed_mapping_init(void)
		return -1;
	}

	/* We must have dma-ranges properties for fixed mapping to work */
	for (np = NULL; (np = of_find_all_nodes(np));) {
		if (of_find_property(np, "dma-ranges", NULL))
			break;
	}
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
		return -1;
	}

	/* The default setup is to have the fixed mapping sit after the
	 * dynamic region, so find the top of the largest IOMMU window
	 * on any axon, then add the size of RAM and that's our max value.
@@ -981,8 +991,8 @@ static int __init cell_iommu_fixed_mapping_init(void)
			dsize = htab_size_bytes;
		}

		pr_debug("iommu: setting up %d, dynamic window %lx-%lx " \
			 "fixed window %lx-%lx\n", iommu->nid, dbase,
		printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
			"fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
			 dbase + dsize, fbase, fbase + fsize);

		cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize);
@@ -998,8 +1008,6 @@ static int __init cell_iommu_fixed_mapping_init(void)
	dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
	set_pci_dma_ops(&dma_iommu_ops);

	printk(KERN_DEBUG "IOMMU fixed mapping established.\n");

	return 0;
}

+6 −0
Original line number Diff line number Diff line
@@ -288,6 +288,12 @@ static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
	spin_lock(&ctx->csa.register_lock);
	ctx->csa.prob.spu_runcntl_RW = val;
	if (val & SPU_RUNCNTL_RUNNABLE) {
		ctx->csa.prob.spu_status_R &=
			~SPU_STATUS_STOPPED_BY_STOP &
			~SPU_STATUS_STOPPED_BY_HALT &
			~SPU_STATUS_SINGLE_STEP &
			~SPU_STATUS_INVALID_INSTR &
			~SPU_STATUS_INVALID_CH;
		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
	} else {
		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
+4 −8
Original line number Diff line number Diff line
@@ -108,7 +108,7 @@ int spufs_handle_class1(struct spu_context *ctx)
	u64 ea, dsisr, access;
	unsigned long flags;
	unsigned flt = 0;
	int ret, ret2;
	int ret;

	/*
	 * dar and dsisr get passed from the registers
@@ -148,13 +148,10 @@ int spufs_handle_class1(struct spu_context *ctx)
		ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);

	/*
	 * If spu_acquire fails due to a pending signal we just want to return
	 * EINTR to userspace even if that means missing the dma restart or
	 * updating the page fault statistics.
	 * This is nasty: we need the state_mutex for all the bookkeeping even
	 * if the syscall was interrupted by a signal. ewww.
	 */
	ret2 = spu_acquire(ctx);
	if (ret2)
		goto out;
	mutex_lock(&ctx->state_mutex);

	/*
	 * Clear dsisr under ctxt lock after handling the fault, so that
@@ -185,7 +182,6 @@ int spufs_handle_class1(struct spu_context *ctx)
	} else
		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

 out:
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	return ret;
}
Loading