
Commit 462e9a35 authored by Linus Torvalds
Pull ARM fixes from Russell King:
 "A number of ARM fixes:

   - prevent oopses caused by dma_get_sgtable() and declared DMA
     coherent memory

   - fix boot failure on nommu caused by ID_PFR1 access

   - a number of kprobes fixes from Jon Medhurst and Masami Hiramatsu"

* 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8665/1: nommu: access ID_PFR1 only if CPUID scheme
  ARM: dma-mapping: disallow dma_get_sgtable() for non-kernel managed memory
  arm: kprobes: Align stack to 8-bytes in test code
  arm: kprobes: Fix the return address of multiple kretprobes
  arm: kprobes: Skip single-stepping in recursing path if possible
  arm: kprobes: Allow to handle reentered kprobe on single-stepping
parents 5b50be74 3872fe83
+19 −1
@@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
 	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }
 
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable.  This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ *    a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ *    as we will try to flush the memory through a different alias to that
+ *    actually being used (and the flushes are redundant.)
+ */
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 unsigned long attrs)
 {
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	unsigned long pfn = dma_to_pfn(dev, handle);
+	struct page *page;
 	int ret;
 
+	/* If the PFN is not valid, we do not have a struct page */
+	if (!pfn_valid(pfn))
+		return -ENXIO;
+
+	page = pfn_to_page(pfn);
+
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (unlikely(ret))
 		return ret;
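With this change, a caller that tries to export a coherent allocation that has no struct page behind it now gets -ENXIO back instead of an oops. As a rough, hypothetical illustration (driver code below is not part of this commit; the function name is made up), an exporter would allocate with dma_alloc_coherent() and must handle dma_get_sgtable() failing:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_export_coherent(struct device *dev, size_t size)
{
	struct sg_table sgt;
	dma_addr_t handle;
	void *cpu_addr;
	int ret;

	cpu_addr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	ret = dma_get_sgtable(dev, &sgt, cpu_addr, handle, size);
	if (ret) {
		/* e.g. -ENXIO: no struct page behind this allocation */
		dma_free_coherent(dev, size, cpu_addr, handle);
		return ret;
	}

	/* ... hand &sgt to the importer, then eventually ... */
	sg_free_table(&sgt);
	dma_free_coherent(dev, size, cpu_addr, handle);
	return 0;
}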
+4 −1
@@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val)
  */
 static inline bool security_extensions_enabled(void)
 {
-	return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+	/* Check CPUID Identification Scheme before ID_PFR1 read */
+	if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
+		return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+	return 0;
 }
 
 static unsigned long __init setup_vectors_base(void)
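Context for the guard: bits [19:16] of the main ID register (MIDR) form the architecture field, and only the value 0xF means the core implements the CPUID identification scheme, i.e. that ID_PFR1 (whose bits [7:4] encode the Security Extensions) exists at all; reading it on older cores is what broke nommu boot. A minimal sketch of the same test wrapped in a hypothetical helper name:

#include <asm/cputype.h>

/*
 * Hypothetical helper (illustration only): true when the MIDR architecture
 * field reports the CPUID identification scheme, so ID_PFR1 and friends can
 * safely be read.
 */
static inline bool cpu_has_cpuid_scheme(void)
{
	/* 0xF in MIDR bits [19:16] means "see the CPUID registers" */
	return (read_cpuid_id() & 0x000f0000) == 0x000f0000;
}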
+38 −11
@@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 #endif
 
 	if (p) {
-		if (cur) {
+		if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+			/*
+			 * Probe hit but conditional execution check failed,
+			 * so just skip the instruction and continue as if
+			 * nothing had happened.
+			 * In this case, we can skip recursing check too.
+			 */
+			singlestep_skip(p, regs);
+		} else if (cur) {
 			/* Kprobe is pending, so we're recursing. */
 			switch (kcb->kprobe_status) {
 			case KPROBE_HIT_ACTIVE:
 			case KPROBE_HIT_SSDONE:
+			case KPROBE_HIT_SS:
 				/* A pre- or post-handler probe got us here. */
 				kprobes_inc_nmissed_count(p);
 				save_previous_kprobe(kcb);
@@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 				singlestep(p, regs, kcb);
 				restore_previous_kprobe(kcb);
 				break;
+			case KPROBE_REENTER:
+				/* A nested probe was hit in FIQ, it is a BUG */
+				pr_warn("Unrecoverable kprobe detected at %p.\n",
+					p->addr);
+				/* fall through */
 			default:
 				/* impossible cases */
 				BUG();
 			}
-		} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+		} else {
 			/* Probe hit and conditional execution check ok. */
 			set_current_kprobe(p);
 			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 				}
 				reset_current_kprobe();
 			}
-		} else {
-			/*
-			 * Probe hit but conditional execution check failed,
-			 * so just skip the instruction and continue as if
-			 * nothing had happened.
-			 */
-			singlestep_skip(p, regs);
 		}
 	} else if (cur) {
 		/* We probably hit a jprobe.  Call its break handler. */
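The reordered checks matter because kprobe_handler() can re-enter: code run from a pre- or post-handler can itself hit another probe, in which case `cur` is non-NULL and the nested hit is single-stepped under KPROBE_REENTER (now also accepted when arriving from KPROBE_HIT_SS). A minimal, hypothetical module showing the kind of handlers those states refer to (names and the probe target are illustrative, not from this commit):

#include <linux/kprobes.h>
#include <linux/module.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* Runs with kprobe_status == KPROBE_HIT_ACTIVE */
	pr_info("pre-handler: hit at %p\n", p->addr);
	return 0;	/* 0: go on to single-step the original insn */
}

static void example_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	/* Runs with kprobe_status == KPROBE_HIT_SSDONE */
	pr_info("post-handler: single-step done\n");
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* assumed probe target */
	.pre_handler	= example_pre,
	.post_handler	= example_post,
};

static int __init example_kp_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_kp_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_kp_init);
module_exit(example_kp_exit);
MODULE_LICENSE("GPL");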
@@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+	kprobe_opcode_t *correct_ret_addr = NULL;
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -456,14 +464,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			/* another task is sharing our hash bucket */
 			continue;
 
+		orig_ret_address = (unsigned long)ri->ret_addr;
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+	correct_ret_addr = ri->ret_addr;
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
 			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
 			__this_cpu_write(current_kprobe, NULL);
 		}
 
-		orig_ret_address = (unsigned long)ri->ret_addr;
 		recycle_rp_inst(ri, &empty_rp);
 
 		if (orig_ret_address != trampoline_address)
@@ -475,7 +503,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			break;
 	}
 
-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 	kretprobe_hash_unlock(current, &flags);
 
 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
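The second walk over the hash bucket exists because several kretprobes can be pending on the same task's call chain; before this fix, handlers for the outer instances saw the address of kretprobe_trampoline in ri->ret_addr rather than the real caller. A hypothetical module whose return handler relies on ri->ret_addr (probe target and names are illustrative, not from this commit):

#include <linux/kprobes.h>
#include <linux/module.h>

static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	/* With the fix, ri->ret_addr is the caller's real return address */
	pr_info("probed function returned %ld to %pS\n",
		regs_return_value(regs), ri->ret_addr);
	return 0;
}

static struct kretprobe example_krp = {
	.handler	= example_ret_handler,
	.kp.symbol_name	= "do_fork",	/* assumed probe target */
	.maxactive	= 16,
};

static int __init example_krp_init(void)
{
	return register_kretprobe(&example_krp);
}

static void __exit example_krp_exit(void)
{
	unregister_kretprobe(&example_krp);
}

module_init(example_krp_init);
module_exit(example_krp_exit);
MODULE_LICENSE("GPL");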
+8 −3
@@ -977,7 +977,10 @@ static void coverage_end(void)
 void __naked __kprobes_test_case_start(void)
 {
 	__asm__ __volatile__ (
-		"stmdb	sp!, {r4-r11}				\n\t"
+		"mov	r2, sp					\n\t"
+		"bic	r3, r2, #7				\n\t"
+		"mov	sp, r3					\n\t"
+		"stmdb	sp!, {r2-r11}				\n\t"
 		"sub	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
 		"bic	r0, lr, #1  @ r0 = inline data		\n\t"
 		"mov	r1, sp					\n\t"
@@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void)
		"movne	pc, r0					\n\t"
		"mov	r0, r4					\n\t"
		"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
		"ldmia	sp!, {r4-r11}				\n\t"
		"ldmia	sp!, {r2-r11}				\n\t"
		"mov	sp, r2					\n\t"
		"mov	pc, r0					\n\t"
	);
}
@@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void)
		"bxne	r0					\n\t"
		"mov	r0, r4					\n\t"
		"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
		"ldmia	sp!, {r4-r11}				\n\t"
		"ldmia	sp!, {r2-r11}				\n\t"
		"mov	sp, r2					\n\t"
		"bx	r0					\n\t"
	);
}
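The added instructions save the incoming stack pointer in r2, clear its low three bits so the test cases run on an 8-byte aligned stack (as the AAPCS expects at call boundaries, and as doubleword accesses generally want), and restore the saved value on the way out. A rough C analogue of the bic trick, with hypothetical names for illustration only:

#include <stdint.h>

/*
 * Hypothetical C analogue of the assembler sequence above: remember the
 * original stack-style pointer, then round it down to an 8-byte boundary.
 */
static inline void *align_down_8(void *sp, void **saved_sp)
{
	*saved_sp = sp;					/* like "mov r2, sp"     */
	return (void *)((uintptr_t)sp & ~(uintptr_t)7);	/* like "bic r3, r2, #7" */
}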