Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit db045733 authored by Linus Torvalds
Browse files
Pull livepatching updates from Jiri Kosina:

 - stacktrace handling improvements from Miroslav Benes

 - debug output improvements from Petr Mladek

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching:
  livepatch: Remove duplicate warning about missing reliable stacktrace support
  Revert "livepatch: Remove reliable stacktrace check in klp_try_switch_task()"
  stacktrace: Remove weak version of save_stack_trace_tsk_reliable()
  livepatch: Use static buffer for debugging messages under rq lock
  livepatch: Remove stale kobj_added entries from kernel-doc descriptions
parents 1f7563f7 38195dd5
Loading
Loading
Loading
Loading
+0 −3
Original line number Diff line number Diff line
@@ -35,7 +35,6 @@
 * @stack_node:	list node for klp_ops func_stack list
 * @old_size:	size of the old function
 * @new_size:	size of the new function
 * @kobj_added: @kobj has been added and needs freeing
 * @nop:        temporary patch to use the original code again; dyn. allocated
 * @patched:	the func has been added to the klp_ops list
 * @transition:	the func is currently being applied or reverted
@@ -113,7 +112,6 @@ struct klp_callbacks {
 * @node:	list node for klp_patch obj_list
 * @mod:	kernel module associated with the patched object
 *		(NULL for vmlinux)
 * @kobj_added: @kobj has been added and needs freeing
 * @dynamic:    temporary object for nop functions; dynamically allocated
 * @patched:	the object's funcs have been added to the klp_ops list
 */
@@ -140,7 +138,6 @@ struct klp_object {
 * @list:	list node for global list of actively used patches
 * @kobj:	kobject for sysfs resources
 * @obj_list:	dynamic list of the object entries
 * @kobj_added: @kobj has been added and needs freeing
 * @enabled:	the patch is enabled (but operation may be incomplete)
 * @forced:	was involved in a forced transition
 * @free_work:	patch cleanup from workqueue-context
+8 −3
Original line number Diff line number Diff line
@@ -247,7 +247,6 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
	int ret, nr_entries;

	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret < 0) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
@@ -281,11 +280,11 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	static char err_buf[STACK_ERR_BUF_SIZE];
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

@@ -293,6 +292,13 @@ static bool klp_try_switch_task(struct task_struct *task)
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
@@ -328,7 +334,6 @@ static bool klp_try_switch_task(struct task_struct *task)
		pr_debug("%s", err_buf);

	return success;

}

/*
+0 −8
Original line number Diff line number Diff line
@@ -255,14 +255,6 @@ save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
	WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
}

__weak int
save_stack_trace_tsk_reliable(struct task_struct *tsk,
			      struct stack_trace *trace)
{
	WARN_ONCE(1, KERN_INFO "save_stack_tsk_reliable() not implemented yet.\n");
	return -ENOSYS;
}

/**
 * stack_trace_save - Save a stack trace into a storage array
 * @store:	Pointer to storage array