Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1cb81b14 authored by Markus Metzger's avatar Markus Metzger Committed by Ingo Molnar
Browse files

x86, bts, mm: clean up buffer allocation



The current mm interface is asymmetric. One function allocates a locked
buffer, another function only refunds the memory.

Change this to have two functions for accounting and refunding locked
memory, respectively; and do the actual buffer allocation in ptrace.

[ Impact: refactor BTS buffer allocation code ]

Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20090424095143.A30265@sedona.ch.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 7e0bfad2
Loading
Loading
Loading
Loading
+26 −13
Original line number Diff line number Diff line
@@ -617,17 +617,28 @@ struct bts_context {
	struct work_struct	work;
};

static inline void alloc_bts_buffer(struct bts_context *context,
				    unsigned int size)
static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
{
	void *buffer;
	void *buffer = NULL;
	int err = -ENOMEM;

	err = account_locked_memory(current->mm, current->signal->rlim, size);
	if (err < 0)
		return err;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto out_refund;

	buffer = alloc_locked_buffer(size);
	if (buffer) {
	context->buffer = buffer;
	context->size = size;
	context->mm = get_task_mm(current);
	}

	return 0;

 out_refund:
	refund_locked_memory(current->mm, size);
	return err;
}

static inline void free_bts_buffer(struct bts_context *context)
@@ -638,7 +649,7 @@ static inline void free_bts_buffer(struct bts_context *context)
	kfree(context->buffer);
	context->buffer = NULL;

	refund_locked_buffer_memory(context->mm, context->size);
	refund_locked_memory(context->mm, context->size);
	context->size = 0;

	mmput(context->mm);
@@ -786,13 +797,15 @@ static int ptrace_bts_config(struct task_struct *child,
	context->tracer = NULL;

	if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
		int err;

		free_bts_buffer(context);
		if (!cfg.size)
			return 0;

		alloc_bts_buffer(context, cfg.size);
		if (!context->buffer)
			return -ENOMEM;
		err = alloc_bts_buffer(context, cfg.size);
		if (err < 0)
			return err;
	}

	if (cfg.flags & PTRACE_BTS_O_TRACE)
+4 −2
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@ struct anon_vma;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct rlimit;

#ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
@@ -1319,7 +1320,8 @@ int vmemmap_populate_basepages(struct page *start_page,
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);

extern void *alloc_locked_buffer(size_t size);
extern void refund_locked_buffer_memory(struct mm_struct *mm, size_t size);
extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
				 size_t size);
extern void refund_locked_memory(struct mm_struct *mm, size_t size);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
+17 −19
Original line number Diff line number Diff line
@@ -629,38 +629,36 @@ void user_shm_unlock(size_t size, struct user_struct *user)
	free_uid(user);
}

void *alloc_locked_buffer(size_t size)
int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
			  size_t size)
{
	unsigned long rlim, vm, pgsz;
	void *buffer = NULL;
	unsigned long lim, vm, pgsz;
	int error = -ENOMEM;

	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
	vm   = current->mm->total_vm + pgsz;
	if (rlim < vm)
		goto out;
	down_write(&mm->mmap_sem);

	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
	vm   = current->mm->locked_vm + pgsz;
	if (rlim < vm)
	lim = rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
	vm   = mm->total_vm + pgsz;
	if (lim < vm)
		goto out;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
	lim = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
	vm   = mm->locked_vm + pgsz;
	if (lim < vm)
		goto out;

	current->mm->total_vm  += pgsz;
	current->mm->locked_vm += pgsz;
	mm->total_vm  += pgsz;
	mm->locked_vm += pgsz;

	error = 0;
 out:
	up_write(&current->mm->mmap_sem);
	return buffer;
	up_write(&mm->mmap_sem);
	return error;
}

void refund_locked_buffer_memory(struct mm_struct *mm, size_t size)
void refund_locked_memory(struct mm_struct *mm, size_t size)
{
	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;