Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b27eb186 authored by HATAYAMA Daisuke's avatar HATAYAMA Daisuke Committed by Linus Torvalds
Browse files

vmcore: clean up read_vmcore()



Rewrite the part of read_vmcore() that reads objects in vmcore_list in the
same way as the part that reads the ELF headers, by which some duplicated
and redundant code is removed.

Signed-off-by: default avatarHATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
Acked-by: default avatarVivek Goyal <vgoyal@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Lisa Mitchell <lisa.mitchell@hp.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 0fa73b86
Loading
Loading
Loading
Loading
+20 −48
Original line number Diff line number Diff line
@@ -118,27 +118,6 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
	return read;
}

/*
 * Map a vmcore file offset to its corresponding physical address in memory.
 *
 * Walks @vc_list looking for the vmcore segment whose file-offset range
 * [m->offset, m->offset + m->size - 1] contains @offset.  On a hit,
 * *m_ptr is set to the matching segment and the translated physical
 * address is returned.  If no segment covers @offset, *m_ptr is set to
 * NULL and 0 is returned.
 */
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
					struct vmcore **m_ptr)
{
	struct vmcore *iter;

	list_for_each_entry(iter, vc_list, list) {
		/* Skip segments whose offset range does not cover @offset. */
		if (offset < iter->offset ||
		    offset > iter->offset + iter->size - 1)
			continue;
		*m_ptr = iter;
		return iter->paddr + (offset - iter->offset);
	}

	/* No segment covers this offset. */
	*m_ptr = NULL;
	return 0;
}

/* Read from the ELF header and then the crash dump. On error, negative value is
 * returned otherwise number of bytes read are returned.
 */
@@ -147,8 +126,8 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start, nr_bytes;
	struct vmcore *curr_m = NULL;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;
@@ -174,18 +153,12 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
			return acc;
	}

	start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
	if (!curr_m)
        	return -EINVAL;

	while (buflen) {
		tsz = min_t(size_t, buflen, PAGE_SIZE - (start & ~PAGE_MASK));

		/* Calculate left bytes in current memory segment. */
		nr_bytes = (curr_m->size - (start - curr_m->paddr));
		if (tsz > nr_bytes)
			tsz = nr_bytes;

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = m->offset + m->size - *fpos;
			if (buflen < tsz)
				tsz = buflen;
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start, 1);
			if (tmp < 0)
				return tmp;
@@ -193,14 +166,13 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;
		if (start >= (curr_m->paddr + curr_m->size)) {
			if (curr_m->list.next == &vmcore_list)
				return acc;	/*EOF*/
			curr_m = list_entry(curr_m->list.next,
						struct vmcore, list);
			start = curr_m->paddr;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}