Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d2c5e30c authored by Christoph Lameter, committed by Linus Torvalds
Browse files

[PATCH] zoned vm counters: conversion of nr_bounce to per zone counter



Conversion of nr_bounce to a per zone counter

nr_bounce is only used for proc output.  So it could be left as an event
counter.  However, the event counters may not be accurate and nr_bounce is
categorizing types of pages in a zone.  So we really need this to also be a
per zone counter.

[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent fd39fc85
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -65,6 +65,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
		       "Node %d AnonPages:    %8lu kB\n"
		       "Node %d PageTables:   %8lu kB\n"
		       "Node %d NFS Unstable: %8lu kB\n"
		       "Node %d Bounce:       %8lu kB\n"
		       "Node %d Slab:         %8lu kB\n",
		       nid, K(i.totalram),
		       nid, K(i.freeram),
@@ -82,6 +83,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
		       nid, K(node_page_state(nid, NR_PAGETABLE)),
		       nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
		       nid, K(node_page_state(nid, NR_BOUNCE)),
		       nid, K(node_page_state(nid, NR_SLAB)));
	n += hugetlb_report_node_meminfo(nid, buf + n);
	return n;
+2 −0
Original line number Diff line number Diff line
@@ -171,6 +171,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
		"Slab:         %8lu kB\n"
		"PageTables:   %8lu kB\n"
		"NFS Unstable: %8lu kB\n"
		"Bounce:       %8lu kB\n"
		"CommitLimit:  %8lu kB\n"
		"Committed_AS: %8lu kB\n"
		"VmallocTotal: %8lu kB\n"
@@ -196,6 +197,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
		K(global_page_state(NR_SLAB)),
		K(global_page_state(NR_PAGETABLE)),
		K(global_page_state(NR_UNSTABLE_NFS)),
		K(global_page_state(NR_BOUNCE)),
		K(allowed),
		K(committed),
		(unsigned long)VMALLOC_TOTAL >> 10,
+1 −0
Original line number Diff line number Diff line
@@ -56,6 +56,7 @@ enum zone_stat_item {
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VM_ZONE_STAT_ITEMS };

struct per_cpu_pages {
+0 −1
Original line number Diff line number Diff line
@@ -67,7 +67,6 @@ struct page_state {
	unsigned long allocstall;	/* direct reclaim calls */

	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
	unsigned long nr_bounce;	/* pages for bounce buffers */
};

extern void get_full_page_state(struct page_state *ret);
+3 −3
Original line number Diff line number Diff line
@@ -315,8 +315,8 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
		dec_page_state(nr_bounce);
	}

	bio_endio(bio_orig, bio_orig->bi_size, err);
@@ -397,7 +397,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		to->bv_len = from->bv_len;
		to->bv_offset = from->bv_offset;
		inc_page_state(nr_bounce);
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

		if (rw == WRITE) {
			char *vto, *vfrom;
Loading