
Commit 0139aa7b authored by Joonsoo Kim, committed by Linus Torvalds

mm: rename _count, field of the struct page, to _refcount



Many developers already know that the reference count field of struct
page is _count and that it is an atomic type.  They may try to handle
it directly, which would defeat the purpose of the page reference count
tracepoints.  To prevent direct modification of _count, this patch
renames it to _refcount and adds a warning comment to the code.  After
that, developers who need to handle the reference count will see that
the field should not be accessed directly.

[akpm@linux-foundation.org: fix comments, per Vlastimil]
[akpm@linux-foundation.org: Documentation/vm/transhuge.txt too]
[sfr@canb.auug.org.au: sync ethernet driver changes]
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Sunil Goutham <sgoutham@cavium.com>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Manish Chopra <manish.chopra@qlogic.com>
Cc: Yuval Mintz <yuval.mintz@qlogic.com>
Cc: Tariq Toukan <tariqt@mellanox.com>
Cc: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6d061f9f
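
As a minimal illustration of the access pattern this rename enforces (the
example_* function names are hypothetical; page_ref_add(), page_ref_sub()
and put_page() are the helpers the converted drivers below use), driver
code that takes extra references on a page now goes through the accessors
rather than poking page->_refcount:

	#include <linux/mm.h>
	#include <linux/page_ref.h>

	/* Hypothetical driver fragment: take several extra references. */
	static void example_take_refs(struct page *page, int nr_frags)
	{
		/* was: atomic_add(nr_frags, &page->_count); */
		page_ref_add(page, nr_frags);
	}

	/* Drop the extra references, then the base reference. */
	static void example_drop_refs(struct page *page, int nr_frags)
	{
		/* was: atomic_sub(nr_frags, &page->_count); */
		page_ref_sub(page, nr_frags);
		put_page(page);
	}

The mlx5 hunks below are the in-tree instances of exactly this conversion.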
+5 −5
@@ -394,9 +394,9 @@ hugepage natively. Once finished you can drop the page table lock.
 Refcounting on THP is mostly consistent with refcounting on other compound
 pages:
 
-  - get_page()/put_page() and GUP operate in head page's ->_count.
+  - get_page()/put_page() and GUP operate in head page's ->_refcount.
 
-  - ->_count in tail pages is always zero: get_page_unless_zero() never
+  - ->_refcount in tail pages is always zero: get_page_unless_zero() never
     succeed on tail pages.
 
   - map/unmap of the pages with PTE entry increment/decrement ->_mapcount
@@ -426,15 +426,15 @@ requests to split pinned huge page: it expects page count to be equal to
 sum of mapcount of all sub-pages plus one (split_huge_page caller must
 have reference for head page).
 
-split_huge_page uses migration entries to stabilize page->_count and
+split_huge_page uses migration entries to stabilize page->_refcount and
 page->_mapcount.
 
 We safe against physical memory scanners too: the only legitimate way
 scanner can get reference to a page is get_page_unless_zero().
 
-All tail pages has zero ->_count until atomic_add(). It prevent scanner
+All tail pages has zero ->_refcount until atomic_add(). It prevent scanner
 from geting reference to tail page up to the point. After the atomic_add()
-we don't care about ->_count value.  We already known how many references
+we don't care about ->_refcount value.  We already known how many references
 with should uncharge from head page.
 
 For head page get_page_unless_zero() will succeed and we don't mind. It's
+1 −1
@@ -679,7 +679,7 @@ static void __init init_free_pfn_range(unsigned long start, unsigned long end)
 			 * Hacky direct set to avoid unnecessary
 			 * lock take/release for EVERY page here.
 			 */
-			p->_count.counter = 0;
+			p->_refcount.counter = 0;
 			p->_mapcount.counter = -1;
 		}
 		init_page_count(page);
+1 −1
@@ -861,7 +861,7 @@ rqbiocnt(struct request *r)
  * discussion.
  *
  * We cannot use get_page in the workaround, because it insists on a
- * positive page count as a precondition.  So we use _count directly.
+ * positive page count as a precondition.  So we use _refcount directly.
  */
 static void
 bio_pageinc(struct bio *bio)
+1 −1
@@ -1164,7 +1164,7 @@ static void msc_mmap_close(struct vm_area_struct *vma)
 	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
 		return;
 
-	/* drop page _counts */
+	/* drop page _refcounts */
 	for (pg = 0; pg < msc->nr_pages; pg++) {
 		struct page *page = msc_buffer_get_page(msc, pg);
 
+10 −10
@@ -433,8 +433,8 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
 	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
 		if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
 			goto err_unmap;
-		atomic_add(mlx5e_mpwqe_strides_per_page(rq),
-			   &wi->umr.dma_info[i].page->_count);
+		page_ref_add(wi->umr.dma_info[i].page,
+			     mlx5e_mpwqe_strides_per_page(rq));
 		wi->skbs_frags[i] = 0;
 	}
 
@@ -452,8 +452,8 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
 	while (--i >= 0) {
 		dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
 			       PCI_DMA_FROMDEVICE);
-		atomic_sub(mlx5e_mpwqe_strides_per_page(rq),
-			   &wi->umr.dma_info[i].page->_count);
+		page_ref_sub(wi->umr.dma_info[i].page,
+			     mlx5e_mpwqe_strides_per_page(rq));
 		put_page(wi->umr.dma_info[i].page);
 	}
 	dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
@@ -477,8 +477,8 @@ void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
 	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
 		dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
 			       PCI_DMA_FROMDEVICE);
-		atomic_sub(mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i],
-			   &wi->umr.dma_info[i].page->_count);
+		page_ref_sub(wi->umr.dma_info[i].page,
+			mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
 		put_page(wi->umr.dma_info[i].page);
 	}
 	dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
@@ -527,8 +527,8 @@ static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
 	 */
 	split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
 	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-		atomic_add(mlx5e_mpwqe_strides_per_page(rq),
-			   &wi->dma_info.page[i]._count);
+		page_ref_add(&wi->dma_info.page[i],
+			     mlx5e_mpwqe_strides_per_page(rq));
 		wi->skbs_frags[i] = 0;
 	}
 
@@ -551,8 +551,8 @@ void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
 	dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
 		       PCI_DMA_FROMDEVICE);
 	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-		atomic_sub(mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i],
-			   &wi->dma_info.page[i]._count);
+		page_ref_sub(&wi->dma_info.page[i],
+			mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
 		put_page(&wi->dma_info.page[i]);
 	}
 }