
Commit 0e392508 authored by Alexander Duyck, committed by David S. Miller

net: Store virtual address instead of page in netdev_alloc_cache



This change stores the virtual address of the page in the netdev_alloc_cache
instead of the page pointer.  The idea is to avoid repeated calls to
page_address(), since the virtual address is required for every access while
the page pointer is only needed at allocation or reset of the page.

While I was at it I also reordered the netdev_alloc_cache structure a bit
so that its size is always 16 bytes, dropping the size member in the case
where PAGE_SIZE is greater than or equal to 32KB.

Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 2ee52ad4
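The essence of the change, sketched below with illustrative structure and
function names (a simplified model, not the patch itself): the old cache
keyed everything off the page pointer and paid a page_address() lookup on
every fragment, while the new cache keeps the mapped address and
reconstructs the page pointer with virt_to_page() only when the page has
to be recycled.

/* Simplified before/after sketch; names are illustrative. */
struct old_cache { struct page *page; unsigned int offset; };
struct new_cache { void *va;          unsigned int offset; };

static void *old_get_frag(struct old_cache *c, unsigned int fragsz)
{
	c->offset -= fragsz;
	return page_address(c->page) + c->offset; /* lookup on every call */
}

static void *new_get_frag(struct new_cache *c, unsigned int fragsz)
{
	c->offset -= fragsz;
	/* plain pointer arithmetic; when the page must be recycled, the
	 * slow path recovers it via virt_to_page(c->va)
	 */
	return c->va + c->offset;
}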
include/linux/skbuff.h: +2 −3

@@ -2128,9 +2128,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 		kfree_skb(skb);
 }
 
-#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
-#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
-#define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE
+#define NETDEV_FRAG_PAGE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
+#define NETDEV_FRAG_PAGE_MAX_ORDER	get_order(NETDEV_FRAG_PAGE_MAX_SIZE)
 
 void *netdev_alloc_frag(unsigned int fragsz);
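Why the two defines trade places (my reading, with a userspace check of
the arithmetic below; the PAGE_SHIFT value is an assumption): the old
NETDEV_FRAG_PAGE_MAX_SIZE expanded through get_order(), which the
preprocessor cannot evaluate, whereas the new form is plain arithmetic,
so the patch can test PAGE_SIZE < NETDEV_FRAG_PAGE_MAX_SIZE in #if.

#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

#define NETDEV_FRAG_PAGE_MAX_SIZE __ALIGN_MASK(32768UL, ~PAGE_MASK)

int main(void)
{
	/* 4KB pages: prints 32768 (and get_order(32768) == 3).
	 * Rebuilt with PAGE_SHIFT 16 (64KB pages) it prints 65536, so
	 * PAGE_SIZE < NETDEV_FRAG_PAGE_MAX_SIZE is false and the #if
	 * blocks in net/core/skbuff.c below compile away.
	 */
	printf("%lu\n", (unsigned long)NETDEV_FRAG_PAGE_MAX_SIZE);
	return 0;
}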
net/core/skbuff.c: +32 −23

@@ -348,7 +348,13 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 EXPORT_SYMBOL(build_skb);
 
 struct netdev_alloc_cache {
-	struct page_frag	frag;
+	void * va;
+#if (PAGE_SIZE < NETDEV_FRAG_PAGE_MAX_SIZE)
+	__u16 offset;
+	__u16 size;
+#else
+	__u32 offset;
+#endif
 	/* we maintain a pagecount bias, so that we dont dirty cache line
 	 * containing page->_count every time we allocate a fragment.
 	 */
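Why the struct has two layouts (an annotation; my reading of the patch,
not commit text):

/*
 * PAGE_SIZE < 32KB:  refill tries a 32KB compound page but may fall
 *	back to a single page, so the actual size must be stored; both
 *	offset and size never exceed 32768 and fit in __u16.
 *
 * PAGE_SIZE >= 32KB: refill always yields exactly one page, size is
 *	the compile-time constant PAGE_SIZE and the member is dropped;
 *	offset widens to __u32 because e.g. a 64KB page overflows __u16.
 */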
@@ -361,21 +367,20 @@ static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
 static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
 				       gfp_t gfp_mask)
 {
-	const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
 	struct page *page = NULL;
 	gfp_t gfp = gfp_mask;
 
-	if (order) {
-		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
-			    __GFP_NOMEMALLOC;
-		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
-		nc->frag.size = PAGE_SIZE << (page ? order : 0);
-	}
-
+#if (PAGE_SIZE < NETDEV_FRAG_PAGE_MAX_SIZE)
+	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
+		    __GFP_NOMEMALLOC;
+	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+				NETDEV_FRAG_PAGE_MAX_ORDER);
+	nc->size = page ? NETDEV_FRAG_PAGE_MAX_SIZE : PAGE_SIZE;
+#endif
 	if (unlikely(!page))
 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
 
-	nc->frag.page = page;
+	nc->va = page ? page_address(page) : NULL;
 
 	return page;
 }
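One refill detail worth a note (my reading of the hunk above): gfp is
snapshotted before the extra flags are OR-ed in, so only the
opportunistic high-order attempt carries them, while the order-0
fallback keeps the caller's original allocation semantics.

/* Refill, as pseudocode (a sketch of the function above):
 *
 *	gfp = gfp_mask;                      keep the caller's mask
 *	try one 32KB page with gfp_mask | __GFP_COMP | __GFP_NOWARN |
 *		__GFP_NORETRY | __GFP_NOMEMALLOC  -- cheap to fail
 *	if that failed (or PAGE_SIZE >= 32KB): one page with plain gfp
 *	nc->va = page ? page_address(page) : NULL;  -- mapped once here
 */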
@@ -383,19 +388,20 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
 static void *__alloc_page_frag(struct netdev_alloc_cache *nc,
 			       unsigned int fragsz, gfp_t gfp_mask)
 {
-	struct page *page = nc->frag.page;
 	unsigned int size = PAGE_SIZE;
+	struct page *page;
 	int offset;
 
-	if (unlikely(!page)) {
+	if (unlikely(!nc->va)) {
 refill:
 		page = __page_frag_refill(nc, gfp_mask);
 		if (!page)
 			return NULL;
 
-		/* if size can vary use frag.size else just use PAGE_SIZE */
-		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
-
+#if (PAGE_SIZE < NETDEV_FRAG_PAGE_MAX_SIZE)
+		/* if size can vary use size else just use PAGE_SIZE */
+		size = nc->size;
+#endif
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
@@ -404,17 +410,20 @@ static void *__alloc_page_frag(struct netdev_alloc_cache *nc,
 		/* reset page count bias and offset to start of new frag */
 		nc->pfmemalloc = page->pfmemalloc;
 		nc->pagecnt_bias = size;
-		nc->frag.offset = size;
+		nc->offset = size;
 	}
 
-	offset = nc->frag.offset - fragsz;
+	offset = nc->offset - fragsz;
 	if (unlikely(offset < 0)) {
+		page = virt_to_page(nc->va);
+
 		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
 			goto refill;
 
-		/* if size can vary use frag.size else just use PAGE_SIZE */
-		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
-
+#if (PAGE_SIZE < NETDEV_FRAG_PAGE_MAX_SIZE)
+		/* if size can vary use size else just use PAGE_SIZE */
+		size = nc->size;
+#endif
		/* OK, page count is 0, we can safely set it */
 		atomic_set(&page->_count, size);
 
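The pagecnt_bias handling above is the subtle part; a worked pass with
my own numbers (4KB pages, a 32KB frag page, 2KB fragments):

/* Illustrative numbers, not from the commit:
 *
 *   refill:     page->_count primed to 32768, pagecnt_bias = 32768,
 *               offset = 32768
 *   each alloc: pagecnt_bias-- and offset -= 2048; page->_count is
 *               never touched, so its cache line stays clean
 *   after 16 allocs the page is exhausted (offset - 2048 < 0) and
 *               pagecnt_bias == 32752
 *   recycle:    atomic_sub_and_test(32752, &page->_count) hits zero
 *               only if all 16 fragments were already put_page()'d;
 *               then _count is reset to 32768 and the same page is
 *               reused in place -- otherwise some fragment still holds
 *               a reference, and the code refills with a fresh page
 */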
@@ -424,9 +433,9 @@ static void *__alloc_page_frag(struct netdev_alloc_cache *nc,
 	}
 
 	nc->pagecnt_bias--;
-	nc->frag.offset = offset;
+	nc->offset = offset;
 
-	return page_address(page) + offset;
+	return nc->va + offset;
 }
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
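Finally, a hypothetical caller's view (nothing below is from the patch;
the helper names are made up): users of netdev_alloc_frag() are
untouched, since the cache internals never escape the allocator.

#include <linux/mm.h>
#include <linux/skbuff.h>

/* Hypothetical driver-style helper. */
static void *rx_frag_alloc(unsigned int len)
{
	unsigned int fragsz = SKB_DATA_ALIGN(len) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* returns nc->va + offset from the per-CPU cache, or NULL */
	return netdev_alloc_frag(fragsz);
}

static void rx_frag_free(void *data)
{
	/* a frag is dropped like any other page fragment */
	put_page(virt_to_head_page(data));
}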