Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4ebf0ae2 authored by David S. Miller
Browse files

[AF_PACKET]: Convert PACKET_MMAP over to vm_insert_page().



So we can properly use __GFP_COMP and avoid the use of
PG_reserved pages.

With extremely helpful review from Hugh Dickins.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent dfb4b9dc
Loading
Loading
Loading
Loading
+61 −54
Original line number Original line Diff line number Diff line
@@ -1587,23 +1587,47 @@ static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
	return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
	return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
}
}


static void free_pg_vec(char **pg_vec, unsigned order, unsigned len)
static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
{
{
	int i;
	int i;


	for (i = 0; i < len; i++) {
	for (i = 0; i < len; i++) {
		if (pg_vec[i]) {
		if (likely(pg_vec[i]))
			struct page *page, *pend;

			pend = pg_vec_endpage(pg_vec[i], order);
			for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
				ClearPageReserved(page);
			free_pages((unsigned long) pg_vec[i], order);
			free_pages((unsigned long) pg_vec[i], order);
	}
	}
	}
	kfree(pg_vec);
	kfree(pg_vec);
}
}


/* Allocate one physically contiguous block of 2^order pages for the
 * PACKET_MMAP ring.  __GFP_COMP makes the allocation a compound page so
 * that vm_insert_page() in packet_mmap() can legitimately take page
 * references on it; __GFP_ZERO avoids leaking stale kernel memory to
 * userspace through the mmap()ed ring.  Returns the kernel virtual
 * address of the block, or NULL on allocation failure. */
static inline char *alloc_one_pg_vec_page(unsigned long order)
{
	return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
					 order);
}

/* Allocate the page vector for a PACKET_MMAP ring: tp_block_nr entries,
 * each pointing at one 2^order-page block from alloc_one_pg_vec_page().
 * Returns the vector on success, or NULL on any allocation failure; on
 * failure every block allocated so far is released before returning. */
static char **alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	char **pg_vec;
	int i;

	/* NOTE(review): block_nr is user-controlled; on 32-bit,
	 * block_nr * sizeof(char *) could wrap — kcalloc() would add the
	 * overflow check.  TODO: confirm the caller bounds tp_block_nr. */
	pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i]))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	/* Slots past the failure point are still NULL (kzalloc), and
	 * free_pg_vec() skips NULL entries, so freeing all block_nr
	 * slots here is safe. */
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}


static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
{
{
@@ -1617,47 +1641,30 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing


		/* Sanity tests and some calculations */
		/* Sanity tests and some calculations */


		if (po->pg_vec)
		if (unlikely(po->pg_vec))
			return -EBUSY;
			return -EBUSY;


		if ((int)req->tp_block_size <= 0)
		if (unlikely((int)req->tp_block_size <= 0))
			return -EINVAL;
			return -EINVAL;
		if (req->tp_block_size&(PAGE_SIZE-1))
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			return -EINVAL;
			return -EINVAL;
		if (req->tp_frame_size < TPACKET_HDRLEN)
		if (unlikely(req->tp_frame_size < TPACKET_HDRLEN))
			return -EINVAL;
			return -EINVAL;
		if (req->tp_frame_size&(TPACKET_ALIGNMENT-1))
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			return -EINVAL;
			return -EINVAL;


		po->frames_per_block = req->tp_block_size/req->tp_frame_size;
		po->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (po->frames_per_block <= 0)
		if (unlikely(po->frames_per_block <= 0))
			return -EINVAL;
			return -EINVAL;
		if (po->frames_per_block*req->tp_block_nr != req->tp_frame_nr)
		if (unlikely((po->frames_per_block * req->tp_block_nr) !=
			     req->tp_frame_nr))
			return -EINVAL;
			return -EINVAL;
		/* OK! */

		/* Allocate page vector */
		while ((PAGE_SIZE<<order) < req->tp_block_size)
			order++;


		err = -ENOMEM;
		err = -ENOMEM;

		order = get_order(req->tp_block_size);
		pg_vec = kmalloc(req->tp_block_nr*sizeof(char *), GFP_KERNEL);
		pg_vec = alloc_pg_vec(req, order);
		if (pg_vec == NULL)
		if (unlikely(!pg_vec))
			goto out;
			goto out;
		memset(pg_vec, 0, req->tp_block_nr*sizeof(char **));

		for (i=0; i<req->tp_block_nr; i++) {
			struct page *page, *pend;
			pg_vec[i] = (char *)__get_free_pages(GFP_KERNEL, order);
			if (!pg_vec[i])
				goto out_free_pgvec;

			pend = pg_vec_endpage(pg_vec[i], order);
			for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
				SetPageReserved(page);
		}
		/* Page vector is allocated */


		l = 0;
		l = 0;
		for (i = 0; i < req->tp_block_nr; i++) {
		for (i = 0; i < req->tp_block_nr; i++) {
@@ -1666,7 +1673,6 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
			int k;
			int k;


			for (k = 0; k < po->frames_per_block; k++) {
			for (k = 0; k < po->frames_per_block; k++) {
				
				header = (struct tpacket_hdr *) ptr;
				header = (struct tpacket_hdr *) ptr;
				header->tp_status = TP_STATUS_KERNEL;
				header->tp_status = TP_STATUS_KERNEL;
				ptr += req->tp_frame_size;
				ptr += req->tp_frame_size;
@@ -1674,7 +1680,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
		}
		}
		/* Done */
		/* Done */
	} else {
	} else {
		if (req->tp_frame_nr)
		if (unlikely(req->tp_frame_nr))
			return -EINVAL;
			return -EINVAL;
	}
	}


@@ -1701,7 +1707,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing


		spin_lock_bh(&sk->sk_receive_queue.lock);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		pg_vec = XC(po->pg_vec, pg_vec);
		pg_vec = XC(po->pg_vec, pg_vec);
		po->frame_max = req->tp_frame_nr-1;
		po->frame_max = (req->tp_frame_nr - 1);
		po->head = 0;
		po->head = 0;
		po->frame_size = req->tp_frame_size;
		po->frame_size = req->tp_frame_size;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -1728,7 +1734,6 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing


	release_sock(sk);
	release_sock(sk);


out_free_pgvec:
	if (pg_vec)
	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
out:
@@ -1755,17 +1760,19 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
	if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
	if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
		goto out;
		goto out;


	atomic_inc(&po->mapped);
	start = vma->vm_start;
	start = vma->vm_start;
	err = -EAGAIN;
	for (i = 0; i < po->pg_vec_len; i++) {
	for (i = 0; i < po->pg_vec_len; i++) {
		if (remap_pfn_range(vma, start,
		struct page *page = virt_to_page(po->pg_vec[i]);
				     __pa(po->pg_vec[i]) >> PAGE_SHIFT,
		int pg_num;
				     po->pg_vec_pages*PAGE_SIZE,

				     vma->vm_page_prot))
		for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
			err = vm_insert_page(vma, start, page);
			if (unlikely(err))
				goto out;
				goto out;
		start += po->pg_vec_pages*PAGE_SIZE;
			start += PAGE_SIZE;
		}
		}
	}
	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	vma->vm_ops = &packet_mmap_ops;
	err = 0;
	err = 0;