Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fd0bfa8d authored by Daniel Borkmann's avatar Daniel Borkmann
Browse files

Merge branch 'bpf-af-xdp-cleanups'



Björn Töpel says:

====================
This is the second follow-up set. The first four patches are uapi
changes:

* Removing rebind support
* Getting rid of structure hole
* Removing explicit cache line alignment
* Stricter bind checks

The last patches do some cleanups, where the umem and refcount_t
changes were suggested by Daniel.

* Add a missing write-barrier and use READ_ONCE for data-dependencies
* Clean up umem and do proper locking
* Convert atomic_t to refcount_t
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents d849f9f9 d3b42f14
Loading
Loading
Loading
Loading
+23 −23
Original line number Diff line number Diff line
@@ -17,19 +17,33 @@

/* AF_XDP socket address, passed to bind(2).
 *
 * NOTE(review): sxdp_flags appears twice below. This looks like a
 * diff-rendering artifact of the scraped page — the commit moved the
 * field up next to sxdp_family to close a structure hole, and both the
 * old and new positions are shown. Only one instance can exist in the
 * real header; verify against the upstream if_xdp.h.
 */
struct sockaddr_xdp {
	__u16 sxdp_family;
	__u16 sxdp_flags;
	__u32 sxdp_ifindex;
	__u32 sxdp_queue_id;
	__u32 sxdp_shared_umem_fd;
	__u16 sxdp_flags;
};

/* Byte offsets, within one mmap'ed ring, of the producer index, the
 * consumer index, and the start of the descriptor array.
 */
struct xdp_ring_offset {
	__u64 producer;
	__u64 consumer;
	__u64 desc;
};

/* Offsets for all four AF_XDP rings, reported to user space via the
 * XDP_MMAP_OFFSETS getsockopt so each ring's indices and descriptor
 * area can be located inside its mmap'ed region.
 */
struct xdp_mmap_offsets {
	struct xdp_ring_offset rx;
	struct xdp_ring_offset tx;
	struct xdp_ring_offset fr; /* Fill */
	struct xdp_ring_offset cr; /* Completion */
};

/* XDP socket options */
#define XDP_RX_RING			1
#define XDP_TX_RING			2
#define XDP_UMEM_REG			3
#define XDP_UMEM_FILL_RING		4
#define XDP_UMEM_COMPLETION_RING	5
#define XDP_STATISTICS			6
#define XDP_MMAP_OFFSETS		1
#define XDP_RX_RING			2
#define XDP_TX_RING			3
#define XDP_UMEM_REG			4
#define XDP_UMEM_FILL_RING		5
#define XDP_UMEM_COMPLETION_RING	6
#define XDP_STATISTICS			7

struct xdp_umem_reg {
	__u64 addr; /* Start of packet data area */
@@ -50,6 +64,7 @@ struct xdp_statistics {
#define XDP_UMEM_PGOFF_FILL_RING	0x100000000
#define XDP_UMEM_PGOFF_COMPLETION_RING	0x180000000

/* Rx/Tx descriptor */
struct xdp_desc {
	__u32 idx;
	__u32 len;
@@ -58,21 +73,6 @@ struct xdp_desc {
	__u8 padding[5];
};

struct xdp_ring {
	__u32 producer __attribute__((aligned(64)));
	__u32 consumer __attribute__((aligned(64)));
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[0] __attribute__((aligned(64)));
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	__u32 desc[0] __attribute__((aligned(64)));
};
/* UMEM descriptor is __u32 */

#endif /* _LINUX_IF_XDP_H */
+41 −44
Original line number Diff line number Diff line
@@ -16,21 +16,10 @@

#define XDP_UMEM_MIN_FRAME_SIZE 2048

/* Allocate a zeroed struct xdp_umem and return it via @umem.
 *
 * Returns 0 on success or -ENOMEM if the allocation fails; on failure
 * *umem is left NULL.
 */
int xdp_umem_create(struct xdp_umem **umem)
{
	*umem = kzalloc(sizeof(**umem), GFP_KERNEL);

	if (!*umem)
		return -ENOMEM;

	return 0;
}

static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unsigned int i;

	if (umem->pgs) {
	for (i = 0; i < umem->npgs; i++) {
		struct page *page = umem->pgs[i];

@@ -41,15 +30,12 @@ static void xdp_umem_unpin_pages(struct xdp_umem *umem)
	kfree(umem->pgs);
	umem->pgs = NULL;
}
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
	atomic_long_sub(umem->npgs, &umem->user->locked_vm);
	free_uid(umem->user);
}
}

static void xdp_umem_release(struct xdp_umem *umem)
{
@@ -66,7 +52,6 @@ static void xdp_umem_release(struct xdp_umem *umem)
		umem->cq = NULL;
	}

	if (umem->pgs) {
	xdp_umem_unpin_pages(umem);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
@@ -79,9 +64,6 @@ static void xdp_umem_release(struct xdp_umem *umem)
		goto out;

	mmput(mm);
		umem->pgs = NULL;
	}

	xdp_umem_unaccount_pages(umem);
out:
	kfree(umem);
@@ -96,7 +78,7 @@ static void xdp_umem_release_deferred(struct work_struct *work)

void xdp_get_umem(struct xdp_umem *umem)
{
	atomic_inc(&umem->users);
	refcount_inc(&umem->users);
}

void xdp_put_umem(struct xdp_umem *umem)
@@ -104,7 +86,7 @@ void xdp_put_umem(struct xdp_umem *umem)
	if (!umem)
		return;

	if (atomic_dec_and_test(&umem->users)) {
	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
@@ -167,16 +149,13 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
	return 0;
}

int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 frame_size = mr->frame_size, frame_headroom = mr->frame_headroom;
	u64 addr = mr->addr, size = mr->len;
	unsigned int nframes, nfpp;
	int size_chk, err;

	if (!umem)
		return -EINVAL;

	if (frame_size < XDP_UMEM_MIN_FRAME_SIZE || frame_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or*
@@ -227,7 +206,7 @@ int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
	umem->frame_size_log2 = ilog2(frame_size);
	umem->nfpp_mask = nfpp - 1;
	umem->nfpplog2 = ilog2(nfpp);
	atomic_set(&umem->users, 1);
	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
@@ -245,6 +224,24 @@ int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
	return err;
}

/* Allocate a umem and register the user memory described by @mr with it.
 *
 * Returns the new umem on success, or an ERR_PTR()-encoded errno on
 * failure: -ENOMEM when the allocation itself fails, otherwise the
 * error reported by xdp_umem_reg(). The umem is freed on the error
 * path, so the caller owns it only on success.
 */
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	int err;
	struct xdp_umem *umem = kzalloc(sizeof(*umem), GFP_KERNEL);

	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = xdp_umem_reg(umem, mr);
	if (!err)
		return umem;

	kfree(umem);
	return ERR_PTR(err);
}

bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
+2 −3
Original line number Diff line number Diff line
@@ -27,7 +27,7 @@ struct xdp_umem {
	struct pid *pid;
	unsigned long address;
	size_t size;
	atomic_t users;
	refcount_t users;
	struct work_struct work;
};

@@ -50,9 +50,8 @@ static inline char *xdp_umem_get_data_with_headroom(struct xdp_umem *umem,
}

bool xdp_umem_validate_queues(struct xdp_umem *umem);
int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr);
void xdp_get_umem(struct xdp_umem *umem);
void xdp_put_umem(struct xdp_umem *umem);
int xdp_umem_create(struct xdp_umem **umem);
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr);

#endif /* XDP_UMEM_H_ */
+66 −39
Original line number Diff line number Diff line
@@ -142,6 +142,11 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			goto out;
		}

		if (xs->queue_id >= xs->dev->real_num_tx_queues) {
			err = -ENXIO;
			goto out;
		}

		skb = sock_alloc_send_skb(sk, len, !need_wait, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
@@ -223,18 +228,12 @@ static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	*queue = q;
	return 0;
}

static void __xsk_release(struct xdp_sock *xs)
{
	/* Wait for driver to stop using the xdp socket. */
	synchronize_net();

	dev_put(xs->dev);
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
@@ -251,7 +250,9 @@ static int xsk_release(struct socket *sock)
	local_bh_enable();

	if (xs->dev) {
		__xsk_release(xs);
		/* Wait for driver to stop using the xdp socket. */
		synchronize_net();
		dev_put(xs->dev);
		xs->dev = NULL;
	}

@@ -285,9 +286,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct net_device *dev, *dev_curr;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_umem *old_umem = NULL;
	struct net_device *dev;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
@@ -296,7 +296,11 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
		return -EINVAL;

	mutex_lock(&xs->mutex);
	dev_curr = xs->dev;
	if (xs->dev) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
@@ -308,7 +312,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
		goto out_unlock;
	}

	if (sxdp->sxdp_queue_id >= dev->num_rx_queues) {
	if ((xs->rx && sxdp->sxdp_queue_id >= dev->real_num_rx_queues) ||
	    (xs->tx && sxdp->sxdp_queue_id >= dev->real_num_tx_queues)) {
		err = -EINVAL;
		goto out_unlock;
	}
@@ -343,7 +348,6 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
		}

		xdp_get_umem(umem_xs->umem);
		old_umem = xs->umem;
		xs->umem = umem_xs->umem;
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
@@ -355,14 +359,6 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
		xskq_set_umem(xs->umem->cq, &xs->umem->props);
	}

	/* Rebind? */
	if (dev_curr && (dev_curr != dev ||
			 xs->queue_id != sxdp->sxdp_queue_id)) {
		__xsk_release(xs);
		if (old_umem)
			xdp_put_umem(old_umem);
	}

	xs->dev = dev;
	xs->queue_id = sxdp->sxdp_queue_id;

@@ -410,25 +406,23 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
		struct xdp_umem_reg mr;
		struct xdp_umem *umem;

		if (xs->umem)
			return -EBUSY;

		if (copy_from_user(&mr, optval, sizeof(mr)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		err = xdp_umem_create(&umem);
		if (xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		err = xdp_umem_reg(umem, &mr);
		if (err) {
			kfree(umem);
		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return err;
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();

		xs->umem = umem;
		mutex_unlock(&xs->mutex);
		return 0;
@@ -439,13 +433,15 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
		struct xsk_queue **q;
		int entries;

		if (!xs->umem)
			return -EINVAL;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
@@ -495,6 +491,35 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname,

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;

		if (len < sizeof(off))
			return -EINVAL;

		off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.rx.desc	= offsetof(struct xdp_rxtx_ring, desc);
		off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.tx.desc	= offsetof(struct xdp_rxtx_ring, desc);

		off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.fr.desc	= offsetof(struct xdp_umem_ring, desc);
		off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.cr.desc	= offsetof(struct xdp_umem_ring, desc);

		len = sizeof(off);
		if (copy_to_user(optval, &off, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}
@@ -509,21 +534,23 @@ static int xsk_mmap(struct file *file, struct socket *sock,
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (offset == XDP_PGOFF_RX_RING) {
		q = xs->rx;
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = xs->tx;
		q = READ_ONCE(xs->tx);
	} else {
		if (!xs->umem)
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = xs->umem->fq;
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = xs->umem->cq;
			q = READ_ONCE(umem->cq);
	}

	if (!q)
+17 −0
Original line number Diff line number Diff line
@@ -13,6 +13,23 @@

#define RX_BATCH_SIZE 16

/* Producer/consumer indices shared between kernel and user space.
 * Each index is placed on its own cache line
 * (____cacheline_aligned_in_smp), now a kernel-internal detail rather
 * than part of the uapi.
 */
struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[0] ____cacheline_aligned_in_smp; /* trailing array of packet descriptors */
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u32 desc[0] ____cacheline_aligned_in_smp; /* UMEM descriptors are plain u32 indices */
};

struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
Loading