
Commit d5e63bde authored by David S. Miller

Revert "af_packet: add interframe drop cmsg (v6)"



This reverts commit 97775007.

Neil is reimplementing this generically, outside of AF_PACKET.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 91b2a3f9
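
For reference, the feature being reverted exposed the per-socket frame-drop gap to user space as a PACKET_GAPDATA control message on recvmsg(). The snippet below is a minimal, hypothetical sketch (not part of this commit) of how a consumer would have read that cmsg. The function name read_one_frame and the descriptor fd are illustrative assumptions; fd is taken to be an already-bound AF_PACKET socket, and PACKET_GAPDATA is re-defined locally only because this revert removes the constant from the kernel header.

/*
 * Hypothetical user-space sketch: reading the interframe-drop count
 * that the reverted code delivered via a PACKET_GAPDATA cmsg.
 * "fd" is assumed to be a bound AF_PACKET socket; PACKET_GAPDATA is
 * re-defined here only because this revert removes it from the header.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef PACKET_GAPDATA
#define PACKET_GAPDATA	15
#endif

static void read_one_frame(int fd)
{
	char frame[2048];
	union {
		char buf[CMSG_SPACE(sizeof(uint32_t))];
		struct cmsghdr align;	/* forces proper alignment of buf */
	} ctrl;
	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= ctrl.buf,
		.msg_controllen	= sizeof(ctrl.buf),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	/* Walk the ancillary data looking for the drop-gap counter. */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_GAPDATA) {
			uint32_t gap;

			memcpy(&gap, CMSG_DATA(cmsg), sizeof(gap));
			printf("%u frame(s) dropped before this one\n", gap);
		}
	}
}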
+0 −2
@@ -48,13 +48,11 @@ struct sockaddr_ll
 #define PACKET_RESERVE			12
 #define PACKET_TX_RING			13
 #define PACKET_LOSS			14
-#define PACKET_GAPDATA			15
 
 struct tpacket_stats
 {
 	unsigned int	tp_packets;
 	unsigned int	tp_drops;
-	unsigned int    tp_gap;
 };
 
 struct tpacket_auxdata
+0 −33
@@ -524,31 +524,6 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
 	return res;
 }
 
-/*
- * If we've lost frames since the last time we queued one to the
- * sk_receive_queue, we need to record it here.
- * This must be called under the protection of the socket lock
- * to prevent racing with other softirqs and user space
- */
-static inline void record_packet_gap(struct sk_buff *skb,
-					struct packet_sock *po)
-{
-	/*
-	 * We overload the mark field here, since we're about
-	 * to enqueue to a receive queue and no body else will
-	 * use this field at this point
-	 */
-	skb->mark = po->stats.tp_gap;
-	po->stats.tp_gap = 0;
-	return;
-
-}
-
-static inline __u32 check_packet_gap(struct sk_buff *skb)
-{
-	return skb->mark;
-}
-
 /*
    This function makes lazy skb cloning in hope that most of packets
    are discarded by BPF.
@@ -652,7 +627,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_packets++;
-	record_packet_gap(skb, po);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	spin_unlock(&sk->sk_receive_queue.lock);
 	sk->sk_data_ready(sk, skb->len);
@@ -661,7 +635,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 drop_n_acct:
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_drops++;
-	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
@@ -839,7 +812,6 @@ drop:
 
 ring_is_full:
 	po->stats.tp_drops++;
-	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 	sk->sk_data_ready(sk, 0);
@@ -1449,7 +1421,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sk_buff *skb;
 	int copied, err;
 	struct sockaddr_ll *sll;
-	__u32 gap;
 
 	err = -EINVAL;
 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
@@ -1528,10 +1499,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
 	}
 
-	gap = check_packet_gap(skb);
-	if (gap)
-		put_cmsg(msg, SOL_PACKET, PACKET_GAPDATA, sizeof(__u32), &gap);
-
 	/*
 	 *	Free or return the buffer as appropriate. Again this
 	 *	hides all the races and re-entrancy issues from us.