Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c58dc01b authored by David S. Miller
Browse files

net: Make RFS socket operations not be inet specific.



Idea from Eric Dumazet.

As for placement inside of struct sock, I tried to choose a place
that otherwise has a 32-bit hole on 64-bit systems.

Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
parent c4ee6a53
Loading
Loading
Loading
Loading
+0 −37
Original line number Diff line number Diff line
@@ -102,7 +102,6 @@ struct rtable;
 * @uc_ttl - Unicast TTL
 * @inet_sport - Source port
 * @inet_id - ID counter for DF pkts
 * @rxhash - flow hash received from netif layer
 * @tos - TOS
 * @mc_ttl - Multicasting TTL
 * @is_icsk - is this an inet_connection_sock?
@@ -126,9 +125,6 @@ struct inet_sock {
	__u16			cmsg_flags;
	__be16			inet_sport;
	__u16			inet_id;
#ifdef CONFIG_RPS
	__u32			rxhash;
#endif

	struct ip_options	*opt;
	__u8			tos;
@@ -224,37 +220,4 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
	return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0;
}

/*
 * Record this socket's receive flow hash (inet_sk(sk)->rxhash) in the
 * global RFS sock flow table via rps_record_sock_flow().  Compiles to
 * an empty function when CONFIG_RPS is not enabled.
 * (Removed by this commit in favor of the inet-agnostic
 * sock_rps_record_flow(), which reads sk->sk_rxhash instead.)
 */
static inline void inet_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	/* RCU read-side section guards the rps_sock_flow_table pointer. */
	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, inet_sk(sk)->rxhash);
	rcu_read_unlock();
#endif
}

/*
 * Clear this socket's entry in the global RFS sock flow table via
 * rps_reset_sock_flow(), keyed by inet_sk(sk)->rxhash.  No-op when
 * CONFIG_RPS is disabled.
 * (Removed by this commit in favor of sock_rps_reset_flow().)
 */
static inline void inet_rps_reset_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	/* RCU read-side section guards the rps_sock_flow_table pointer. */
	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_reset_sock_flow(sock_flow_table, inet_sk(sk)->rxhash);
	rcu_read_unlock();
#endif
}

/*
 * Store a newly received flow hash on the socket.  If the hash changed,
 * first drop the stale flow-table entry (inet_rps_reset_flow()) before
 * saving the new value.  No-op when CONFIG_RPS is disabled.
 * (Removed by this commit in favor of sock_rps_save_rxhash().)
 */
static inline void inet_rps_save_rxhash(struct sock *sk, u32 rxhash)
{
#ifdef CONFIG_RPS
	/* unlikely(): the hash is expected to be stable for a live flow. */
	if (unlikely(inet_sk(sk)->rxhash != rxhash)) {
		inet_rps_reset_flow(sk);
		inet_sk(sk)->rxhash = rxhash;
	}
#endif
}
#endif	/* _INET_SOCK_H */
+38 −0
Original line number Diff line number Diff line
@@ -198,6 +198,7 @@ struct sock_common {
  *	@sk_rcvlowat: %SO_RCVLOWAT setting
  *	@sk_rcvtimeo: %SO_RCVTIMEO setting
  *	@sk_sndtimeo: %SO_SNDTIMEO setting
  *	@sk_rxhash: flow hash received from netif layer
  *	@sk_filter: socket filtering instructions
  *	@sk_protinfo: private area, net family specific, when not using slab
  *	@sk_timer: sock cleanup timer
@@ -279,6 +280,9 @@ struct sock {
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	int			sk_rcvlowat;
#ifdef CONFIG_RPS
	__u32			sk_rxhash;
#endif
	unsigned long 		sk_flags;
	unsigned long	        sk_lingertime;
	struct sk_buff_head	sk_error_queue;
@@ -620,6 +624,40 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
	return sk->sk_backlog_rcv(sk, skb);
}

/*
 * Record the socket's receive flow hash (sk->sk_rxhash) in the global
 * RFS sock flow table via rps_record_sock_flow().  Compiles to an empty
 * function when CONFIG_RPS is not configured.
 */
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *flow_table;

	/* The flow table pointer is RCU-protected. */
	rcu_read_lock();
	flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

/*
 * Clear the socket's entry in the global RFS sock flow table via
 * rps_reset_sock_flow(), keyed by sk->sk_rxhash.  No-op when
 * CONFIG_RPS is disabled.
 */
static inline void sock_rps_reset_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *flow_table;

	/* The flow table pointer is RCU-protected. */
	rcu_read_lock();
	flow_table = rcu_dereference(rps_sock_flow_table);
	rps_reset_sock_flow(flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

/*
 * Save a newly received flow hash on the socket.  When the hash differs
 * from the cached one, the stale flow-table entry is dropped first via
 * sock_rps_reset_flow().  No-op when CONFIG_RPS is disabled.
 */
static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
{
#ifdef CONFIG_RPS
	/* Common case: hash unchanged for an established flow. */
	if (likely(sk->sk_rxhash == rxhash))
		return;

	sock_rps_reset_flow(sk);
	sk->sk_rxhash = rxhash;
#endif
}

#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
+4 −4
Original line number Diff line number Diff line
@@ -419,7 +419,7 @@ int inet_release(struct socket *sock)
	if (sk) {
		long timeout;

		inet_rps_reset_flow(sk);
		sock_rps_reset_flow(sk);

		/* Applications forget to leave groups before exiting */
		ip_mc_drop_socket(sk);
@@ -722,7 +722,7 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
{
	struct sock *sk = sock->sk;

	inet_rps_record_flow(sk);
	sock_rps_record_flow(sk);

	/* We may need to bind the socket. */
	if (!inet_sk(sk)->inet_num && inet_autobind(sk))
@@ -737,7 +737,7 @@ static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
{
	struct sock *sk = sock->sk;

	inet_rps_record_flow(sk);
	sock_rps_record_flow(sk);

	/* We may need to bind the socket. */
	if (!inet_sk(sk)->inet_num && inet_autobind(sk))
@@ -755,7 +755,7 @@ int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
	int addr_len = 0;
	int err;

	inet_rps_record_flow(sk);
	sock_rps_record_flow(sk);

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
+1 −1
Original line number Diff line number Diff line
@@ -1672,7 +1672,7 @@ process:

	skb->dev = NULL;

	inet_rps_save_rxhash(sk, skb->rxhash);
	sock_rps_save_rxhash(sk, skb->rxhash);

	bh_lock_sock_nested(sk);
	ret = 0;
+2 −2
Original line number Diff line number Diff line
@@ -1217,7 +1217,7 @@ int udp_disconnect(struct sock *sk, int flags)
	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	inet_rps_save_rxhash(sk, 0);
	sock_rps_save_rxhash(sk, 0);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);
@@ -1262,7 +1262,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
	int rc;

	if (inet_sk(sk)->inet_daddr)
		inet_rps_save_rxhash(sk, skb->rxhash);
		sock_rps_save_rxhash(sk, skb->rxhash);

	rc = sock_queue_rcv_skb(sk, skb);
	if (rc < 0) {