Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5e17f0bf authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "net: rmnet_data: Fix compilation error due to rmnet"

parents 0f15d24b 5c620645
Loading
Loading
Loading
Loading
+3 −5
Original line number Diff line number Diff line
/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -399,8 +399,7 @@ struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
}

int rmnet_add_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev,
		     struct netlink_ext_ack *extack)
		     struct net_device *slave_dev)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct net_device *real_dev = priv->real_dev;
@@ -422,8 +421,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
	if (err)
		return -EBUSY;

	err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
					   extack);
	err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL);
	if (err)
		return -EINVAL;

+2 −3
Original line number Diff line number Diff line
/* Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -64,8 +64,7 @@ struct rmnet_priv {
struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
int rmnet_add_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev,
		     struct netlink_ext_ack *extack);
		     struct net_device *slave_dev);
int rmnet_del_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev);
#endif /* _RMNET_CONFIG_H_ */
+13 −1
Original line number Diff line number Diff line
@@ -266,7 +266,7 @@ struct sock_common {
  *	@sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
  *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *	@sk_gso_max_size: Maximum GSO segment size to build
  *	@sk_gso_max_segs: Maximum number of GSO segments
  *	@sk_pacing_shift: scaling factor for TCP Small Queues
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
@@ -448,6 +448,7 @@ struct sock {
	kmemcheck_bitfield_end(flags);

	u16			sk_gso_max_segs;
	u8			sk_pacing_shift;
	unsigned long	        sk_lingertime;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
@@ -2383,4 +2384,15 @@ extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
 * Some wifi drivers need to tweak it to get more chunks.
 * They can use this helper from their ndo_start_xmit()
 */
/* No-op for NULL, non-full sockets, or when the shift already matches. */
static inline void sk_pacing_shift_update(struct sock *sk, int val)
{
	if (sk && sk_fullsock(sk) && sk->sk_pacing_shift != val)
		sk->sk_pacing_shift = val;
}

#endif	/* _SOCK_H */
+1 −0
Original line number Diff line number Diff line
@@ -2744,6 +2744,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)

	sk->sk_max_pacing_rate = ~0U;
	sk->sk_pacing_rate = ~0U;
	sk->sk_pacing_shift = 10;
	sk->sk_incoming_cpu = -1;
	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
+2 −2
Original line number Diff line number Diff line
@@ -1671,7 +1671,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
{
	u32 bytes, segs;

	bytes = min(sk->sk_pacing_rate >> 10,
	bytes = min(sk->sk_pacing_rate >> sk->sk_pacing_shift,
		    sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);

	/* Goal is to send at least one packet per ms,
@@ -2145,7 +2145,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
{
	unsigned int limit;

	limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
	limit = max(2 * skb->truesize, sk->sk_pacing_rate >> sk->sk_pacing_shift);
	limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
	limit <<= factor;

Loading