
Commit a2a385d6 authored by Eric Dumazet, committed by David S. Miller

tcp: bool conversions

bool conversions where possible.

__inline__ -> inline

space cleanups

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e005d193
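
The conversion is mechanical throughout: helpers that encode a yes/no answer as 0/1 in an int now return bool, so the signature documents intent and accidental arithmetic on the result stands out. A minimal before/after sketch of the pattern (illustrative only; tcp_example_pred is a made-up name, not from this patch; assumes linux/types.h for bool and __u32):

	/* before: predicate result packed into an int */
	static inline int tcp_example_pred(__u32 a, __u32 b)
	{
		return a == b;	/* yields 0 or 1 */
	}

	/* after: the return type itself says "this is a predicate" */
	static inline bool tcp_example_pred(__u32 a, __u32 b)
	{
		return a == b;
	}
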
include/net/tcp.h  +28 −28
@@ -263,14 +263,14 @@ extern int tcp_memory_pressure;
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

-static inline int before(__u32 seq1, __u32 seq2)
+static inline bool before(__u32 seq1, __u32 seq2)
{
        return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
-static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
+static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
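
As the comment above these helpers notes, the subtraction is done in unsigned arithmetic and reinterpreted as signed, which makes before()/after() robust across 32-bit sequence wraparound. A quick worked example (not part of the patch):

	/* seq1 sits just below the 2^32 wrap, seq2 just past it */
	__u32 seq1 = 0xfffffff0, seq2 = 0x00000010;
	/* seq1 - seq2 == 0xffffffe0; cast to __s32 that is negative,
	 * so before(seq1, seq2) is true: seq1 precedes seq2 even
	 * though it is numerically larger. */
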
@@ -305,7 +305,7 @@ static inline void tcp_synq_overflow(struct sock *sk)
}

/* syncookies: no recent synqueue overflow on this listening socket? */
-static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
@@ -383,7 +383,7 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
				   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
-extern int tcp_use_frto(struct sock *sk);
+extern bool tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
@@ -470,7 +470,7 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk,

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
-extern int tcp_may_send_now(struct sock *sk);
+extern bool tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
@@ -484,7 +484,7 @@ extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
-extern int tcp_syn_flood_action(struct sock *sk,
+extern bool tcp_syn_flood_action(struct sock *sk,
				 const struct sk_buff *skb,
				 const char *proto);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
@@ -794,12 +794,12 @@ static inline int tcp_is_sack(const struct tcp_sock *tp)
	return tp->rx_opt.sack_ok;
}

-static inline int tcp_is_reno(const struct tcp_sock *tp)
+static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

-static inline int tcp_is_fack(const struct tcp_sock *tp)
+static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}
@@ -901,7 +901,7 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
-extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
@@ -944,7 +944,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
	return __skb_checksum_complete(skb);
}

-static inline int tcp_checksum_complete(struct sk_buff *skb)
+static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
@@ -974,12 +974,12 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
 *
 * NOTE: is this not too big to inline?
 */
-static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
-		return 0;
+		return false;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
@@ -1003,7 +1003,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
-	return 1;
+	return true;
}


@@ -1108,28 +1108,28 @@ static inline int tcp_fin_time(const struct sock *sk)
	return fin_timeout;
}

-static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
+static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
-		return 1;
+		return true;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
-		return 1;
+		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then following tcp messages have valid values. Ignore 0 value,
	 * or else 'negative' tsval might forbid us to accept their packets.
	 */
	if (!rx_opt->ts_recent)
-		return 1;
-	return 0;
+		return true;
+	return false;
}

-static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
+static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
-		return 0;
+		return false;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
@@ -1144,8 +1144,8 @@ static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
-		return 0;
-	return 1;
+		return false;
+	return true;
}

static inline void tcp_mib_init(struct net *net)
@@ -1383,7 +1383,7 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
	__skb_unlink(skb, &sk->sk_write_queue);
}

-static inline int tcp_write_queue_empty(struct sock *sk)
+static inline bool tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}
@@ -1440,7 +1440,7 @@ static inline void tcp_highest_sack_combine(struct sock *sk,
/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
-static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
+static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}
net/ipv4/tcp.c  +10 −10
@@ -593,7 +593,7 @@ static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
	tp->pushed_seq = tp->write_seq;
}

-static inline int forced_push(const struct tcp_sock *tp)
+static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
@@ -1082,7 +1082,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
				if (err)
					goto do_fault;
			} else {
-				int merge = 0;
+				bool merge = false;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = sk->sk_sndmsg_page;
				int off;
@@ -1096,7 +1096,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
-					merge = 1;
+					merge = true;
				} else if (i == MAX_SKB_FRAGS || !sg) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
@@ -1293,7 +1293,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
-	int time_to_ack = 0;
+	bool time_to_ack = false;

	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

@@ -1319,7 +1319,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
-			time_to_ack = 1;
+			time_to_ack = true;
	}

	/* We send an ACK if we can now advertise a non-zero window
@@ -1341,7 +1341,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
-				time_to_ack = 1;
+				time_to_ack = true;
		}
	}
	if (time_to_ack)
@@ -2171,7 +2171,7 @@ EXPORT_SYMBOL(tcp_close);

/* These states need RST on ABORT according to RFC793 */

-static inline int tcp_need_reset(int state)
+static inline bool tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
@@ -2245,7 +2245,7 @@ int tcp_disconnect(struct sock *sk, int flags)
}
EXPORT_SYMBOL(tcp_disconnect);

-static inline int tcp_can_repair_sock(struct sock *sk)
+static inline bool tcp_can_repair_sock(const struct sock *sk)
{
	return capable(CAP_NET_ADMIN) &&
		((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
@@ -3172,13 +3172,13 @@ __tcp_alloc_md5sig_pool(struct sock *sk)
struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
{
	struct tcp_md5sig_pool __percpu *pool;
-	int alloc = 0;
+	bool alloc = false;

retry:
	spin_lock_bh(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
-		alloc = 1;
+		alloc = true;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
	} else if (!pool) {
		tcp_md5sig_users--;
net/ipv4/tcp_cong.c  +3 −3
@@ -280,19 +280,19 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
/* RFC2861 Check whether we are limited by application or congestion window
 * This is the inverse of cwnd check in tcp_tso_should_defer
 */
-int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
-		return 1;
+		return true;

	left = tp->snd_cwnd - in_flight;
	if (sk_can_gso(sk) &&
	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
	    left * tp->mss_cache < sk->sk_gso_max_size)
-		return 1;
+		return true;
	return left <= tcp_max_tso_deferred_mss(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
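
tcp_is_cwnd_limited() is the RFC 2861 test congestion-avoidance code uses to decide whether window growth is justified. The usual calling pattern in a cong_avoid handler looks roughly like this (a paraphrased sketch, not part of this diff):

	/* grow cwnd only when cwnd, not the application, is the limit */
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;
	/* ...otherwise apply the increase rules... */

With the bool return, the intent of that test now matches its type.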
net/ipv4/tcp_hybla.c  +5 −5
@@ -15,7 +15,7 @@

/* Tcp Hybla structure. */
struct hybla {
-	u8    hybla_en;
+	bool  hybla_en;
	u32   snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */
	u32   rho;	      /* Rho parameter, integer part  */
	u32   rho2;	      /* Rho * Rho, integer part */
@@ -24,8 +24,7 @@ struct hybla {
	u32   minrtt;	      /* Minimum smoothed round trip time value seen */
};

-/* Hybla reference round trip time (default= 1/40 sec = 25 ms),
-   expressed in jiffies */
+/* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */
static int rtt0 = 25;
module_param(rtt0, int, 0644);
MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)");
@@ -52,7 +51,7 @@ static void hybla_init(struct sock *sk)
	ca->rho_3ls = 0;
	ca->rho2_7ls = 0;
	ca->snd_cwnd_cents = 0;
-	ca->hybla_en = 1;
+	ca->hybla_en = true;
	tp->snd_cwnd = 2;
	tp->snd_cwnd_clamp = 65535;

@@ -67,6 +66,7 @@ static void hybla_init(struct sock *sk)
static void hybla_state(struct sock *sk, u8 ca_state)
{
	struct hybla *ca = inet_csk_ca(sk);
+
	ca->hybla_en = (ca_state == TCP_CA_Open);
}

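With hybla_en as a bool, the flag reads as what it is: Hybla is active only while the connection is in TCP_CA_Open. The rest of the module gates on it roughly as follows (paraphrased sketch of the era's tcp_hybla.c, not part of this hunk):

	static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
	{
		struct hybla *ca = inet_csk_ca(sk);

		if (!ca->hybla_en) {			/* not in TCP_CA_Open */
			tcp_reno_cong_avoid(sk, ack, in_flight);
			return;
		}
		/* ...Hybla rho-scaled increase rules... */
	}
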
+108 −106

(File changed; preview size limit exceeded, diff collapsed.)