Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0b58e6db authored by David S. Miller
Browse files

Merge branch 'net-refcount_t'



Elena Reshetova says:

====================
v3 net generic subsystem refcount conversions

Changes in v3:
Rebased on top of the net-next tree.

Changes in v2:
No changes in patches apart from rebases, but now by
default refcount_t = atomic_t (*) and uses all atomic standard operations
unless CONFIG_REFCOUNT_FULL is enabled. This is a compromise for
performance-critical systems (such as net) that cannot accept even a
slight delay in the refcounter operations.

This series, for core network subsystem components, replaces atomic_t reference
counters with the new refcount_t type and API (see include/linux/refcount.h).
By doing this we prevent intentional or accidental
underflows or overflows that can lead to use-after-free vulnerabilities.
These patches contain only generic net pieces. Other changes will be sent separately.

The patches are fully independent and can be cherry-picked separately.
The big patches, such as conversions for sock structure, need a very detailed
look from maintainers: refcount managing is quite complex in them and while
it seems that they would benefit from the change, extra checking is needed.
The biggest corner issue is the fact that refcount_inc() does not increment
from zero.

If there are no objections to the patches, please merge them via respective trees.

* The respective change is currently merged into -next as
  "locking/refcount: Create unchecked atomic_t implementation".
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 574a6020 fb5c2c17
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -877,7 +877,7 @@ static void aead_sock_destruct(struct sock *sk)
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
	WARN_ON(refcount_read(&sk->sk_refcnt) != 0);
	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
+1 −11
Original line number Diff line number Diff line
@@ -924,12 +924,7 @@ fore200e_tx_irq(struct fore200e* fore200e)
		else {
		    dev_kfree_skb_any(entry->skb);
		}
#if 1
		/* race fixed by the above incarnation mechanism, but... */
		if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
		    atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
		}
#endif

		/* check error condition */
		if (*entry->status & STATUS_ERROR)
		    atomic_inc(&vcc->stats->tx_err);
@@ -1130,13 +1125,9 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
	return -ENOMEM;
    }

    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);

    vcc->push(vcc, skb);
    atomic_inc(&vcc->stats->rx);

    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);

    return 0;
}

@@ -1572,7 +1563,6 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
    unsigned long           flags;

    ASSERT(vcc);
    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
    ASSERT(fore200e);
    ASSERT(fore200e_vcc);

+1 −1
Original line number Diff line number Diff line
@@ -2395,7 +2395,7 @@ he_close(struct atm_vcc *vcc)
		 * TBRQ, the host issues the close command to the adapter.
		 */

		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
		while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
+2 −2
Original line number Diff line number Diff line
@@ -724,7 +724,7 @@ push_on_scq(struct idt77252_dev *card, struct vc_map *vc, struct sk_buff *skb)
		struct sock *sk = sk_atm(vcc);

		vc->estimator->cells += (skb->len + 47) / 48;
		if (atomic_read(&sk->sk_wmem_alloc) >
		if (refcount_read(&sk->sk_wmem_alloc) >
		    (sk->sk_sndbuf >> 1)) {
			u32 cps = vc->estimator->maxcps;

@@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
		atomic_inc(&vcc->stats->tx_err);
		return -ENOMEM;
	}
	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);

	skb_put_data(skb, cell, 52);

+2 −2
Original line number Diff line number Diff line
@@ -742,7 +742,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,

	if (type == NES_TIMER_TYPE_SEND) {
		new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
		atomic_inc(&new_send->skb->users);
		refcount_inc(&new_send->skb->users);
		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
		cm_node->send_entry = new_send;
		add_ref_cm_node(cm_node);
@@ -924,7 +924,7 @@ static void nes_cm_timer_tick(unsigned long pass)
						  flags);
				break;
			}
			atomic_inc(&send_entry->skb->users);
			refcount_inc(&send_entry->skb->users);
			cm_packets_retrans++;
			nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
				  "for node %p, jiffies = %lu, time to send = "
Loading