Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2638595a authored by Elena Reshetova, committed by David S. Miller
Browse files

net: convert sk_buff_fclones.fclone_ref from atomic_t to refcount_t



The refcount_t type and its corresponding API should be
used instead of atomic_t when the variable serves as
a reference counter. This makes it possible to avoid
accidental refcounter overflows that might lead to
use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 63354797
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -915,7 +915,7 @@ struct sk_buff_fclones {

	struct sk_buff	skb2;

-	atomic_t	fclone_ref;
+	refcount_t	fclone_ref;
};

/**
@@ -935,7 +935,7 @@ static inline bool skb_fclone_busy(const struct sock *sk,
	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
-	       atomic_read(&fclones->fclone_ref) > 1 &&
+	       refcount_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

+5 −5
Original line number Diff line number Diff line
@@ -268,7 +268,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,

		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
		skb->fclone = SKB_FCLONE_ORIG;
-		atomic_set(&fclones->fclone_ref, 1);
+		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
@@ -629,7 +629,7 @@ static void kfree_skbmem(struct sk_buff *skb)
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
-		if (atomic_read(&fclones->fclone_ref) == 1)
+		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

@@ -637,7 +637,7 @@ static void kfree_skbmem(struct sk_buff *skb)
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
-	if (!atomic_dec_and_test(&fclones->fclone_ref))
+	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
@@ -1027,9 +1027,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
-	    atomic_read(&fclones->fclone_ref) == 1) {
+	    refcount_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
-		atomic_set(&fclones->fclone_ref, 2);
+		refcount_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;