Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4bd4c2dd authored by Thomas Pedersen, committed by Johannes Berg
Browse files

mac80211: clean up mpath_move_to_queue()



Use skb_queue_walk_safe instead, and fix a few issues:

	- didn't free old skbs on moving
	- didn't react to failed skb alloc
	- needlessly held a local pointer to the destination frame queue
	- didn't check destination queue length before adding skb

Signed-off-by: Thomas Pedersen <thomas@cozybit.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent b22bd522
Loading
Loading
Loading
Loading
+3 −0
Original line number Original line Diff line number Diff line
@@ -215,6 +215,9 @@ struct mesh_rmc {
/* Maximum number of paths per interface */
/* Maximum number of paths per interface */
#define MESH_MAX_MPATHS		1024
#define MESH_MAX_MPATHS		1024


/* Number of frames buffered per destination for unresolved destinations */
#define MESH_FRAME_QUEUE_LEN	10

/* Public interfaces */
/* Public interfaces */
/* Various */
/* Various */
int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
+0 −2
Original line number Original line Diff line number Diff line
@@ -17,8 +17,6 @@
#define MAX_METRIC	0xffffffff
#define MAX_METRIC	0xffffffff
#define ARITH_SHIFT	8
#define ARITH_SHIFT	8


/* Number of frames buffered per destination for unresolved destinations */
#define MESH_FRAME_QUEUE_LEN	10
#define MAX_PREQ_QUEUE_LEN	64
#define MAX_PREQ_QUEUE_LEN	64


/* Destination only */
/* Destination only */
+18 −16
Original line number Original line Diff line number Diff line
@@ -279,40 +279,42 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
				    bool copy)
{
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head gateq, failq;
	struct sk_buff_head failq;
	unsigned long flags;
	unsigned long flags;
	int num_skbs;


	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);
	BUG_ON(!gate_mpath->next_hop);


	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);
	__skb_queue_head_init(&failq);


	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);


	num_skbs = skb_queue_len(&failq);
	skb_queue_walk_safe(&failq, fskb, tmp) {

		if (skb_queue_len(&gate_mpath->frame_queue) >=
	while (num_skbs--) {
				  MESH_FRAME_QUEUE_LEN) {
		skb = __skb_dequeue(&failq);
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
		if (copy) {
			break;
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}
		}


		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}
	}


	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);


	if (!copy)
	if (!copy)
		return;
		return;