
Commit 402d7752 authored by Pavel Emelyanov, committed by John W. Linville

mac80211: Brush up error paths in mesh_path_add.



There are already three error paths that do incremental rollbacks, so
merge them together, rename the labels, and format the code to look a
bit nicer.

(I do not mind dropping/delaying this patch, however.)

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent b679aeb3
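
The patch below collapses the three rollback sequences in mesh_path_add into a single chain of labels at the end of the function, the usual kernel style of centralized error unwinding. As a minimal standalone sketch of that pattern (hypothetical user-space code, not the kernel source; the function and label names are chosen only by analogy with the patch):

/*
 * Minimal sketch of the goto-based rollback pattern (user-space,
 * illustrative only). Each failure jumps to a label that undoes
 * exactly the steps completed so far, in reverse order, so there
 * is one cleanup sequence instead of a copy per error path.
 */
#include <stdlib.h>
#include <errno.h>

static int add_entry(void)
{
	char *path, *node;
	int err = -ENOMEM;	/* pre-set, as the patch does */

	path = malloc(32);	/* step 1 */
	if (!path)
		goto err_path_alloc;

	node = malloc(32);	/* step 2 */
	if (!node)
		goto err_node_alloc;

	/* ... insert path/node into some table ... */
	free(node);
	free(path);
	return 0;

err_node_alloc:
	free(path);		/* undo step 1 only */
err_path_alloc:
	return err;
}

int main(void)
{
	return add_entry() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The labels fall through in reverse allocation order: a failure at step 2 frees only what step 1 produced. In the patched function the same fall-through lets err_exists release the locks and free new_node, then reuse err_node_alloc and err_path_alloc for the rest of the rollback.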
net/mac80211/mesh_pathtbl.c: +20 −22
@@ -158,19 +158,14 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
 		return -ENOSPC;
 
+	err = -ENOMEM;
 	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
-	if (!new_mpath) {
-		atomic_dec(&sdata->u.sta.mpaths);
-		err = -ENOMEM;
-		goto endadd2;
-	}
+	if (!new_mpath)
+		goto err_path_alloc;
+
 	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
-	if (!new_node) {
-		kfree(new_mpath);
-		atomic_dec(&sdata->u.sta.mpaths);
-		err = -ENOMEM;
-		goto endadd2;
-	}
+	if (!new_node)
+		goto err_node_alloc;
 
 	read_lock(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
@@ -189,16 +184,11 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 
 	spin_lock(&mesh_paths->hashwlock[hash_idx]);
 
+	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN)
-				== 0) {
-			err = -EEXIST;
-			atomic_dec(&sdata->u.sta.mpaths);
-			kfree(new_node);
-			kfree(new_mpath);
-			goto endadd;
-		}
+		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+			goto err_exists;
 	}
 
 	hlist_add_head_rcu(&new_node->list, bucket);
@@ -206,10 +196,9 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
 		grow = 1;
 
-endadd:
 	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
 	read_unlock(&pathtbl_resize_lock);
-	if (!err && grow) {
+	if (grow) {
 		struct mesh_table *oldtbl, *newtbl;
 
 		write_lock(&pathtbl_resize_lock);
@@ -225,7 +214,16 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 		synchronize_rcu();
 		mesh_table_free(oldtbl, false);
 	}
-endadd2:
+	return 0;
+
+err_exists:
+	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	kfree(new_node);
+err_node_alloc:
+	kfree(new_mpath);
+err_path_alloc:
+	atomic_dec(&sdata->u.sta.mpaths);
 	return err;
 }
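
Note how err is now pre-set to -ENOMEM and -EEXIST immediately before the operations that can fail with those codes, so each failure branch reduces to a bare goto, and the success path returns 0 directly after the optional table grow instead of testing !err.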