Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0b1e9738, authored by Sjur Brændeland <sjur.brandeland@stericsson.com>, committed by David S. Miller
Browse files

caif: Use rcu_read_lock in CAIF mux layer.



Replace spin_lock with rcu_read_lock when accessing the lists of layers
and the layer cache. While packets are in flight, rcu_read_lock should not be
held; instead, ref-counters are used in combination with RCU.

Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1b1cb1f7
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -12,5 +12,7 @@ struct cffrml;
struct cflayer *cffrml_create(u16 phyid, bool DoFCS);
void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up);
void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn);
void cffrml_put(struct cflayer *layr);
void cffrml_hold(struct cflayer *layr);

#endif /* CFFRML_H_ */
+8 −0
Original line number Diff line number Diff line
@@ -145,3 +145,11 @@ static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
	if (layr->up->ctrlcmd)
		layr->up->ctrlcmd(layr->up, ctrl, layr->id);
}

/*
 * cffrml_put - drop a reference on a framing layer.
 *
 * Intentionally a no-op placeholder in this commit: per the commit
 * message, packets in flight are protected by ref-counters (held via
 * cffrml_hold()/cffrml_put() around dn->transmit() in cfmuxl_transmit)
 * rather than by rcu_read_lock.
 * NOTE(review): no counting is performed here yet — presumably real
 * ref-counting is added by a later change; confirm against git history.
 */
void cffrml_put(struct cflayer *layr)
{
}

/*
 * cffrml_hold - take a reference on a framing layer.
 *
 * Intentionally a no-op placeholder in this commit: it is called by
 * cfmuxl_transmit() before releasing rcu_read_lock so the dn layer can
 * be safely used while a packet is in flight.
 * NOTE(review): no counting is performed here yet — presumably real
 * ref-counting is added by a later change; confirm against git history.
 */
void cffrml_hold(struct cflayer *layr)
{
}
+75 −44
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfmuxl.h>
#include <net/caif/cfsrvl.h>
@@ -64,31 +65,31 @@ struct cflayer *cfmuxl_create(void)
int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
{
	struct cfmuxl *muxl = container_obj(layr);
	spin_lock(&muxl->receive_lock);
	cfsrvl_get(up);
	list_add(&up->node, &muxl->srvl_list);
	spin_unlock(&muxl->receive_lock);

	spin_lock_bh(&muxl->receive_lock);
	list_add_rcu(&up->node, &muxl->srvl_list);
	spin_unlock_bh(&muxl->receive_lock);
	return 0;
}

int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
{
	struct cfmuxl *muxl = (struct cfmuxl *) layr;
	spin_lock(&muxl->transmit_lock);
	list_add(&dn->node, &muxl->frml_list);
	spin_unlock(&muxl->transmit_lock);

	spin_lock_bh(&muxl->transmit_lock);
	list_add_rcu(&dn->node, &muxl->frml_list);
	spin_unlock_bh(&muxl->transmit_lock);
	return 0;
}

static struct cflayer *get_from_id(struct list_head *list, u16 id)
{
	struct list_head *node;
	struct cflayer *layer;
	list_for_each(node, list) {
		layer = list_entry(node, struct cflayer, node);
		if (layer->id == id)
			return layer;
	struct cflayer *lyr;
	list_for_each_entry_rcu(lyr, list, node) {
		if (lyr->id == id)
			return lyr;
	}

	return NULL;
}

@@ -96,41 +97,45 @@ struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
{
	struct cfmuxl *muxl = container_obj(layr);
	struct cflayer *dn;
	spin_lock(&muxl->transmit_lock);
	memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache));
	int idx = phyid % DN_CACHE_SIZE;

	spin_lock_bh(&muxl->transmit_lock);
	rcu_assign_pointer(muxl->dn_cache[idx], NULL);
	dn = get_from_id(&muxl->frml_list, phyid);
	if (dn == NULL) {
		spin_unlock(&muxl->transmit_lock);
		return NULL;
	}
	list_del(&dn->node);
	if (dn == NULL)
		goto out;

	list_del_rcu(&dn->node);
	caif_assert(dn != NULL);
	spin_unlock(&muxl->transmit_lock);
out:
	spin_unlock_bh(&muxl->transmit_lock);
	return dn;
}

/* Invariant: lock is taken */
static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
{
	struct cflayer *up;
	int idx = id % UP_CACHE_SIZE;
	up = muxl->up_cache[idx];
	up = rcu_dereference(muxl->up_cache[idx]);
	if (up == NULL || up->id != id) {
		spin_lock_bh(&muxl->receive_lock);
		up = get_from_id(&muxl->srvl_list, id);
		muxl->up_cache[idx] = up;
		rcu_assign_pointer(muxl->up_cache[idx], up);
		spin_unlock_bh(&muxl->receive_lock);
	}
	return up;
}

/* Invariant: lock is taken */
static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
{
	struct cflayer *dn;
	int idx = dev_info->id % DN_CACHE_SIZE;
	dn = muxl->dn_cache[idx];
	dn = rcu_dereference(muxl->dn_cache[idx]);
	if (dn == NULL || dn->id != dev_info->id) {
		spin_lock_bh(&muxl->transmit_lock);
		dn = get_from_id(&muxl->frml_list, dev_info->id);
		muxl->dn_cache[idx] = dn;
		rcu_assign_pointer(muxl->dn_cache[idx], dn);
		spin_unlock_bh(&muxl->transmit_lock);
	}
	return dn;
}
@@ -139,15 +144,17 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
{
	struct cflayer *up;
	struct cfmuxl *muxl = container_obj(layr);
	spin_lock(&muxl->receive_lock);
	up = get_up(muxl, id);
	int idx = id % UP_CACHE_SIZE;

	spin_lock_bh(&muxl->receive_lock);
	up = get_from_id(&muxl->srvl_list, id);
	if (up == NULL)
		goto out;
	memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
	list_del(&up->node);
	cfsrvl_put(up);

	rcu_assign_pointer(muxl->up_cache[idx], NULL);
	list_del_rcu(&up->node);
out:
	spin_unlock(&muxl->receive_lock);
	spin_unlock_bh(&muxl->receive_lock);
	return up;
}

@@ -162,22 +169,28 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
		cfpkt_destroy(pkt);
		return -EPROTO;
	}

	spin_lock(&muxl->receive_lock);
	rcu_read_lock();
	up = get_up(muxl, id);
	spin_unlock(&muxl->receive_lock);

	if (up == NULL) {
		pr_info("Received data on unknown link ID = %d (0x%x) up == NULL",
			id, id);
		pr_debug("Received data on unknown link ID = %d (0x%x)"
			" up == NULL", id, id);
		cfpkt_destroy(pkt);
		/*
		 * Don't return ERROR, since modem misbehaves and sends out
		 * flow on before linksetup response.
		 */

		rcu_read_unlock();
		return /* CFGLU_EPROT; */ 0;
	}

	/* We can't hold rcu_lock during receive, so take a ref count instead */
	cfsrvl_get(up);
	rcu_read_unlock();

	ret = up->receive(up, pkt);

	cfsrvl_put(up);
	return ret;
}
@@ -185,31 +198,49 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	struct cfmuxl *muxl = container_obj(layr);
	int err;
	u8 linkid;
	struct cflayer *dn;
	struct caif_payload_info *info = cfpkt_info(pkt);
	BUG_ON(!info);

	rcu_read_lock();

	dn = get_dn(muxl, info->dev_info);
	if (dn == NULL) {
		pr_warn("Send data on unknown phy ID = %d (0x%x)\n",
		pr_debug("Send data on unknown phy ID = %d (0x%x)\n",
			info->dev_info->id, info->dev_info->id);
		rcu_read_unlock();
		cfpkt_destroy(pkt);
		return -ENOTCONN;
	}

	info->hdr_len += 1;
	linkid = info->channel_id;
	cfpkt_add_head(pkt, &linkid, 1);
	return dn->transmit(dn, pkt);

	/* We can't hold rcu_lock during receive, so take a ref count instead */
	cffrml_hold(dn);

	rcu_read_unlock();

	err = dn->transmit(dn, pkt);

	cffrml_put(dn);
	return err;
}

static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
				int phyid)
{
	struct cfmuxl *muxl = container_obj(layr);
	struct list_head *node, *next;
	struct cflayer *layer;
	list_for_each_safe(node, next, &muxl->srvl_list) {
		layer = list_entry(node, struct cflayer, node);
		if (cfsrvl_phyid_match(layer, phyid))

	rcu_read_lock();
	list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
		if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd)
			/* NOTE: ctrlcmd is not allowed to block */
			layer->ctrlcmd(layer, ctrl, phyid);
	}
	rcu_read_unlock();
}