
Commit 43768a6b authored by Sujeev Dias, committed by Gerrit - the friendly Code Review server

mhi: dev: net: remove support for recycling buffers

Recycling buffers for downlink transfers is not a supported
feature, so remove support for it.

CRs-Fixed: 2325839
Change-Id: I097534c027d4c6274ff6d30cfa475fc647ed7b16
Signed-off-by: Sujeev Dias <sdias@codeaurora.org>
parent 314af848
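
For context: before this change the driver selected its downlink (DL) refill strategy at probe time through an rx_queue function pointer, keyed off a devicetree property. A minimal sketch of the removed dispatch, reconstructed from the deleted lines in the diff below:

	/* Probe-time dispatch removed by this commit: with the
	 * "mhi,recycle-buf" DT property set, ring refills reused parked
	 * skbs; otherwise fresh buffers were allocated each time.
	 */
	mhi_netdev->recycle_buf = of_property_read_bool(of_node,
							"mhi,recycle-buf");
	mhi_netdev->rx_queue = mhi_netdev->recycle_buf ?
		mhi_netdev_skb_recycle : mhi_netdev_alloc_skb;

After this commit the NAPI poll path calls mhi_netdev_alloc_skb() unconditionally, and recycle_buf, rx_queue, and the rx_allocated queue are dropped from struct mhi_netdev.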
1 file changed, 5 insertions(+), 120 deletions(-)
@@ -102,18 +102,14 @@ struct mhi_netdev {
 	spinlock_t rx_lock;
 	bool enabled;
 	rwlock_t pm_lock; /* state change lock */
-	int (*rx_queue)(struct mhi_netdev *, gfp_t);
 	struct work_struct alloc_work;
 	int wake;
 
-	struct sk_buff_head rx_allocated;
-
 	u32 mru;
 	const char *interface_name;
 	struct napi_struct napi;
 	struct net_device *ndev;
 	struct sk_buff *frag_skb;
-	bool recycle_buf;
 
 	struct mhi_stats stats;
 	struct dentry *dentry;
@@ -149,18 +145,6 @@ static __be16 mhi_netdev_ip_type_trans(struct sk_buff *skb)
 	return protocol;
 }
 
-static void mhi_netdev_skb_destructor(struct sk_buff *skb)
-{
-	struct mhi_skb_priv *skb_priv = (struct mhi_skb_priv *)(skb->cb);
-	struct mhi_netdev *mhi_netdev = skb_priv->mhi_netdev;
-
-	skb->data = skb->head;
-	skb_reset_tail_pointer(skb);
-	skb->len = 0;
-	MHI_ASSERT(skb->data != skb_priv->buf, "incorrect buf");
-	skb_queue_tail(&mhi_netdev->rx_allocated, skb);
-}
-
 static int mhi_netdev_alloc_skb(struct mhi_netdev *mhi_netdev, gfp_t gfp_t)
 {
 	u32 cur_mru = mhi_netdev->mru;
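
The destructor deleted above was the core of the recycle scheme: instead of letting a consumed DL skb return to the allocator, it rewound the buffer to an empty state and parked it on rx_allocated for reposting. The intended lifecycle, inferred from the removed code (a sketch, not documented driver behavior):

	/* Inferred lifecycle of a recycled DL buffer:
	 *  1. mhi_netdev_alloc_skb() arms skb->destructor
	 *  2. mhi_queue_transfer() posts the buffer to the DL ring
	 *  3. completion hands the payload to the network stack
	 *  4. on release the destructor fires and rewinds the skb:
	 */
	skb->data = skb->head;		/* drop consumed payload */
	skb_reset_tail_pointer(skb);	/* buffer is now empty */
	skb->len = 0;
	skb_queue_tail(&mhi_netdev->rx_allocated, skb);	/* park for reuse */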
@@ -189,9 +173,6 @@ static int mhi_netdev_alloc_skb(struct mhi_netdev *mhi_netdev, gfp_t gfp_t)
 		skb_priv->mhi_netdev = mhi_netdev;
 		skb->dev = mhi_netdev->ndev;
 
-		if (mhi_netdev->recycle_buf)
-			skb->destructor = mhi_netdev_skb_destructor;
-
 		spin_lock_bh(&mhi_netdev->rx_lock);
 		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, skb,
 					 skb_priv->size, MHI_EOT);
@@ -209,7 +190,6 @@ static int mhi_netdev_alloc_skb(struct mhi_netdev *mhi_netdev, gfp_t gfp_t)
 	return 0;
 
 error_queue:
-	skb->destructor = NULL;
 	read_unlock_bh(&mhi_netdev->pm_lock);
 	dev_kfree_skb_any(skb);
 
@@ -240,66 +220,6 @@ static void mhi_netdev_alloc_work(struct work_struct *work)
 	MSG_LOG("Exit with status:%d retry:%d\n", ret, retry);
 }
 
-/* we will recycle buffers */
-static int mhi_netdev_skb_recycle(struct mhi_netdev *mhi_netdev, gfp_t gfp_t)
-{
-	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
-	int no_tre;
-	int ret = 0;
-	struct sk_buff *skb;
-	struct mhi_skb_priv *skb_priv;
-
-	read_lock_bh(&mhi_netdev->pm_lock);
-	if (!mhi_netdev->enabled) {
-		read_unlock_bh(&mhi_netdev->pm_lock);
-		return -EIO;
-	}
-
-	no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
-
-	spin_lock_bh(&mhi_netdev->rx_lock);
-	while (no_tre) {
-		skb = skb_dequeue(&mhi_netdev->rx_allocated);
-
-		/* no free buffers to recycle, reschedule work */
-		if (!skb) {
-			ret = -ENOMEM;
-			goto error_queue;
-		}
-
-		skb_priv = (struct mhi_skb_priv *)(skb->cb);
-		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, skb,
-					 skb_priv->size, MHI_EOT);
-
-		/* failed to queue buffer */
-		if (ret) {
-			MSG_ERR("Failed to queue skb, ret:%d\n", ret);
-			skb_queue_tail(&mhi_netdev->rx_allocated, skb);
-			goto error_queue;
-		}
-
-		no_tre--;
-	}
-
-error_queue:
-	spin_unlock_bh(&mhi_netdev->rx_lock);
-	read_unlock_bh(&mhi_netdev->pm_lock);
-
-	return ret;
-}
-
-static void mhi_netdev_dealloc(struct mhi_netdev *mhi_netdev)
-{
-	struct sk_buff *skb;
-
-	skb = skb_dequeue(&mhi_netdev->rx_allocated);
-	while (skb) {
-		skb->destructor = NULL;
-		kfree_skb(skb);
-		skb = skb_dequeue(&mhi_netdev->rx_allocated);
-	}
-}
-
 static int mhi_netdev_poll(struct napi_struct *napi, int budget)
 {
 	struct net_device *dev = napi->dev;
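
This hunk drops the whole recycle machinery in one block: mhi_netdev_skb_recycle() pulled parked skbs off rx_allocated and reposted them until the DL ring ran out of transfer ring elements (TREs), and mhi_netdev_dealloc() drained that queue on reset/remove, clearing skb->destructor first so kfree_skb() would actually free. The surviving refill path keeps the same contract, sketched here under the assumption that the surrounding poll code is unchanged apart from this diff:

	/* NAPI-context refill: may not sleep, so an atomic allocation
	 * failure defers to alloc_work, which retries with GFP_KERNEL
	 * from process context.
	 */
	ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_ATOMIC);
	if (ret == -ENOMEM)
		schedule_work(&mhi_netdev->alloc_work);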
@@ -329,7 +249,7 @@ static int mhi_netdev_poll(struct napi_struct *napi, int budget)
 	}
 
 	/* queue new buffers */
-	ret = mhi_netdev->rx_queue(mhi_netdev, GFP_ATOMIC);
+	ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_ATOMIC);
 	if (ret == -ENOMEM) {
 		MSG_LOG("out of tre, queuing bg worker\n");
 		mhi_netdev->stats.alloc_failed++;
@@ -448,10 +368,9 @@ static int mhi_netdev_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
 				ext_cmd.u.data, mhi_dev->mtu);
 			return -EINVAL;
 		}
-		if (!mhi_netdev->recycle_buf) {
-			MSG_LOG("MRU change request to 0x%x\n", ext_cmd.u.data);
-			mhi_netdev->mru = ext_cmd.u.data;
-		}
+
+		MSG_LOG("MRU change request to 0x%x\n", ext_cmd.u.data);
+		mhi_netdev->mru = ext_cmd.u.data;
 		break;
 	case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
 		ext_cmd.u.data = 0;
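
Behavioral note: RMNET_IOCTL_SET_MRU was previously a silent no-op while recycling was active, because recycled buffers were sized once up front; after this change an in-range MRU request always takes effect. A hypothetical userspace sketch, assuming the msm rmnet extended-ioctl ABI (struct and constant names here are assumptions and may differ across trees):

	/* Hypothetical caller of the now-ungated MRU ioctl. */
	struct rmnet_ioctl_extended_s ext = {
		.extended_ioctl = RMNET_IOCTL_SET_MRU,
		.u.data = 0x4000,	/* requested MRU in bytes */
	};
	struct ifreq ifr = { .ifr_name = "rmnet_mhi0" };	/* name assumed */

	ifr.ifr_ifru.ifru_data = (void *)&ext;
	ioctl(sock_fd, RMNET_IOCTL_EXTENDED, &ifr);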
@@ -611,8 +530,6 @@ static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev)
 			MSG_ERR("Network device registration failed\n");
 			goto net_dev_reg_fail;
 		}
-
-		skb_queue_head_init(&mhi_netdev->rx_allocated);
 	}
 
 	write_lock_irq(&mhi_netdev->pm_lock);
@@ -625,25 +542,6 @@ static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev)
 	if (ret)
 		schedule_work(&mhi_netdev->alloc_work);
 
-	/* if we recycle prepare one more set */
-	if (mhi_netdev->recycle_buf)
-		for (; no_tre >= 0; no_tre--) {
-			struct sk_buff *skb = alloc_skb(mhi_netdev->mru,
-							GFP_KERNEL);
-			struct mhi_skb_priv *skb_priv;
-
-			if (!skb)
-				break;
-
-			skb_priv = (struct mhi_skb_priv *)skb->cb;
-			skb_priv->buf = skb->data;
-			skb_priv->size = mhi_netdev->mru;
-			skb_priv->mhi_netdev = mhi_netdev;
-			skb->dev = mhi_netdev->ndev;
-			skb->destructor = mhi_netdev_skb_destructor;
-			skb_queue_tail(&mhi_netdev->rx_allocated, skb);
-		}
-
 	napi_enable(&mhi_netdev->napi);
 
 	MSG_LOG("Exited.\n");
@@ -731,10 +629,6 @@ static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev,
 	    mhi_netdev->frag_skb) {
 		ret = mhi_netdev_process_fragment(mhi_netdev, skb);
 
-		/* recycle the skb */
-		if (mhi_netdev->recycle_buf)
-			mhi_netdev_skb_destructor(skb);
-		else
-			dev_kfree_skb(skb);
+		dev_kfree_skb(skb);
 
 		if (ret)
@@ -791,9 +685,6 @@ static int mhi_netdev_debugfs_trigger_reset(void *data, u64 val)
 	/* disable all hardware channels */
 	mhi_unprepare_from_transfer(mhi_dev);
 
-	/* clean up all alocated buffers */
-	mhi_netdev_dealloc(mhi_netdev);
-
 	MSG_LOG("Restarting iface\n");
 
 	ret = mhi_netdev_enable_iface(mhi_netdev);
@@ -905,7 +796,6 @@ static void mhi_netdev_remove(struct mhi_device *mhi_dev)
 
 	napi_disable(&mhi_netdev->napi);
 	netif_napi_del(&mhi_netdev->napi);
-	mhi_netdev_dealloc(mhi_netdev);
 	unregister_netdev(mhi_netdev->ndev);
 	free_netdev(mhi_netdev->ndev);
 	flush_work(&mhi_netdev->alloc_work);
@@ -946,11 +836,6 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
 	if (ret)
 		mhi_netdev->interface_name = mhi_netdev_driver.driver.name;
 
-	mhi_netdev->recycle_buf = of_property_read_bool(of_node,
-							"mhi,recycle-buf");
-	mhi_netdev->rx_queue = mhi_netdev->recycle_buf ?
-		mhi_netdev_skb_recycle : mhi_netdev_alloc_skb;
-
 	spin_lock_init(&mhi_netdev->rx_lock);
 	rwlock_init(&mhi_netdev->pm_lock);
 	INIT_WORK(&mhi_netdev->alloc_work, mhi_netdev_alloc_work);