
Commit f7923875 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "msm: mhi_rmnet: add support for shutdown and system error notification"

parents 50ae8d5c 371e2918
+243 −210
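
The core of this change is a new pm_lock rwlock in rmnet_mhi_private: every data path takes it for reading and checks mhi_enabled before touching channel state, while the shutdown/system-error callback flips the flag under the write lock. A minimal sketch of that pattern (illustrative names only, not the driver code itself):

#include <linux/spinlock.h>

static DEFINE_RWLOCK(pm_lock);	/* stands in for rmnet_mhi_ptr->pm_lock */
static int mhi_enabled;		/* stands in for rmnet_mhi_ptr->mhi_enabled */

/* Data path: cheap shared lock, bail out early if the device is gone. */
static int data_path_op(void)
{
	int ret = 0;

	read_lock_bh(&pm_lock);
	if (mhi_enabled) {
		/* queue the transfer; the lock pins the enabled state */
	} else {
		ret = -EIO;
	}
	read_unlock_bh(&pm_lock);
	return ret;
}

/* Shutdown/SYS_ERROR path: one exclusive flip disables every reader. */
static void shutdown_notify(void)
{
	write_lock_irq(&pm_lock);
	mhi_enabled = 0;
	write_unlock_irq(&pm_lock);
	/* from here on, buffers can be torn down safely */
}
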
@@ -35,6 +35,7 @@
#define MHI_NAPI_WEIGHT_VALUE  12
#define WATCHDOG_TIMEOUT       (30 * HZ)
#define RMNET_IPC_LOG_PAGES (100)
+#define IRQ_MASKED_BIT (0)

enum DBG_LVL {
	MSG_VERBOSE = 0x1,
@@ -100,14 +101,15 @@ struct rmnet_mhi_private {
	u32			      mhi_enabled;
	struct platform_device        *pdev;
	struct net_device	      *dev;
-	atomic_t		      irq_masked_cntr;
+	unsigned long		      flags;
+	int			      wake_count;
	spinlock_t		      out_chan_full_lock; /* tx queue lock */
-	atomic_t		      pending_data;
	struct sk_buff		      *frag_skb;
	struct work_struct	      alloc_work;
	/* lock to queue hardware and internal queue */
	spinlock_t		      alloc_lock;
	void			      *rmnet_ipc_log;
+	rwlock_t		      pm_lock; /* state change lock */
	struct debug_params	      debug;
	struct dentry		      *dentry;
};
@@ -130,12 +132,12 @@ static int rmnet_mhi_process_fragment(struct rmnet_mhi_private *rmnet_mhi_ptr,
			rmnet_mhi_ptr->frag_skb = NULL;
			return -ENOMEM;
		}
-		kfree_skb(rmnet_mhi_ptr->frag_skb);
+		dev_kfree_skb_any(rmnet_mhi_ptr->frag_skb);
		rmnet_mhi_ptr->frag_skb = temp_skb;
		memcpy(skb_put(rmnet_mhi_ptr->frag_skb, skb->len),
			skb->data,
			skb->len);
-		kfree_skb(skb);
+		dev_kfree_skb_any(skb);
		if (!frag) {
			/* Last fragmented piece was received, ship it */
			netif_receive_skb(rmnet_mhi_ptr->frag_skb);
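
The kfree_skb() to dev_kfree_skb_any() swaps in this hunk (and throughout the patch) are there because these paths can now be reached from atomic or hard-IRQ context; dev_kfree_skb_any() picks the context-appropriate free routine, deferring to dev_kfree_skb_irq() when interrupts are disabled. A one-line wrapper just to show the intent (hypothetical helper):

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Safe from process, softirq and hardirq context alike. */
static inline void drop_skb_any_context(struct sk_buff *skb)
{
	if (skb)
		dev_kfree_skb_any(skb);
}
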
@@ -196,7 +198,6 @@ static int rmnet_alloc_rx(struct rmnet_mhi_private *rmnet_mhi_ptr,
{
	u32 cur_mru = rmnet_mhi_ptr->mru;
	struct mhi_skb_priv *skb_priv;
-	unsigned long flags;
	int ret;
	struct sk_buff *skb;

@@ -215,7 +216,7 @@ static int rmnet_alloc_rx(struct rmnet_mhi_private *rmnet_mhi_ptr,
		skb_priv->dma_addr = 0;

		/* These steps must be in atomic context */
-		spin_lock_irqsave(&rmnet_mhi_ptr->alloc_lock, flags);
+		spin_lock_bh(&rmnet_mhi_ptr->alloc_lock);

		/* It's possible by the time alloc_skb (GFP_KERNEL)
		 * returns we already called rmnet_alloc_rx
@@ -224,14 +225,22 @@ static int rmnet_alloc_rx(struct rmnet_mhi_private *rmnet_mhi_ptr,
		 */
		if (unlikely(atomic_read(&rmnet_mhi_ptr->rx_pool_len) >=
			     rmnet_mhi_ptr->rx_buffers_max)) {
-			spin_unlock_irqrestore(&rmnet_mhi_ptr->alloc_lock,
-					       flags);
+			spin_unlock_bh(&rmnet_mhi_ptr->alloc_lock);
			dev_kfree_skb_any(skb);
			return 0;
		}

-		ret = mhi_queue_xfer(
-				     rmnet_mhi_ptr->rx_client_handle,
+		read_lock_bh(&rmnet_mhi_ptr->pm_lock);
+		if (unlikely(!rmnet_mhi_ptr->mhi_enabled)) {
+			rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+				  "!interface is disabled\n");
+			dev_kfree_skb_any(skb);
+			read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+			spin_unlock_bh(&rmnet_mhi_ptr->alloc_lock);
+			return -EIO;
+		}
+
+		ret = mhi_queue_xfer(rmnet_mhi_ptr->rx_client_handle,
				     skb->data,
				     skb_priv->dma_size,
				     MHI_EOT);
@@ -239,14 +248,15 @@ static int rmnet_alloc_rx(struct rmnet_mhi_private *rmnet_mhi_ptr,
			rmnet_log(rmnet_mhi_ptr,
				  MSG_CRITICAL,
				  "mhi_queue_xfer failed, error %d", ret);
-			spin_unlock_irqrestore(&rmnet_mhi_ptr->alloc_lock,
-					       flags);
+			read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+			spin_unlock_bh(&rmnet_mhi_ptr->alloc_lock);
			dev_kfree_skb_any(skb);
			return ret;
		}
		skb_queue_tail(&rmnet_mhi_ptr->rx_buffers, skb);
		atomic_inc(&rmnet_mhi_ptr->rx_pool_len);
-		spin_unlock_irqrestore(&rmnet_mhi_ptr->alloc_lock, flags);
+		read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+		spin_unlock_bh(&rmnet_mhi_ptr->alloc_lock);
	}

	return 0;
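
Note the lock ordering rmnet_alloc_rx() now establishes: alloc_lock (a BH-disabled spinlock) first, then the pm_lock read lock, released in reverse order on every exit path. A stripped-down sketch of that shape, assuming the same fields as the driver:

static int queue_one_rx_buffer(struct rmnet_mhi_private *p,
			       struct sk_buff *skb)
{
	int ret = 0;

	spin_lock_bh(&p->alloc_lock);	/* serializes pool accounting */
	read_lock_bh(&p->pm_lock);	/* pins mhi_enabled */
	if (!p->mhi_enabled) {
		ret = -EIO;		/* device went away */
		goto out;
	}
	/* mhi_queue_xfer() and rx_buffers bookkeeping go here */
out:
	read_unlock_bh(&p->pm_lock);
	spin_unlock_bh(&p->alloc_lock);
	if (ret)
		dev_kfree_skb_any(skb);
	return ret;
}
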
@@ -258,13 +268,25 @@ static void rmnet_mhi_alloc_work(struct work_struct *work)
				    struct rmnet_mhi_private,
				    alloc_work);
	int ret;
+	/* sleep about 1 sec and retry; that should be enough time
+	 * for the system to reclaim freed memory.
+	 */
+	const int sleep_ms = 1000;
+	int retry = 60;

	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Entered\n");
+	do {
		ret = rmnet_alloc_rx(rmnet_mhi_ptr,
				     rmnet_mhi_ptr->allocation_flags);
+		/* sleep and try again */
+		if (ret == -ENOMEM) {
+			msleep(sleep_ms);
+			retry--;
+		}
+	} while (ret == -ENOMEM && retry);

	WARN_ON(ret == -ENOMEM);
-	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exit\n");
+	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exit with status:%d retry:%d\n",
+		  ret, retry);
}
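
The reworked worker bounds its -ENOMEM retries instead of giving up after a single failed pass: with sleep_ms = 1000 and retry = 60 it gives memory reclaim roughly a minute before the WARN_ON fires. The same bounded-retry shape in isolation (hypothetical alloc_fn):

#include <linux/delay.h>

static int alloc_with_retry(int (*alloc_fn)(void))
{
	int retry = 60;		/* ~60 s total with 1 s sleeps */
	int ret;

	do {
		ret = alloc_fn();
		if (ret == -ENOMEM) {
			msleep(1000);	/* let reclaim make progress */
			retry--;
		}
	} while (ret == -ENOMEM && retry);

	return ret;
}
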

static int rmnet_mhi_poll(struct napi_struct *napi, int budget)
@@ -281,6 +303,12 @@ static int rmnet_mhi_poll(struct napi_struct *napi, int budget)

	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Entered\n");

+	read_lock_bh(&rmnet_mhi_ptr->pm_lock);
+	if (unlikely(!rmnet_mhi_ptr->mhi_enabled)) {
+		rmnet_log(rmnet_mhi_ptr, MSG_INFO, "interface is disabled!\n");
+		read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+		return 0;
+	}
	while (received_packets < budget) {
		struct mhi_result *result =
		      mhi_poll(rmnet_mhi_ptr->rx_client_handle);
@@ -338,12 +366,15 @@ static int rmnet_mhi_poll(struct napi_struct *napi, int budget)
		dev->stats.rx_bytes += result->bytes_xferd;

	} /* while (received_packets < budget) or any other error */
+	read_unlock_bh(&rmnet_mhi_ptr->pm_lock);

	/* Queue new buffers */
	res = rmnet_alloc_rx(rmnet_mhi_ptr, GFP_ATOMIC);

+	read_lock_bh(&rmnet_mhi_ptr->pm_lock);
+	if (likely(rmnet_mhi_ptr->mhi_enabled)) {
		if (res == -ENOMEM) {
-		rmnet_log(rmnet_mhi_ptr,
-			  MSG_INFO,
+			rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+				  "out of mem, queuing bg worker\n");
			rmnet_mhi_ptr->alloc_fail++;
			schedule_work(&rmnet_mhi_ptr->alloc_work);
@@ -352,12 +383,12 @@ static int rmnet_mhi_poll(struct napi_struct *napi, int budget)
		napi_complete(napi);

		/* We got a NULL descriptor back */
-	if (should_reschedule == false) {
-		if (atomic_read(&rmnet_mhi_ptr->irq_masked_cntr)) {
-			atomic_dec(&rmnet_mhi_ptr->irq_masked_cntr);
+		if (!should_reschedule) {
+			if (test_and_clear_bit(IRQ_MASKED_BIT,
+					       &rmnet_mhi_ptr->flags))
+				mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle);
+			mhi_set_lpm(rmnet_mhi_ptr->rx_client_handle, true);
-		}
+			rmnet_mhi_ptr->wake_count--;
		} else {
			if (received_packets == budget)
				rmnet_mhi_ptr->debug.rx_napi_budget_overflow++;
@@ -371,44 +402,14 @@ static int rmnet_mhi_poll(struct napi_struct *napi, int budget)
		rmnet_mhi_ptr->debug.rx_napi_skb_burst_max =
			max((u64)received_packets,
			    rmnet_mhi_ptr->debug.rx_napi_skb_burst_max);
	}
+	read_unlock_bh(&rmnet_mhi_ptr->pm_lock);

	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE,
		  "Exited, polled %d pkts\n", received_packets);
	return received_packets;
}
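
Replacing the irq_masked_cntr counter with a single IRQ_MASKED_BIT in a flags word makes mask/unmask idempotent: test_and_set_bit() lets only the first caller mask, and test_and_clear_bit() lets only the owner unmask, so the pair can never drift out of balance the way an increment/decrement counter can. In isolation:

#include <linux/types.h>
#include <linux/bitops.h>

#define IRQ_MASKED_BIT	0	/* same bit the driver defines */

/* Each returns true only for the caller that actually changed the
 * state, i.e. the one that should call mhi_mask_irq()/mhi_unmask_irq(). */
static bool mask_once(unsigned long *flags)
{
	return !test_and_set_bit(IRQ_MASKED_BIT, flags);
}

static bool unmask_once(unsigned long *flags)
{
	return test_and_clear_bit(IRQ_MASKED_BIT, flags);
}
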

-void rmnet_mhi_clean_buffers(struct net_device *dev)
-{
-	struct rmnet_mhi_private *rmnet_mhi_ptr =
-		*(struct rmnet_mhi_private **)netdev_priv(dev);
-
-	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Entered\n");
-	/* Clean TX buffers */
-	rmnet_mhi_internal_clean_unmap_buffers(dev,
-					       &rmnet_mhi_ptr->tx_buffers,
-					       DMA_TO_DEVICE);
-
-	/* Clean RX buffers */
-	rmnet_mhi_internal_clean_unmap_buffers(dev,
-					       &rmnet_mhi_ptr->rx_buffers,
-					       DMA_FROM_DEVICE);
-	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exited\n");
-}
-
-static int rmnet_mhi_disable_channels(struct rmnet_mhi_private *rmnet_mhi_ptr)
-{
-	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Closing MHI TX channel\n");
-	mhi_close_channel(rmnet_mhi_ptr->tx_client_handle);
-	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Closing MHI RX channel\n");
-	mhi_close_channel(rmnet_mhi_ptr->rx_client_handle);
-	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Clearing Pending TX buffers.\n");
-	rmnet_mhi_clean_buffers(rmnet_mhi_ptr->dev);
-	rmnet_mhi_ptr->tx_client_handle = NULL;
-	rmnet_mhi_ptr->rx_client_handle = NULL;
-
-	return 0;
-}

static int rmnet_mhi_init_inbound(struct rmnet_mhi_private *rmnet_mhi_ptr)
{
	int res;
@@ -431,7 +432,7 @@ static void rmnet_mhi_tx_cb(struct mhi_result *result)
	struct net_device *dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr;
	unsigned long burst_counter = 0;
-	unsigned long flags;
+	unsigned long flags, pm_flags;

	rmnet_mhi_ptr = result->user_data;
	dev = rmnet_mhi_ptr->dev;
@@ -451,10 +452,10 @@ static void rmnet_mhi_tx_cb(struct mhi_result *result)
			break;
		} else {
			if (skb->data == result->buf_addr) {
-				kfree_skb(skb);
+				dev_kfree_skb_any(skb);
				break;
			}
-			kfree_skb(skb);
+			dev_kfree_skb_any(skb);
			burst_counter++;

			/* Update statistics */
@@ -477,10 +478,15 @@ static void rmnet_mhi_tx_cb(struct mhi_result *result)
		    rmnet_mhi_ptr->debug.tx_cb_skb_free_burst_max);

	/* In case we couldn't write again, now we can! */
+	read_lock_irqsave(&rmnet_mhi_ptr->pm_lock, pm_flags);
+	if (likely(rmnet_mhi_ptr->mhi_enabled)) {
		spin_lock_irqsave(&rmnet_mhi_ptr->out_chan_full_lock, flags);
		rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Waking up queue\n");
		netif_wake_queue(dev);
-	spin_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock, flags);
+		spin_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock,
+				       flags);
+	}
+	read_unlock_irqrestore(&rmnet_mhi_ptr->pm_lock, pm_flags);
	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited\n");
}

@@ -488,20 +494,27 @@ static void rmnet_mhi_rx_cb(struct mhi_result *result)
{
	struct net_device *dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr;
+	unsigned long flags;

	rmnet_mhi_ptr = result->user_data;
	dev = rmnet_mhi_ptr->dev;

	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Entered\n");
	rmnet_mhi_ptr->debug.rx_interrupts_count++;

-	if (napi_schedule_prep(&(rmnet_mhi_ptr->napi))) {
+	read_lock_irqsave(&rmnet_mhi_ptr->pm_lock, flags);
+	if (likely(rmnet_mhi_ptr->mhi_enabled)) {
+		if (napi_schedule_prep(&rmnet_mhi_ptr->napi)) {
+			if (!test_and_set_bit(IRQ_MASKED_BIT,
+					      &rmnet_mhi_ptr->flags))
+				mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle);
-		atomic_inc(&rmnet_mhi_ptr->irq_masked_cntr);
+			mhi_set_lpm(rmnet_mhi_ptr->rx_client_handle, false);
-		__napi_schedule(&(rmnet_mhi_ptr->napi));
+			rmnet_mhi_ptr->wake_count++;
+			__napi_schedule(&rmnet_mhi_ptr->napi);
		} else {
			rmnet_mhi_ptr->debug.rx_interrupts_in_masked_irq++;
		}
+	}
+	read_unlock_irqrestore(&rmnet_mhi_ptr->pm_lock, flags);
	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited\n");
}
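
The rx callback, poll loop, open path and sleep-state ioctl now all keep wake_count in step with their mhi_set_lpm() votes; that bookkeeping is what lets the shutdown handler later release every outstanding vote. The pairing, reduced to its shape (field names as in the driver, wrappers hypothetical):

/* take a wakeup vote before scheduling NAPI */
static void rx_wake_vote(struct rmnet_mhi_private *p)
{
	mhi_set_lpm(p->rx_client_handle, false);	/* vote against LPM */
	p->wake_count++;
}

/* release the vote once polling goes idle */
static void rx_wake_unvote(struct rmnet_mhi_private *p)
{
	mhi_set_lpm(p->rx_client_handle, true);		/* allow LPM again */
	p->wake_count--;
}
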

@@ -510,8 +523,7 @@ static int rmnet_mhi_open(struct net_device *dev)
	struct rmnet_mhi_private *rmnet_mhi_ptr =
			*(struct rmnet_mhi_private **)netdev_priv(dev);

-	rmnet_log(rmnet_mhi_ptr,
-		  MSG_INFO,
+	rmnet_log(rmnet_mhi_ptr, MSG_INFO,
		  "Opened net dev interface for MHI chans %d and %d\n",
		  rmnet_mhi_ptr->tx_channel,
		  rmnet_mhi_ptr->rx_channel);
@@ -527,43 +539,35 @@ static int rmnet_mhi_open(struct net_device *dev)
	/* Poll to check if any buffers are accumulated in the
	 * transport buffers
	 */
-	if (napi_schedule_prep(&(rmnet_mhi_ptr->napi))) {
+	read_lock_bh(&rmnet_mhi_ptr->pm_lock);
+	if (likely(rmnet_mhi_ptr->mhi_enabled)) {
+		if (napi_schedule_prep(&rmnet_mhi_ptr->napi)) {
+			if (!test_and_set_bit(IRQ_MASKED_BIT,
+					      &rmnet_mhi_ptr->flags)) {
+				mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle);
-		atomic_inc(&rmnet_mhi_ptr->irq_masked_cntr);
+			}
+			mhi_set_lpm(rmnet_mhi_ptr->rx_client_handle, false);
-		__napi_schedule(&(rmnet_mhi_ptr->napi));
+			rmnet_mhi_ptr->wake_count++;
+			__napi_schedule(&rmnet_mhi_ptr->napi);
		} else {
			rmnet_mhi_ptr->debug.rx_interrupts_in_masked_irq++;
		}
+	}
+	read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
	return 0;

}

-static int rmnet_mhi_disable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
-{
-	rmnet_mhi_ptr->rx_enabled = 0;
-	rmnet_mhi_ptr->tx_enabled = 0;
-	rmnet_mhi_ptr->mhi_enabled = 0;
-	if (rmnet_mhi_ptr->dev != 0) {
-		netif_stop_queue(rmnet_mhi_ptr->dev);
-		netif_napi_del(&(rmnet_mhi_ptr->napi));
-		rmnet_mhi_disable_channels(rmnet_mhi_ptr);
-		unregister_netdev(rmnet_mhi_ptr->dev);
-		free_netdev(rmnet_mhi_ptr->dev);
-		rmnet_mhi_ptr->dev = 0;
-	}
-}

static int rmnet_mhi_disable(struct rmnet_mhi_private *rmnet_mhi_ptr)
{
-	rmnet_mhi_ptr->mhi_enabled = 0;
-	rmnet_mhi_disable_iface(rmnet_mhi_ptr);
	napi_disable(&(rmnet_mhi_ptr->napi));
-	if (atomic_read(&rmnet_mhi_ptr->irq_masked_cntr)) {
+	rmnet_mhi_ptr->rx_enabled = 0;
+	rmnet_mhi_internal_clean_unmap_buffers(rmnet_mhi_ptr->dev,
+					       &rmnet_mhi_ptr->rx_buffers,
+					       DMA_FROM_DEVICE);
+	if (test_and_clear_bit(IRQ_MASKED_BIT, &rmnet_mhi_ptr->flags))
+		mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle);
-		atomic_dec(&rmnet_mhi_ptr->irq_masked_cntr);
-	}

	return 0;
}

@@ -574,11 +578,9 @@ static int rmnet_mhi_stop(struct net_device *dev)

	netif_stop_queue(dev);
	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Entered\n");
-	if (atomic_read(&rmnet_mhi_ptr->irq_masked_cntr)) {
+	if (test_and_clear_bit(IRQ_MASKED_BIT, &rmnet_mhi_ptr->flags)) {
		mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle);
-		atomic_dec(&rmnet_mhi_ptr->irq_masked_cntr);
-		rmnet_log(rmnet_mhi_ptr,
-			  MSG_ERROR,
+		rmnet_log(rmnet_mhi_ptr, MSG_ERROR,
			  "IRQ was masked, unmasking...\n");
	}
	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited\n");
@@ -605,14 +607,23 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
	unsigned long flags;
	struct mhi_skb_priv *tx_priv;

-	rmnet_log(rmnet_mhi_ptr,
-		  MSG_VERBOSE,
-		  "Entered chan %d\n",
-		  rmnet_mhi_ptr->tx_channel);
+	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE,
+		  "Entered chan %d\n", rmnet_mhi_ptr->tx_channel);

	tx_priv = (struct mhi_skb_priv *)(skb->cb);
	tx_priv->dma_size = skb->len;
	tx_priv->dma_addr = 0;
+	read_lock_bh(&rmnet_mhi_ptr->pm_lock);
+	if (unlikely(!rmnet_mhi_ptr->mhi_enabled)) {
+		/* The only reason the interface could be disabled while we
+		 * still get data is an SSR. We do not want to stop the
+		 * queue and return an error; instead we flush all the
+		 * uplink packets and report success.
+		 */
+		res = NETDEV_TX_OK;
+		dev_kfree_skb_any(skb);
+		goto mhi_xmit_exit;
+	}

	if (mhi_get_free_desc(rmnet_mhi_ptr->tx_client_handle) <= 0) {
		rmnet_log(rmnet_mhi_ptr,
@@ -624,7 +635,8 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock,
				       flags);
-		return NETDEV_TX_BUSY;
+		res = NETDEV_TX_BUSY;
+		goto mhi_xmit_exit;
	}
	res = mhi_queue_xfer(rmnet_mhi_ptr->tx_client_handle,
			     skb->data,
@@ -641,15 +653,17 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock,
				       flags);
-		return NETDEV_TX_BUSY;
+		res = NETDEV_TX_BUSY;
+		goto mhi_xmit_exit;
	}

	res = NETDEV_TX_OK;
	skb_queue_tail(&(rmnet_mhi_ptr->tx_buffers), skb);
	dev->trans_start = jiffies;
	rmnet_mhi_ptr->debug.tx_queued_packets_count++;

+mhi_xmit_exit:
+	read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited\n");
-	return NETDEV_TX_OK;
+	return res;
}
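
Note the contract the disabled-path early exit honors: during SSR the packet is freed and NETDEV_TX_OK is returned, because NETDEV_TX_BUSY would make the core requeue the same skb against a device that can never drain it. Reduced to its essence (illustrative function, not the driver's):

#include <linux/netdevice.h>

static netdev_tx_t xmit_while_disabled(struct sk_buff *skb)
{
	/* Device is mid-SSR: swallow the packet "successfully" so the
	 * stack neither retries it nor stalls the queue forever. */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
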

static int rmnet_mhi_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
@@ -698,16 +712,19 @@ static int rmnet_mhi_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
			sizeof(ext_cmd.u.if_name));
		break;
	case RMNET_IOCTL_SET_SLEEP_STATE:
+		read_lock_bh(&rmnet_mhi_ptr->pm_lock);
+		if (rmnet_mhi_ptr->mhi_enabled &&
+		    rmnet_mhi_ptr->tx_client_handle != NULL) {
+			rmnet_mhi_ptr->wake_count += (ext_cmd.u.data) ? -1 : 1;
			mhi_set_lpm(rmnet_mhi_ptr->tx_client_handle,
				   ext_cmd.u.data);
		} else {
-			rmnet_log(rmnet_mhi_ptr,
-				  MSG_ERROR,
+			rmnet_log(rmnet_mhi_ptr, MSG_ERROR,
				  "Cannot set LPM value, MHI is not up.\n");
+			read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
			return -ENODEV;
		}
+		read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
		break;
	default:
		rc = -EINVAL;
@@ -832,9 +849,8 @@ static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
				  "Failed to start TX chan ret %d\n",
				  r);
			goto mhi_tx_chan_start_fail;
-		} else {
-			rmnet_mhi_ptr->tx_enabled = 1;
-		}

		client_handle = rmnet_mhi_ptr->tx_client_handle;
	}
	if (rmnet_mhi_ptr->rx_client_handle != NULL) {
@@ -848,8 +864,6 @@ static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
				  "Failed to start RX chan ret %d\n",
				  r);
			goto mhi_rx_chan_start_fail;
-		} else {
-			rmnet_mhi_ptr->rx_enabled = 1;
-		}
		/* Both tx & rx client handle contain same device info */
		client_handle = rmnet_mhi_ptr->rx_client_handle;
@@ -860,8 +874,9 @@ static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
		goto net_dev_alloc_fail;
	}

-	snprintf(ifalias,
-		 sizeof(ifalias),
+
+	if (!rmnet_mhi_ptr->dev) {
+		snprintf(ifalias, sizeof(ifalias),
			 "%s_%04x_%02u.%02u.%02u_%u",
			 rmnet_mhi_ptr->interface_name,
			 client_handle->dev_id,
@@ -874,12 +889,12 @@ static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
			 rmnet_mhi_ptr->interface_name);

		rtnl_lock();
-	rmnet_mhi_ptr->dev =
-		alloc_netdev(sizeof(struct rmnet_mhi_private *),
+		rmnet_mhi_ptr->dev = alloc_netdev(
+				sizeof(struct rmnet_mhi_private *),
+				ifname, NET_NAME_PREDICTABLE, rmnet_mhi_setup);

		if (!rmnet_mhi_ptr->dev) {
-		rmnet_log(rmnet_mhi_ptr,
-			  MSG_CRITICAL,
+			rmnet_log(rmnet_mhi_ptr, MSG_CRITICAL,
				  "Network device allocation failed\n");
			ret = -ENOMEM;
			goto net_dev_alloc_fail;
@@ -890,32 +905,33 @@ static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
		rtnl_unlock();
		*rmnet_mhi_ctxt = rmnet_mhi_ptr;

-	ret = dma_set_mask(&(rmnet_mhi_ptr->dev->dev),
-						MHI_DMA_MASK);
+		ret = dma_set_mask(&rmnet_mhi_ptr->dev->dev, MHI_DMA_MASK);
		if (ret)
			rmnet_mhi_ptr->allocation_flags = GFP_KERNEL;
		else
			rmnet_mhi_ptr->allocation_flags = GFP_DMA;

-	r = rmnet_mhi_init_inbound(rmnet_mhi_ptr);
-	if (r) {
-		rmnet_log(rmnet_mhi_ptr,
-			  MSG_CRITICAL,
-			  "Failed to init inbound ret %d\n",
-			  r);
-	}

-	netif_napi_add(rmnet_mhi_ptr->dev, &(rmnet_mhi_ptr->napi),
+		netif_napi_add(rmnet_mhi_ptr->dev, &rmnet_mhi_ptr->napi,
			       rmnet_mhi_poll, MHI_NAPI_WEIGHT_VALUE);

-	rmnet_mhi_ptr->mhi_enabled = 1;
		ret = register_netdev(rmnet_mhi_ptr->dev);
		if (ret) {
-		rmnet_log(rmnet_mhi_ptr,
-			  MSG_CRITICAL,
+			rmnet_log(rmnet_mhi_ptr, MSG_CRITICAL,
				  "Network device registration failed\n");
			goto net_dev_reg_fail;
		}
+	}

+	write_lock_irq(&rmnet_mhi_ptr->pm_lock);
+	rmnet_mhi_ptr->mhi_enabled = 1;
+	write_unlock_irq(&rmnet_mhi_ptr->pm_lock);
+
+	r = rmnet_mhi_init_inbound(rmnet_mhi_ptr);
+	if (r) {
+		rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+			  "Failed to init inbound ret %d\n", r);
+	}

	napi_enable(&(rmnet_mhi_ptr->napi));

	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exited.\n");
@@ -951,25 +967,47 @@ static void rmnet_mhi_cb(struct mhi_cb_info *cb_info)

	switch (cb_info->cb_reason) {
	case MHI_CB_MHI_DISABLED:
+	case MHI_CB_MHI_SHUTDOWN:
+	case MHI_CB_SYS_ERROR:
-		rmnet_log(rmnet_mhi_ptr,
-			  MSG_CRITICAL,
-			  "Got MHI_DISABLED notification. Stopping stack\n");
-		if (rmnet_mhi_ptr->mhi_enabled) {
+		rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+			  "Got MHI_SYS_ERROR notification. Stopping stack\n");

+		/* Disable interface on first notification. As long as we
+		 * set mhi_enabled = 0, we guarantee the rest of the driver
+		 * will not touch any critical data.
+		 */
+		write_lock_irq(&rmnet_mhi_ptr->pm_lock);
		rmnet_mhi_ptr->mhi_enabled = 0;
-			/* Ensure MHI is disabled before other mem ops */
-			wmb();
-			while (atomic_read(&rmnet_mhi_ptr->pending_data)) {
-				rmnet_log(rmnet_mhi_ptr,
-					  MSG_CRITICAL,
-					  "Waiting for channels to stop.\n");
-				msleep(25);
-			}
+		write_unlock_irq(&rmnet_mhi_ptr->pm_lock);

+		if (cb_info->chan == rmnet_mhi_ptr->rx_channel) {
+			rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+				  "Receive MHI_DISABLE notification for rx path\n");
+			rmnet_mhi_disable(rmnet_mhi_ptr);
+		} else {
+			rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+				  "Receive MHI_DISABLE notification for tx path\n");
+			rmnet_mhi_ptr->tx_enabled = 0;
+			rmnet_mhi_internal_clean_unmap_buffers
+				(rmnet_mhi_ptr->dev, &rmnet_mhi_ptr->tx_buffers,
+				 DMA_TO_DEVICE);
+		}
+
+		/* Remove all votes disabling low power mode */
+		if (!rmnet_mhi_ptr->tx_enabled && !rmnet_mhi_ptr->rx_enabled) {
+			struct mhi_client_handle *handle =
+				rmnet_mhi_ptr->rx_client_handle;
+
+			if (!handle)
+				handle = rmnet_mhi_ptr->tx_client_handle;
+			while (rmnet_mhi_ptr->wake_count) {
+				mhi_set_lpm(handle, true);
+				rmnet_mhi_ptr->wake_count--;
+			}
+		}
		break;
	case MHI_CB_MHI_ENABLED:
-		rmnet_log(rmnet_mhi_ptr,
-			  MSG_CRITICAL,
+		rmnet_log(rmnet_mhi_ptr, MSG_INFO,
			  "Got MHI_ENABLED notification. Starting stack\n");
		if (cb_info->chan == rmnet_mhi_ptr->rx_channel)
			rmnet_mhi_ptr->rx_enabled = 1;
@@ -998,16 +1036,10 @@ static void rmnet_mhi_cb(struct mhi_cb_info *cb_info)
		}
		break;
	case MHI_CB_XFER:
-		atomic_inc(&rmnet_mhi_ptr->pending_data);
-		/* Flush pending data is set before any other mem operations */
-		wmb();
-		if (rmnet_mhi_ptr->mhi_enabled) {
		if (cb_info->chan == rmnet_mhi_ptr->rx_channel)
			rmnet_mhi_rx_cb(cb_info->result);
		else
			rmnet_mhi_tx_cb(cb_info->result);
-		}
-		atomic_dec(&rmnet_mhi_ptr->pending_data);
		break;
	default:
		break;
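
The old pending_data counter, wmb() and 25 ms spin-wait could be deleted because the pm_lock write section now is the drain: the rx/tx callbacks do their work under the read lock, and write_lock_irq() cannot return until every such reader has unlocked. A condensed sketch of why the flip alone suffices (same fields assumed):

static void disable_and_drain(struct rmnet_mhi_private *p)
{
	/* Blocks until all read-lock holders are gone, so no callback
	 * that observed mhi_enabled == 1 is still inside its locked
	 * section when we proceed to teardown. */
	write_lock_irq(&p->pm_lock);
	p->mhi_enabled = 0;
	write_unlock_irq(&p->pm_lock);
}
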
@@ -1172,6 +1204,7 @@ static int rmnet_mhi_probe(struct platform_device *pdev)
		return -ENOMEM;
	rmnet_mhi_ptr->pdev = pdev;
	spin_lock_init(&rmnet_mhi_ptr->out_chan_full_lock);
+	rwlock_init(&rmnet_mhi_ptr->pm_lock);

	rc = of_property_read_u32(pdev->dev.of_node,
				  "qcom,mhi-mru",
+35 −1
@@ -1185,9 +1185,43 @@ static void uci_xfer_cb(struct mhi_cb_info *cb_info)
		mutex_unlock(&chan_attr->chan_lock);
		wake_up(&chan_attr->wq);
		break;
+	case MHI_CB_SYS_ERROR:
+	case MHI_CB_MHI_SHUTDOWN:
	case MHI_CB_MHI_DISABLED:
		uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO,
			"MHI disabled CB received\n");
			"MHI disabled CB received 0x%x for chan:%d\n",
			cb_info->cb_reason, cb_info->chan);

+		chan_attr = (cb_info->chan % 2) ? &uci_handle->in_attr :
+			&uci_handle->out_attr;
+		mutex_lock(&chan_attr->chan_lock);
+		chan_attr->enabled = false;
+		/* we disable the entire handle by grabbing only one lock */
+		uci_handle->enabled = false;
+		mutex_unlock(&chan_attr->chan_lock);
+		wake_up(&chan_attr->wq);
+
+		/*
+		 * If it's the ctrl channel, clear the resources now;
+		 * otherwise we will release them during file close.
+		 */
+		if (uci_handle == uci_handle->uci_ctxt->ctrl_client &&
+		    chan_attr == &uci_handle->out_attr) {
+			struct uci_buf *itr, *tmp;
+
+			mutex_lock(&chan_attr->chan_lock);
+			atomic_set(&uci_handle->out_attr.avail_pkts, 0);
+			atomic_set(&uci_handle->out_pkt_pend_ack, 0);
+			list_for_each_entry_safe(itr, tmp, &chan_attr->buf_head,
+						 node) {
+				list_del(&itr->node);
+				kfree(itr->data);
+			}
+			atomic_set(&uci_handle->completion_ack, 0);
+			INIT_LIST_HEAD(&uci_handle->out_attr.buf_head);
+			mutex_unlock(&chan_attr->chan_lock);
+		}
		break;
	case MHI_CB_XFER:
		if (!cb_info->result) {
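
On the UCI side, the ctrl-channel flush above relies on list_for_each_entry_safe(), which keeps a lookahead cursor so the current node can be unlinked and freed inside the loop. The same flush in isolation, assuming a uci_buf with the node and data fields the driver uses:

#include <linux/list.h>
#include <linux/slab.h>

struct uci_buf {
	struct list_head node;
	void *data;
};

static void flush_buf_list(struct list_head *buf_head)
{
	struct uci_buf *itr, *tmp;

	list_for_each_entry_safe(itr, tmp, buf_head, node) {
		list_del(&itr->node);
		/* the driver frees only ->data; uci_buf is assumed to
		 * live inside that allocation, so one kfree frees both */
		kfree(itr->data);
	}
	INIT_LIST_HEAD(buf_head);
}
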