Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 446a396c authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: bam_dmux: modify DL low memory algorithm"

parents 61a55379 48e69386
Loading
Loading
Loading
Loading
+29 −14
Original line number Diff line number Diff line
@@ -216,9 +216,10 @@ static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);
static void queue_rx_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);
static struct delayed_work queue_rx_work;
static DECLARE_WORK(queue_rx_work, queue_rx_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;
@@ -385,7 +386,7 @@ static inline void verify_tx_queue_is_empty(const char *func)
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

static void queue_rx(void)
static void __queue_rx(gfp_t alloc_flags)
{
	void *ptr;
	struct rx_pkt_info *info;
@@ -400,23 +401,23 @@ static void queue_rx(void)
		if (in_global_reset)
			goto fail;

		info = kmalloc(sizeof(struct rx_pkt_info),
						GFP_NOWAIT | __GFP_NOWARN);
		info = kmalloc(sizeof(struct rx_pkt_info), alloc_flags);
		if (!info) {
			DMUX_LOG_KERR(
			"%s: unable to alloc rx_pkt_info, will retry later\n",
								__func__);
			"%s: unable to alloc rx_pkt_info w/ flags %x, will retry later\n",
								__func__,
								alloc_flags);
			goto fail;
		}

		INIT_WORK(&info->work, handle_bam_mux_cmd);

		info->skb = __dev_alloc_skb(BUFFER_SIZE,
						GFP_NOWAIT | __GFP_NOWARN);
		info->skb = __dev_alloc_skb(BUFFER_SIZE, alloc_flags);
		if (info->skb == NULL) {
			DMUX_LOG_KERR(
				"%s: unable to alloc skb, will retry later\n",
								__func__);
				"%s: unable to alloc skb w/ flags %x, will retry later\n",
								__func__,
								alloc_flags);
			goto fail_info;
		}
		ptr = skb_put(info->skb, BUFFER_SIZE);
@@ -458,15 +459,30 @@ fail_info:
	kfree(info);

fail:
	if (rx_len_cached == 0 && !in_global_reset) {
	if (!in_global_reset) {
		DMUX_LOG_KERR("%s: rescheduling\n", __func__);
		schedule_delayed_work(&queue_rx_work, msecs_to_jiffies(100));
		schedule_work(&queue_rx_work);
	}
}

static void queue_rx(void)
{
	/*
	 * Hot path.  Delays cannot be tolerated here - neither from the
	 * allocator blocking to reclaim memory when it is not immediately
	 * available, nor from logging allocation failures - so allocate
	 * with GFP_NOWAIT and suppress allocation-failure warnings.
	 * On failure, __queue_rx() reschedules itself via queue_rx_work,
	 * whose handler retries on the tolerant GFP_KERNEL cold path.
	 */
	__queue_rx(GFP_NOWAIT | __GFP_NOWARN);
}

static void queue_rx_work_func(struct work_struct *work)
{
	queue_rx();
	/*
	 * Cold path.  Delays can be tolerated.  Use of GFP_KERNEL should
	 * guarantee the requested memory will be found, after some amount of
	 * delay.
	 */
	__queue_rx(GFP_KERNEL);
}

static void bam_mux_process_data(struct sk_buff *rx_skb)
@@ -2496,7 +2512,6 @@ static int bam_dmux_probe(struct platform_device *pdev)
	init_completion(&shutdown_completion);
	complete_all(&shutdown_completion);
	INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
	INIT_DELAYED_WORK(&queue_rx_work, queue_rx_work_func);
	wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
	init_srcu_struct(&bam_dmux_srcu);