Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1de3bb8a authored by Greg Kroah-Hartman
Browse files

Merge 08e84df9 ("selftests: timers: clocksource-switch: fix passing errors...


Merge 08e84df9 ("selftests: timers: clocksource-switch: fix passing errors from child") into android-mainline

Steps on the way to 4.19.256

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I4f59d2cdb4467a5ffa2896a77e546dee92d654c1
parents d3c4087a 08e84df9
Loading
Loading
Loading
Loading
+7 −7
Original line number Diff line number Diff line
@@ -457,7 +457,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
		 */
	}

	mutex_lock(&ctx->queue->queuelock);
	spin_lock_bh(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
@@ -517,7 +517,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
			list_del(&backlog_req->backlog_head);
		}
	}
	mutex_unlock(&ctx->queue->queuelock);
	spin_unlock_bh(&ctx->queue->queuelock);

	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
@@ -806,7 +806,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	mutex_lock(&queue->queuelock);
	spin_lock_bh(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
@@ -822,15 +822,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			mutex_unlock(&queue->queuelock);
			spin_unlock_bh(&queue->queuelock);
			goto out;
		}

		mutex_unlock(&queue->queuelock);
		spin_unlock_bh(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	mutex_unlock(&queue->queuelock);
	spin_unlock_bh(&queue->queuelock);
	if (ret)
		goto err_free_elements;

@@ -889,7 +889,7 @@ static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	mutex_init(&ctx->queue->queuelock);
	spin_lock_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
+1 −1
Original line number Diff line number Diff line
@@ -347,7 +347,7 @@ struct sec_queue {
	DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN);
	DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *));
	bool havesoftqueue;
	struct mutex queuelock;
	spinlock_t queuelock;
	void *shadow[SEC_QUEUE_LEN];
};

+2 −0
Original line number Diff line number Diff line
@@ -48,12 +48,14 @@ struct mdp_ipi_init {
 * @ipi_id        : IPI_MDP
 * @ap_inst       : AP mtk_mdp_vpu address
 * @vpu_inst_addr : VPU MDP instance address
 * @padding       : Alignment padding
 */
struct mdp_ipi_comm {
	uint32_t msg_id;
	uint32_t ipi_id;
	uint64_t ap_inst;
	uint32_t vpu_inst_addr;
	uint32_t padding;
};

/**
+1 −0
Original line number Diff line number Diff line
@@ -114,6 +114,7 @@ static int mt76_led_init(struct mt76_dev *dev)
		if (!of_property_read_u32(np, "led-sources", &led_pin))
			dev->led_pin = led_pin;
		dev->led_al = of_property_read_bool(np, "led-active-low");
		of_node_put(np);
	}

	return devm_led_classdev_register(dev->dev, &dev->led_cdev);
+16 −7
Original line number Diff line number Diff line
@@ -2856,7 +2856,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cur_mss;
	int diff, len, err;

	int avail_wnd;

	/* Inconclusive MTU probe */
	if (icsk->icsk_mtup.probe_size)
@@ -2886,17 +2886,25 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	cur_mss = tcp_current_mss(sk);
	avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

	/* If receiver has shrunk his window, and skb is out of
	 * new window, do not retransmit it. The exception is the
	 * case, when window is shrunk to zero. In this case
	 * our retransmit serves as a zero window probe.
	 * our retransmit of one segment serves as a zero window probe.
	 */
	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
	    TCP_SKB_CB(skb)->seq != tp->snd_una)
	if (avail_wnd <= 0) {
		if (TCP_SKB_CB(skb)->seq != tp->snd_una)
			return -EAGAIN;
		avail_wnd = cur_mss;
	}

	len = cur_mss * segs;
	if (len > avail_wnd) {
		len = rounddown(avail_wnd, cur_mss);
		if (!len)
			len = avail_wnd;
	}
	if (skb->len > len) {
		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
				 cur_mss, GFP_ATOMIC))
@@ -2910,8 +2918,9 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
		diff -= tcp_skb_pcount(skb);
		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
		if (skb->len < cur_mss)
			tcp_retrans_try_collapse(sk, skb, cur_mss);
		avail_wnd = min_t(int, avail_wnd, cur_mss);
		if (skb->len < avail_wnd)
			tcp_retrans_try_collapse(sk, skb, avail_wnd);
	}

	/* RFC3168, section 6.1.1.1. ECN fallback */
Loading