Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6ee8be4d authored by Greg Kroah-Hartman
Browse files

Merge 4.9.219 into android-4.9-q



Changes in 4.9.219
	l2tp: ensure sessions are freed after their PPPOL2TP socket
	l2tp: fix race between l2tp_session_delete() and l2tp_tunnel_closeall()
	drm/bochs: downgrade pci_request_region failure from error to warning
	ipv4: fix a RCU-list lock in fib_triestat_seq_show
	net, ip_tunnel: fix interface lookup with no key
	sctp: fix refcount bug in sctp_wfree
	sctp: fix possibly using a bad saddr with a given dst
	drm/etnaviv: replace MMU flush marker with flush sequence
	blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter
	blk-mq: Allow blocking queue tag iter callbacks
	coresight: do not use the BIT() macro in the UAPI header
	net: dsa: tag_brcm: Fix skb->offload_fwd_mark location
	padata: always acquire cpu_hotplug_lock before pinst->lock
	mm: mempolicy: require at least one nodeid for MPOL_PREFERRED
	net: dsa: bcm_sf2: Ensure correct sub-node is parsed
	net: stmmac: dwmac1000: fix out-of-bounds mac address reg setting
	slcan: Don't transmit uninitialized stack data in padding
	net: phy: micrel: kszphy_resume(): add delay after genphy_resume() before accessing PHY registers
	random: always use batched entropy for get_random_u{32,64}
	tools/accounting/getdelays.c: fix netlink attribute length
	ASoC: jz4740-i2s: Fix divider written at incorrect offset in register
	IB/hfi1: Call kobject_put() when kobject_init_and_add() fails
	IB/hfi1: Fix memory leaks in sysfs registration and unregistration
	ceph: remove the extra slashes in the server path
	ceph: canonicalize server path in place
	Bluetooth: RFCOMM: fix ODEBUG bug in rfcomm_dev_ioctl
	RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow
	clk: qcom: rcg: Return failure for RCG update
	drm/msm: stop abusing dma_map/unmap for cache
	arm64: Fix size of __early_cpu_boot_status
	usb: dwc3: don't set gadget->is_otg flag
	drm_dp_mst_topology: fix broken drm_dp_sideband_parse_remote_dpcd_read()
	drm/msm: Use the correct dma_sync calls in msm_gem
	Linux 4.9.219

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ic787d6aa9374864ce928ac1ef802077ac55ca232
parents 16b7ce5a 5188957a
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 218
SUBLEVEL = 219
EXTRAVERSION =
NAME = Roaring Lionus

+1 −1
Original line number Diff line number Diff line
@@ -651,7 +651,7 @@ ENTRY(__boot_cpu_mode)
 * with MMU turned off.
 */
ENTRY(__early_cpu_boot_status)
	.long 	0
	.quad 	0

	.popsection

+8 −1
Original line number Diff line number Diff line
@@ -336,6 +336,13 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
	struct blk_mq_hw_ctx *hctx;
	int i;

	/*
	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
	 * queue_hw_ctx after freeze the queue, so we use q_usage_counter
	 * to avoid race with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;
@@ -351,7 +358,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}

	blk_queue_exit(q);
}

static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
+4 −0
Original line number Diff line number Diff line
@@ -2346,6 +2346,10 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
	/*
	 * Sync with blk_mq_queue_tag_busy_iter.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);

+2 −8
Original line number Diff line number Diff line
@@ -2119,8 +2119,8 @@ struct batched_entropy {

/*
 * Get a random word for internal kernel use only. The quality of the random
 * number is either as good as RDRAND or as good as /dev/urandom, with the
 * goal of being quite fast and not depleting entropy.
 * number is good as /dev/urandom, but there is no backtrack protection, with
 * the goal of being quite fast and not depleting entropy.
 */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
unsigned long get_random_long(void)
@@ -2128,9 +2128,6 @@ unsigned long get_random_long(void)
	unsigned long ret;
	struct batched_entropy *batch;

	if (arch_get_random_long(&ret))
		return ret;

	batch = &get_cpu_var(batched_entropy_long);
	if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
		extract_crng((u8 *)batch->entropy_long);
@@ -2154,9 +2151,6 @@ unsigned int get_random_int(void)
	unsigned int ret;
	struct batched_entropy *batch;

	if (arch_get_random_int(&ret))
		return ret;

	batch = &get_cpu_var(batched_entropy_int);
	if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
		extract_crng((u8 *)batch->entropy_int);
Loading