Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 477bb53b authored by Greg Kroah-Hartman's avatar Greg Kroah-Hartman
Browse files

Merge 5.4.247 into android11-5.4-lts



Changes in 5.4.247
	blk-iocost: avoid 64-bit division in ioc_timer_fn
	block/blk-iocost (gcc13): keep large values in a new enum
	i40iw: fix build warning in i40iw_manage_apbvt()
	i40e: fix build warnings in i40e_alloc.h
	spi: qup: Request DMA before enabling clocks
	neighbour: Replace zero-length array with flexible-array member
	neighbour: fix unaligned access to pneigh_entry
	net: dsa: lan9303: allow vid != 0 in port_fdb_{add|del} methods
	Bluetooth: Fix l2cap_disconnect_req deadlock
	Bluetooth: L2CAP: Add missing checks for invalid DCID
	netfilter: conntrack: fix NULL pointer dereference in nf_confirm_cthelper
	netfilter: ipset: Add schedule point in call_ad().
	rfs: annotate lockless accesses to sk->sk_rxhash
	rfs: annotate lockless accesses to RFS sock flow table
	net: sched: move rtm_tca_policy declaration to include file
	net: sched: fix possible refcount leak in tc_chain_tmplt_add()
	lib: cpu_rmap: Fix potential use-after-free in irq_cpu_rmap_release()
	bnxt_en: Query default VLAN before VNIC setup on a VF
	batman-adv: Broken sync while rescheduling delayed work
	Input: xpad - delete a Razer DeathAdder mouse VID/PID entry
	Input: psmouse - fix OOB access in Elantech protocol
	ALSA: hda/realtek: Add a quirk for HP Slim Desktop S01
	ALSA: hda/realtek: Add Lenovo P3 Tower platform
	drm/amdgpu: fix xclk freq on CHIP_STONEY
	can: j1939: j1939_sk_send_loop_abort(): improved error queue handling in J1939 Socket
	can: j1939: change j1939_netdev_lock type to mutex
	can: j1939: avoid possible use-after-free when j1939_can_rx_register fails
	ceph: fix use-after-free bug for inodes when flushing capsnaps
	Bluetooth: Fix use-after-free in hci_remove_ltk/hci_remove_irk
	rbd: move RBD_OBJ_FLAG_COPYUP_ENABLED flag setting
	pinctrl: meson-axg: add missing GPIOA_18 gpio group
	usb: usbfs: Enforce page requirements for mmap
	usb: usbfs: Use consistent mmap functions
	bonding (gcc13): synchronize bond_{a,t}lb_xmit() types
	i2c: sprd: Delete i2c adapter in .remove's error path
	eeprom: at24: also select REGMAP
	ext4: only check dquot_initialize_needed() when debugging
	cifs: get rid of unused parameter in reconn_setup_dfs_targets()
	cifs: handle empty list of targets in cifs_reconnect()
	drm/atomic: Don't pollute crtc_state->mode_blob with error pointers
	rbd: get snapshot context after exclusive lock is ensured to be held
	btrfs: check return value of btrfs_commit_transaction in relocation
	btrfs: unset reloc control if transaction commit fails in prepare_to_relocate()
	mtd: spinand: macronix: Add support for MX35LFxGE4AD
	Revert "staging: rtl8192e: Replace macro RTL_PCI_DEVICE with PCI_DEVICE"
	Linux 5.4.247

Change-Id: Icf4b5a01b7db586596461c533e181b6ca83d4ffd
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
parents de0a430e 61a2f83e
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 246
SUBLEVEL = 247
EXTRAVERSION =
NAME = Kleptomaniac Octopus

+7 −3
Original line number Diff line number Diff line
@@ -248,7 +248,9 @@ enum {

	/* 1/64k is granular enough and can easily be handled w/ u32 */
	HWEIGHT_WHOLE		= 1 << 16,
};

enum {
	/*
	 * As vtime is used to calculate the cost of each IO, it needs to
	 * be fairly high precision.  For example, it should be able to
@@ -271,6 +273,11 @@ enum {
	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
	VRATE_CLAMP_ADJ_PCT	= 4,

	/* switch iff the conditions are met for longer than this */
	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,
};

enum {
	/* if IOs end up waiting for requests, issue less */
	RQ_WAIT_BUSY_PCT	= 5,

@@ -288,9 +295,6 @@ enum {
	SURPLUS_SCALE_ABS	= HWEIGHT_WHOLE / 50,	/* + 2% */
	SURPLUS_MIN_ADJ_DELTA	= HWEIGHT_WHOLE / 33,	/* 3% */

	/* switch iff the conditions are met for longer than this */
	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,

	/*
	 * Count IO size in 4k pages.  The 12bit shift helps keeping
	 * size-proportional components of cost calculation in closer
+45 −28
Original line number Diff line number Diff line
@@ -1493,14 +1493,30 @@ static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
/*
 * Must be called after rbd_obj_calc_img_extents().
 */
static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
{
	if (!obj_req->num_img_extents ||
	    (rbd_obj_is_entire(obj_req) &&
	     !obj_req->img_request->snapc->num_snaps))
		return false;
	rbd_assert(obj_req->img_request->snapc);

	return true;
	if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
		dout("%s %p objno %llu discard\n", __func__, obj_req,
		     obj_req->ex.oe_objno);
		return;
	}

	if (!obj_req->num_img_extents) {
		dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
		     obj_req->ex.oe_objno);
		return;
	}

	if (rbd_obj_is_entire(obj_req) &&
	    !obj_req->img_request->snapc->num_snaps) {
		dout("%s %p objno %llu entire\n", __func__, obj_req,
		     obj_req->ex.oe_objno);
		return;
	}

	obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
@@ -1599,6 +1615,7 @@ __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
	rbd_assert(obj_req->img_request->snapc);
	return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
					 num_ops);
}
@@ -1727,11 +1744,14 @@ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 *
 * Only snap_id is captured here, for reads.  For writes, snapshot
 * context is captured in rbd_img_object_requests() after exclusive
 * lock is ensured to be held.
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
					enum obj_operation_type op_type)
{
	struct rbd_img_request *img_request;

@@ -1743,8 +1763,6 @@ static struct rbd_img_request *rbd_img_request_create(
	img_request->op_type = op_type;
	if (!rbd_img_is_write(img_request))
		img_request->snap_id = rbd_dev->spec->snap_id;
	else
		img_request->snapc = snapc;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
@@ -2389,9 +2407,6 @@ static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
@@ -2497,8 +2512,6 @@ static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
	if (!obj_req->num_img_extents) {
		obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
		if (rbd_obj_is_entire(obj_req))
@@ -2935,7 +2948,7 @@ static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
	int ret;

	child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
					       OBJ_OP_READ, NULL);
					       OBJ_OP_READ);
	if (!child_img_req)
		return -ENOMEM;

@@ -3439,6 +3452,7 @@ static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
	case RBD_OBJ_WRITE_START:
		rbd_assert(!*result);

		rbd_obj_set_copyup_enabled(obj_req);
		if (rbd_obj_write_is_noop(obj_req))
			return true;

@@ -3625,9 +3639,19 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)

static void rbd_img_object_requests(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct rbd_obj_request *obj_req;

	rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
	rbd_assert(!need_exclusive_lock(img_req) ||
		   __rbd_is_lock_owner(rbd_dev));

	if (rbd_img_is_write(img_req)) {
		rbd_assert(!img_req->snapc);
		down_read(&rbd_dev->header_rwsem);
		img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	for_each_obj_request(img_req, obj_req) {
		int result = 0;
@@ -3645,7 +3669,6 @@ static void rbd_img_object_requests(struct rbd_img_request *img_req)

static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	int ret;

again:
@@ -3666,9 +3689,6 @@ static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
		if (*result)
			return true;

		rbd_assert(!need_exclusive_lock(img_req) ||
			   __rbd_is_lock_owner(rbd_dev));

		rbd_img_object_requests(img_req);
		if (!img_req->pending.num_pending) {
			*result = img_req->pending.result;
@@ -4130,6 +4150,10 @@ static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
		ret = rbd_object_map_open(rbd_dev);
		if (ret)
@@ -4788,7 +4812,6 @@ static void rbd_queue_workfn(struct work_struct *work)
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
@@ -4853,10 +4876,6 @@ static void rbd_queue_workfn(struct work_struct *work)

	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (op_type != OBJ_OP_READ) {
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
@@ -4866,13 +4885,12 @@ static void rbd_queue_workfn(struct work_struct *work)
		goto err_rq;
	}

	img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
	img_request = rbd_img_request_create(rbd_dev, op_type);
	if (!img_request) {
		result = -ENOMEM;
		goto err_rq;
	}
	img_request->rq = rq;
	snapc = NULL; /* img_request consumes a ref */

	dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
	     img_request, obj_op_name(op_type), offset, length);
@@ -4894,7 +4912,6 @@ static void rbd_queue_workfn(struct work_struct *work)
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, errno_to_blk_status(result));
}
+9 −2
Original line number Diff line number Diff line
@@ -329,8 +329,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
	if (adev->flags & AMD_IS_APU) {
		switch (adev->asic_type) {
		case CHIP_STONEY:
			/* vbios says 48Mhz, but the actual freq is 100Mhz */
			return 10000;
		default:
			return reference_clock;
		}
	}

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
+8 −6
Original line number Diff line number Diff line
@@ -75,15 +75,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
	state->mode_blob = NULL;

	if (mode) {
		struct drm_property_blob *blob;

		drm_mode_convert_to_umode(&umode, mode);
		state->mode_blob =
			drm_property_create_blob(state->crtc->dev,
		                                 sizeof(umode),
		                                 &umode);
		if (IS_ERR(state->mode_blob))
			return PTR_ERR(state->mode_blob);
		blob = drm_property_create_blob(crtc->dev,
						sizeof(umode), &umode);
		if (IS_ERR(blob))
			return PTR_ERR(blob);

		drm_mode_copy(&state->mode, mode);

		state->mode_blob = blob;
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
				 mode->name, crtc->base.id, crtc->name, state);
Loading