
Commit 6bc6256a authored by Greg Kroah-Hartman

Merge 4.14.62 into android-4.14



Changes in 4.14.62
	scsi: qla2xxx: Fix uninitialized List head crash
	scsi: qla2xxx: Fix NPIV deletion by calling wait_for_sess_deletion
	scsi: qla2xxx: Fix ISP recovery on unload
	scsi: qla2xxx: Return error when TMF returns
	genirq: Make force irq threading setup more robust
	nohz: Fix local_timer_softirq_pending()
	nohz: Fix missing tick reprogram when interrupting an inline softirq
	netlink: Don't shift on 64 for ngroups
	ext4: fix false negatives *and* false positives in ext4_check_descriptors()
	ACPI / PCI: Bail early in acpi_pci_add_bus() if there is no ACPI handle
	ring_buffer: tracing: Inherit the tracing setting to next ring buffer
	i2c: imx: Fix reinit_completion() use
	Btrfs: fix file data corruption after cloning a range and fsync
	nvme-pci: allocate device queues storage space at probe
	nvme-pci: Fix queue double allocations
	nvmet-fc: fix target sgl list on large transfers
	intel_idle: Graceful probe failure when MWAIT is disabled
	xfs: catch inode allocation state mismatch corruption
	xfs: validate cached inodes are free when allocated
	xfs: don't call xfs_da_shrink_inode with NULL bp
	jfs: Fix inconsistency between memory allocation and ea_buf->max_size
	Linux 4.14.62

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
parents 0dafb9f6 1aa1166e
Makefile +1 −1
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 61
+SUBLEVEL = 62
EXTRAVERSION =
NAME = Petit Gorille

drivers/i2c/busses/i2c-imx.c +1 −2
@@ -376,6 +376,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
		goto err_desc;
	}

+	reinit_completion(&dma->cmd_complete);
	txdesc->callback = i2c_imx_dma_callback;
	txdesc->callback_param = i2c_imx;
	if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -619,7 +620,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
	 * The first byte must be transmitted by the CPU.
	 */
	imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
-	reinit_completion(&i2c_imx->dma->cmd_complete);
	time_left = wait_for_completion_timeout(
				&i2c_imx->dma->cmd_complete,
				msecs_to_jiffies(DMA_TIMEOUT));
@@ -678,7 +678,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
	if (result)
		return result;

-	reinit_completion(&i2c_imx->dma->cmd_complete);
	time_left = wait_for_completion_timeout(
				&i2c_imx->dma->cmd_complete,
				msecs_to_jiffies(DMA_TIMEOUT));
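
A note on the i2c-imx fix above: reinit_completion() is hoisted in front of dmaengine_submit(), because once the descriptor is submitted the DMA callback may call complete() at any moment; re-arming the completion afterwards can erase a completion that already fired, and the later wait then times out spuriously. A minimal sketch of the race-free ordering, with hypothetical names (not code from this diff):

	/* Sketch only: illustrative transfer helper, not the i2c-imx code. */
	static int example_dma_xfer(struct dma_chan *chan,
				    struct dma_async_tx_descriptor *txdesc,
				    struct completion *done)
	{
		/* Re-arm BEFORE submission; after dmaengine_submit() the
		 * callback can run at any time and call complete(done). */
		reinit_completion(done);

		if (dma_submit_error(dmaengine_submit(txdesc)))
			return -EINVAL;
		dma_async_issue_pending(chan);

		if (!wait_for_completion_timeout(done,
						 msecs_to_jiffies(1000)))
			return -ETIMEDOUT;
		return 0;
	}
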
drivers/idle/intel_idle.c +6 −1
@@ -1061,7 +1061,7 @@ static const struct idle_cpu idle_cpu_dnv = {
};

#define ICPU(model, cpu) \
-	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }

static const struct x86_cpu_id intel_idle_ids[] __initconst = {
	ICPU(INTEL_FAM6_NEHALEM_EP,		idle_cpu_nehalem),
@@ -1125,6 +1125,11 @@ static int __init intel_idle_probe(void)
		return -ENODEV;
	}

+	if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+		pr_debug("Please enable MWAIT in BIOS SETUP\n");
+		return -ENODEV;
+	}
+
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return -ENODEV;

drivers/nvme/host/pci.c +28 −36
@@ -77,7 +77,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
-	struct nvme_queue **queues;
+	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
@@ -348,7 +348,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
@@ -370,7 +370,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	if (!nvmeq->tags)
		nvmeq->tags = &dev->tagset.tags[hctx_idx];
@@ -386,7 +386,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
-	struct nvme_queue *nvmeq = dev->queues[queue_idx];
+	struct nvme_queue *nvmeq = &dev->queues[queue_idx];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;
@@ -900,7 +900,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
@@ -1146,7 +1146,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
	if (nvmeq->sq_cmds)
		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
-	kfree(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1154,10 +1153,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
	int i;

	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
-		struct nvme_queue *nvmeq = dev->queues[i];
		dev->ctrl.queue_count--;
-		dev->queues[i] = NULL;
-		nvme_free_queue(nvmeq);
+		nvme_free_queue(&dev->queues[i]);
	}
}

@@ -1189,10 +1186,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)

static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];

-	if (!nvmeq)
-		return;
	if (nvme_suspend_queue(nvmeq))
		return;

@@ -1246,13 +1241,13 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
	return 0;
}

-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
		int depth, int node)
{
-	struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
-							node);
-	if (!nvmeq)
-		return NULL;
+	struct nvme_queue *nvmeq = &dev->queues[qid];
+
+	if (dev->ctrl.queue_count > qid)
+		return 0;

	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
					  &nvmeq->cq_dma_addr, GFP_KERNEL);
@@ -1271,17 +1266,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	nvmeq->cq_vector = -1;
-	dev->queues[qid] = nvmeq;
	dev->ctrl.queue_count++;

-	return nvmeq;
+	return 0;

 free_cqdma:
	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
-	kfree(nvmeq);
-	return NULL;
+	return -ENOMEM;
}

static int queue_request_irq(struct nvme_queue *nvmeq)
@@ -1468,14 +1461,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
	if (result < 0)
		return result;

-	nvmeq = dev->queues[0];
-	if (!nvmeq) {
-		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
-				dev_to_node(dev->dev));
-		if (!nvmeq)
-			return -ENOMEM;
-	}
+	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+			dev_to_node(dev->dev));
+	if (result)
+		return result;
+
+	nvmeq = &dev->queues[0];
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

@@ -1505,7 +1496,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)

	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
		/* vector == qid - 1, match nvme_create_queue */
-		if (!nvme_alloc_queue(dev, i, dev->q_depth,
+		if (nvme_alloc_queue(dev, i, dev->q_depth,
		     pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
			ret = -ENOMEM;
			break;
@@ -1514,7 +1505,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)

	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
	for (i = dev->online_queues; i <= max; i++) {
-		ret = nvme_create_queue(dev->queues[i], i);
+		ret = nvme_create_queue(&dev->queues[i], i);
		if (ret)
			break;
	}
@@ -1770,7 +1761,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
-	struct nvme_queue *adminq = dev->queues[0];
+	struct nvme_queue *adminq = &dev->queues[0];
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int result, nr_io_queues;
	unsigned long size;
@@ -1896,7 +1887,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
 retry:
		timeout = ADMIN_TIMEOUT;
		for (; i > 0; i--, sent++)
-			if (nvme_delete_queue(dev->queues[i], opcode))
+			if (nvme_delete_queue(&dev->queues[i], opcode))
				break;

		while (sent--) {
@@ -2081,7 +2072,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)

	queues = dev->online_queues - 1;
	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
-		nvme_suspend_queue(dev->queues[i]);
+		nvme_suspend_queue(&dev->queues[i]);

	if (dead) {
		/* A device might become IO incapable very soon during
@@ -2089,7 +2080,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
		 * queue_count can be 0 here.
		 */
		if (dev->ctrl.queue_count)
-			nvme_suspend_queue(dev->queues[0]);
+			nvme_suspend_queue(&dev->queues[0]);
	} else {
		nvme_disable_io_queues(dev, queues);
		nvme_disable_admin_queue(dev, shutdown);
@@ -2345,7 +2336,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
	if (!dev)
		return -ENOMEM;
-	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
+
+	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(struct nvme_queue),
							GFP_KERNEL, node);
	if (!dev->queues)
		goto free;
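
The two nvme-pci patches work as a pair: dev->queues changes from an array of nvme_queue pointers, each kzalloc'ed on demand (and re-allocated, and potentially leaked, across controller resets), to a single array of struct nvme_queue sized for num_possible_cpus() + 1 and allocated once at probe. Queue lookups become &dev->queues[i], and the dev->ctrl.queue_count > qid test turns a repeated nvme_alloc_queue() call into a no-op instead of a double allocation. A small userspace analogue of the new layout (illustrative types, not driver code):

	/* Sketch only: contrast of the old and new queue storage. */
	#include <stdlib.h>

	struct queue { int qid; /* ... rings, doorbells, ... */ };

	struct dev_old { struct queue **queues; };	/* pointers, alloc'ed later */
	struct dev_new {				/* embedded, alloc'ed once  */
		struct queue *queues;
		int queue_count;
	};

	static int alloc_queue(struct dev_new *d, int qid)
	{
		struct queue *q = &d->queues[qid];

		if (d->queue_count > qid)	/* already set up (reset path) */
			return 0;
		q->qid = qid;
		d->queue_count++;
		return 0;
	}

	int main(void)
	{
		struct dev_new d = { .queues = calloc(4 + 1, sizeof(struct queue)) };

		if (!d.queues)
			return 1;
		alloc_queue(&d, 0);	/* initializes queue 0 */
		alloc_queue(&d, 0);	/* harmless no-op on a reset-like path */
		free(d.queues);
		return 0;
	}
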
drivers/nvme/target/fc.c +35 −9
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

+/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
+	struct scatterlist		*next_sg;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				total_length;
@@ -1000,8 +1001,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
	INIT_LIST_HEAD(&newrec->assoc_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
-	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
-					template->max_sgl_segments);
+	newrec->max_sg_cnt = template->max_sgl_segments;

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
@@ -1717,6 +1717,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
				/* note: write from initiator perspective */
+	fod->next_sg = fod->data_sg;

	return 0;

@@ -1874,24 +1875,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod, u8 op)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct scatterlist *sg = fod->next_sg;
	unsigned long flags;
-	u32 tlen;
+	u32 remaininglen = fod->total_length - fod->offset;
+	u32 tlen = 0;
	int ret;

	fcpreq->op = op;
	fcpreq->offset = fod->offset;
	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;

-	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-			(fod->total_length - fod->offset));
+	/*
+	 * for next sequence:
+	 *  break at a sg element boundary
+	 *  attempt to keep sequence length capped at
+	 *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+	 *    be longer if a single sg element is larger
+	 *    than that amount. This is done to avoid creating
+	 *    a new sg list to use for the tgtport api.
+	 */
+	fcpreq->sg = sg;
+	fcpreq->sg_cnt = 0;
+	while (tlen < remaininglen &&
+	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+		fcpreq->sg_cnt++;
+		tlen += sg_dma_len(sg);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+		fcpreq->sg_cnt++;
+		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen)
+		fod->next_sg = sg;
+	else
+		fod->next_sg = NULL;
+
	fcpreq->transfer_length = tlen;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->rsplen = 0;

-	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
-	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
	/*
	 * If the last READDATA request: check if LLDD supports
	 * combined xfr with response.
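
The nvmet-fc rework above exists because the old code assumed every sg element was exactly PAGE_SIZE: it capped tlen at max_sg_cnt * PAGE_SIZE and indexed the list with data_sg[offset / PAGE_SIZE], which breaks as soon as an LLDD hands over larger elements. The new loop instead walks the residual list from fod->next_sg, taking whole elements until the remaining length, the LLDD's max_sg_cnt, or the 256 KB sequence cap is reached, and deliberately lets one oversized element exceed the cap rather than split it, since splitting would mean building a new sg list for the tgtport API. A standalone model of the capping walk (hypothetical types mirroring the logic above):

	/* Sketch only: sequence capping over a singly linked sg model. */
	#include <stdint.h>
	#include <stddef.h>

	#define MAX_SEQ_LENGTH	(256 * 1024)

	struct sg { uint32_t dma_len; struct sg *next; };

	/* Returns the byte count of the next sequence; *pos advances to the
	 * first unconsumed element, or NULL when the transfer is complete. */
	static uint32_t next_sequence(struct sg **pos, uint32_t remaining,
				      unsigned int max_sg_cnt)
	{
		struct sg *sg = *pos;
		unsigned int cnt = 0;
		uint32_t tlen = 0;

		/* take whole elements while under every cap */
		while (sg && tlen < remaining && cnt < max_sg_cnt &&
		       tlen + sg->dma_len < MAX_SEQ_LENGTH) {
			cnt++;
			tlen += sg->dma_len;
			sg = sg->next;
		}
		/* a single element larger than the cap is sent whole */
		if (sg && tlen < remaining && cnt == 0) {
			tlen += sg->dma_len < remaining ? sg->dma_len : remaining;
			sg = sg->next;
		}
		*pos = (tlen < remaining) ? sg : NULL;
		return tlen;
	}
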