Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c8c6c9ba authored by Linus Torvalds
Browse files
Pull misc SCSI patches from James Bottomley:
 "This is a short patch set representing a couple of left overs from the
  merge window (debug removal and MAINTAINER changes).

  Plus one merge window regression (the local workqueue for hpsa) and a
  set of bug fixes for several issues (two for scsi-mq and the rest an
  assortment of long standing stuff, all cc'd to stable)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  sg: fix EWOULDBLOCK errors with scsi-mq
  sg: fix unkillable I/O wait deadlock with scsi-mq
  sg: fix read() error reporting
  wd719x: add missing .module to wd719x_template
  hpsa: correct compiler warnings introduced by hpsa-add-local-workqueue patch
  fixed invalid assignment of 64bit mask to host dma_boundary for scatter gather segment boundary limit.
  fcoe: Transition maintainership to Vasu
  am53c974: remove left-over debugging code
parents 93aaa830 7772855a
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -3937,7 +3937,7 @@ S: Maintained
F:	drivers/staging/fbtft/

FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
M:	Robert Love <robert.w.love@intel.com>
M:	Vasu Dev <vasu.dev@intel.com>
L:	fcoe-devel@open-fcoe.org
W:	www.Open-FCoE.org
S:	Supported
+0 −6
Original line number Diff line number Diff line
@@ -178,12 +178,6 @@ static void pci_esp_dma_drain(struct esp *esp)
			break;
		cpu_relax();
	}
	if (resid > 1) {
		/* FIFO not cleared */
		shost_printk(KERN_INFO, esp->host,
			     "FIFO not cleared, %d bytes left\n",
			     resid);
	}

	/*
	 * When there is a residual BCMPLT will never be set
+0 −1
Original line number Diff line number Diff line
@@ -585,7 +585,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
+1 −3
Original line number Diff line number Diff line
@@ -6831,10 +6831,8 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
						char *name)
{
	struct workqueue_struct *wq = NULL;
	char wq_name[20];

	snprintf(wq_name, sizeof(wq_name), "%s_%d_hpsa", name, h->ctlr);
	wq = alloc_ordered_workqueue(wq_name, 0);
	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
	if (!wq)
		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);

+33 −7
Original line number Diff line number Diff line
@@ -546,7 +546,7 @@ static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
	sg_io_hdr_t *hp = &srp->header;
	int err = 0;
	int err = 0, err2;
	int len;

	if (count < SZ_SG_IO_HDR) {
@@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
		goto err_out;
	}
err_out:
	err = sg_finish_rem_req(srp);
	return (0 == err) ? count : err;
	err2 = sg_finish_rem_req(srp);
	return err ? : err2 ? : count;
}

static ssize_t
@@ -1335,6 +1335,17 @@ sg_rq_end_io(struct request *rq, int uptodate)
	}
	/* Rely on write phase to clean out srp status values, so no "else" */

	/*
	 * Free the request as soon as it is complete so that its resources
	 * can be reused without waiting for userspace to read() the
	 * result.  But keep the associated bio (if any) around until
	 * blk_rq_unmap_user() can be called from user context.
	 */
	srp->rq = NULL;
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	__blk_put_request(rq->q, rq);

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	if (unlikely(srp->orphan)) {
		if (sfp->keep_orphan)
@@ -1669,7 +1680,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
			return -ENOMEM;
	}

	rq = blk_get_request(q, rw, GFP_ATOMIC);
	/*
	 * NOTE
	 *
	 * With scsi-mq enabled, there are a fixed number of preallocated
	 * requests equal in number to shost->can_queue.  If all of the
	 * preallocated requests are already in use, then using GFP_ATOMIC with
	 * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
	 * will cause blk_get_request() to sleep until an active command
	 * completes, freeing up a request.  Neither option is ideal, but
	 * GFP_KERNEL is the better choice to prevent userspace from getting an
	 * unexpected EWOULDBLOCK.
	 *
	 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
	 * does not sleep except under memory pressure.
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (IS_ERR(rq)) {
		kfree(long_cmdp);
		return PTR_ERR(rq);
@@ -1759,10 +1785,10 @@ sg_finish_rem_req(Sg_request *srp)
	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
				      "sg_finish_rem_req: res_used=%d\n",
				      (int) srp->res_used));
	if (srp->rq) {
	if (srp->bio)
		ret = blk_rq_unmap_user(srp->bio);

	if (srp->rq) {
		if (srp->rq->cmd != srp->rq->__cmd)
			kfree(srp->rq->cmd);
		blk_put_request(srp->rq);
Loading