Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6f9a35e2 authored by Boaz Harrosh's avatar Boaz Harrosh Committed by James Bottomley
Browse files

[SCSI] bidirectional command support



At the block level bidi request uses req->next_rq pointer for a second
bidi_read request.
At Scsi-midlayer a second scsi_data_buffer structure is used for the
bidi_read part. This bidi scsi_data_buffer is put on
request->next_rq->special. Struct scsi_cmnd is not changed.

- Define scsi_bidi_cmnd() to return true if it is a bidi request and a
  second sgtable was allocated.

- Define scsi_in()/scsi_out() to return the in or out scsi_data_buffer
  from this command. This API is to isolate users from the mechanics of
  bidi.

- Define scsi_end_bidi_request() to do what scsi_end_request() does but
  for a bidi request. This is necessary because bidi commands are a bit
  tricky here. (See comments in body)

- scsi_release_buffers() will also release the bidi_read scsi_data_buffer

- scsi_io_completion() on bidi commands will now call
  scsi_end_bidi_request() and return.

- The previous work done in scsi_init_io() is now done in a new
  scsi_init_sgtable() (which is 99% identical to old scsi_init_io())
  The new scsi_init_io() will call the above twice if needed also for
  the bidi_read command. Only at this point is a command bidi.

- In scsi_error.c at scsi_eh_prep/restore_cmnd() make sure bidi-lld is not
  confused by a get-sense command that looks like bidi. This is done
  by putting NULL at request->next_rq, and restoring.

[jejb: update to sg_table and resolve conflicts
also update to blk-end-request and resolve conflicts]

Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
parent 30b0c37b
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -618,9 +618,11 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
	memcpy(ses->cmnd, scmd->cmnd, sizeof(scmd->cmnd));
	ses->data_direction = scmd->sc_data_direction;
	ses->sdb = scmd->sdb;
	ses->next_rq = scmd->request->next_rq;
	ses->result = scmd->result;

	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
	scmd->request->next_rq = NULL;

	if (sense_bytes) {
		scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -673,6 +675,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
	memcpy(scmd->cmnd, ses->cmnd, sizeof(scmd->cmnd));
	scmd->sc_data_direction = ses->data_direction;
	scmd->sdb = ses->sdb;
	scmd->request->next_rq = ses->next_rq;
	scmd->result = ses->result;
}
EXPORT_SYMBOL(scsi_eh_restore_cmnd);
+91 −15
Original line number Diff line number Diff line
@@ -64,6 +64,8 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
};
#undef SP

static struct kmem_cache *scsi_bidi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

/*
@@ -790,9 +792,37 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}
}
EXPORT_SYMBOL(scsi_release_buffers);

/*
 * Bidi commands must be completed as a whole, both sides at once.
 * If only part of the bytes were transferred and the LLD reported this via
 * scsi_in()->resid and/or scsi_out()->resid, that information is left
 * in req->data_len and req->next_rq->data_len; the upper-layer driver
 * can decide what to do with it.
 */
void scsi_end_bidi_request(struct scsi_cmnd *cmd)
{
	/* Complete the write (out) and read (in) sides in a single call. */
	blk_end_bidi_request(cmd->request, 0, scsi_out(cmd)->resid,
							scsi_in(cmd)->resid);
	/* Frees both sg tables, including the bidi_read scsi_data_buffer. */
	scsi_release_buffers(cmd);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
}

/*
 * Function:    scsi_io_completion()
 *
@@ -854,9 +884,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
				req->sense_len = len;
			}
		}
		if (scsi_bidi_cmnd(cmd)) {
			/* will also release_buffers */
			scsi_end_bidi_request(cmd);
			return;
		}
		req->data_len = scsi_get_resid(cmd);
	}

	BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
	scsi_release_buffers(cmd);

	/*
@@ -982,28 +1018,16 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
	scsi_end_request(cmd, -EIO, this_count, !result);
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	struct request     *req = cmd->request;
	int count;
	struct scsi_data_buffer *sdb = &cmd->sdb;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		scsi_unprep_request(req);
		return BLKPREP_DEFER;
	}

@@ -1022,6 +1046,50 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
	sdb->table.nents = count;
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.  Sets up the sg table for the
 *		command and, for a bidi request, allocates and sets up a
 *		second scsi_data_buffer on request->next_rq->special for the
 *		bidi_read side.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(cmd->request)) {
		/*
		 * Only now does the command become bidi: a second
		 * scsi_data_buffer is attached for the bidi_read request.
		 * NOTE(review): GFP_ATOMIC is used even when the caller
		 * passed a more permissive gfp_mask — confirm intended.
		 */
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_bidi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		cmd->request->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
								    GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	return BLKPREP_OK;

err_exit:
	/* Releases the bidi_read buffer too, if it was attached above. */
	scsi_release_buffers(cmd);
	if (error == BLKPREP_KILL)
		scsi_put_command(cmd);
	else /* BLKPREP_DEFER */
		/*
		 * NOTE(review): scsi_init_sgtable() already calls
		 * scsi_unprep_request() on its own allocation failure, so
		 * this path may unprep the request twice — verify.
		 */
		scsi_unprep_request(cmd->request);

	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
@@ -1639,6 +1707,14 @@ int __init scsi_init_queue(void)
		return -ENOMEM;
	}

	scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb",
					sizeof(struct scsi_data_buffer),
					0, 0, NULL);
	if (!scsi_bidi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);
+18 −1
Original line number Diff line number Diff line
@@ -2,12 +2,12 @@
#define _SCSI_SCSI_CMND_H

#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>

struct request;
struct Scsi_Host;
struct scsi_device;

@@ -158,4 +158,21 @@ static inline int scsi_get_resid(struct scsi_cmnd *cmd)
#define scsi_for_each_sg(cmd, sg, nseg, __i)			\
	for_each_sg(scsi_sglist(cmd), sg, nseg, __i)

static inline int scsi_bidi_cmnd(struct scsi_cmnd *cmd)
{
	return blk_bidi_rq(cmd->request) &&
		(cmd->request->next_rq->special != NULL);
}

/*
 * Return the scsi_data_buffer holding the read (data-in) side of the
 * command: the bidi buffer on next_rq->special for a bidi command,
 * otherwise the command's own sdb.
 */
static inline struct scsi_data_buffer *scsi_in(struct scsi_cmnd *cmd)
{
	if (scsi_bidi_cmnd(cmd))
		return cmd->request->next_rq->special;
	return &cmd->sdb;
}

/*
 * Return the scsi_data_buffer holding the write (data-out) side of the
 * command; this is always the command's own sdb, even for bidi commands.
 */
static inline struct scsi_data_buffer *scsi_out(struct scsi_cmnd *cmd)
{
	return &cmd->sdb;
}

#endif /* _SCSI_SCSI_CMND_H */
+1 −0
Original line number Diff line number Diff line
@@ -74,6 +74,7 @@ struct scsi_eh_save {
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];
	struct scsi_data_buffer sdb;
	struct request *next_rq;

	/* new command support */
	struct scatterlist sense_sgl;