Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5951146d authored by Andy Grover's avatar Andy Grover Committed by Nicholas Bellinger
Browse files

target: More core cleanups from AGrover (round 2)



This patch contains the squashed version of the second round of target core
cleanups and simplifications from Andy and Co.   It also contains a handful
of fixes to address bugs in the original series and other minor cleanups.

Here is the condensed shortlog:

target: Remove unneeded casts to void*
target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun
target: Make t_task a member of se_cmd, not a pointer
target: Handle functions returning "-2"
target: Use cmd->se_dev over cmd->se_lun->lun_se_dev
target: Embed qr in struct se_cmd
target: Replace embedded struct se_queue_req with a list_head
target: Rename list_heads that are nodes in struct se_cmd to "*_node"
target: Fold transport_device_setup_cmd() into lookup_{tmr,cmd}_lun()
target: Make t_mem_list and t_mem_list_bidi members of t_task
target: Add comment & cleanup transport_map_sg_to_mem()
target: Remove unneeded checks in transport_free_pages()

(Roland: Fix se_queue_req removal leftovers OOPs)
(nab: Fix transport_lookup_tmr_lun failure case)
(nab: Fix list_empty(&cmd->t_task.t_mem_bidi_list) inversion bugs)

Signed-off-by: default avatarAndy Grover <agrover@redhat.com>
Signed-off-by: default avatarRoland Dreier <roland@purestorage.com>
Signed-off-by: default avatarNicholas Bellinger <nab@linux-iscsi.org>
parent f22c1196
Loading
Loading
Loading
Loading
+22 −34
Original line number Diff line number Diff line
@@ -118,17 +118,16 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
	 */
	if (scsi_bidi_cmnd(sc))
		se_cmd->t_task->t_tasks_bidi = 1;
		se_cmd->t_task.t_tasks_bidi = 1;
	/*
	 * Locate the struct se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_get_lun_for_cmd(se_cmd, tl_cmd->sc->device->lun) < 0) {
	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		return NULL;
	}

	transport_device_setup_cmd(se_cmd);
	return se_cmd;
}

@@ -143,17 +142,17 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
	void *mem_ptr, *mem_bidi_ptr = NULL;
	u32 sg_no_bidi = 0;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;
	/*
	 * Allocate the necessary tasks to complete the received CDB+data
	 */
	ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
	if (ret == -1) {
	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		/* Out of Resources */
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	} else if (ret == -2) {
	} else if (ret == -EINVAL) {
		/*
		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
		 */
@@ -165,35 +164,24 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
		 */
		return PYX_TRANSPORT_USE_SENSE_REASON;
	}
	/*
	 * Setup the struct scatterlist memory from the received
	 * struct scsi_cmnd.
	 */
	if (scsi_sg_count(sc)) {
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
		mem_ptr = (void *)scsi_sglist(sc);

	/*
	 * For BIDI commands, pass in the extra READ buffer
	 * to transport_generic_map_mem_to_cmd() below..
	 */
		if (se_cmd->t_task->t_tasks_bidi) {
	if (se_cmd->t_task.t_tasks_bidi) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

			mem_bidi_ptr = (void *)sdb->table.sgl;
			sg_no_bidi = sdb->table.nents;
		}
	} else {
		/*
		 * Used for DMA_NONE
		 */
		mem_ptr = NULL;
		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
	}

	/*
	 * Map the SG memory into struct se_mem->page linked list using the same
	 * physical memory at sg->page_link.
	 */
	ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
			scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

@@ -384,14 +372,14 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
	/*
	 * Allocate the LUN_RESET TMR
	 */
	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
				TMR_LUN_RESET);
	if (IS_ERR(se_cmd->se_tmr_req))
		goto release;
	/*
	 * Locate the underlying TCM struct se_lun from sc->device->lun
	 */
	if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
	if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
		goto release;
	/*
	 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
@@ -904,7 +892,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer,
		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
@@ -1054,7 +1042,7 @@ static int tcm_loop_make_nexus(
	 * transport_register_session()
	 */
	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
			tl_nexus->se_sess, (void *)tl_nexus);
			tl_nexus->se_sess, tl_nexus);
	tl_tpg->tl_hba->tl_nexus = tl_nexus;
	printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
@@ -1242,7 +1230,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
	 * Register the tl_tpg as a emulated SAS TCM Target Endpoint
	 */
	ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
			wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg,
			wwn, &tl_tpg->tl_se_tpg, tl_tpg,
			TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);
+4 −4
Original line number Diff line number Diff line
@@ -61,11 +61,11 @@ struct t10_alua_lu_gp *default_lu_gp;
 */
int core_emulate_report_target_port_groups(struct se_cmd *cmd)
{
	struct se_subsystem_dev *su_dev = cmd->se_lun->lun_se_dev->se_sub_dev;
	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
	struct se_port *port;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
				    Target port group descriptor */

@@ -151,13 +151,13 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 */
int core_emulate_set_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
	unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
	u32 len = 4; /* Skip over RESERVED area in header */
	int alua_access_state, primary = 0, rc;
+31 −31
Original line number Diff line number Diff line
@@ -65,8 +65,8 @@ static int
target_emulate_inquiry_std(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	unsigned char *buf = cmd->t_task->t_task_buf;
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf = cmd->t_task.t_task_buf;

	/*
	 * Make sure we at least have 6 bytes of INQUIRY response
@@ -128,7 +128,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
	 * Registered Extended LUN WWN has been set via ConfigFS
	 * during device creation/restart.
	 */
	if (cmd->se_lun->lun_se_dev->se_sub_dev->su_dev_flags &
	if (cmd->se_dev->se_sub_dev->su_dev_flags &
			SDF_EMULATED_VPD_UNIT_SERIAL) {
		buf[3] = 3;
		buf[5] = 0x80;
@@ -143,7 +143,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	struct se_device *dev = cmd->se_dev;
	u16 len = 0;

	buf[1] = 0x80;
@@ -176,7 +176,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_port *port = NULL;
	struct se_portal_group *tpg = NULL;
@@ -477,7 +477,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (cmd->se_lun->lun_se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
	if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
		buf[6] = 0x01;
	return 0;
}
@@ -486,7 +486,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	struct se_device *dev = cmd->se_dev;
	int have_tp = 0;

	/*
@@ -568,7 +568,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	struct se_device *dev = cmd->se_dev;

	/*
	 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
@@ -620,9 +620,9 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	unsigned char *buf = cmd->t_task->t_task_buf;
	unsigned char *cdb = cmd->t_task->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf = cmd->t_task.t_task_buf;
	unsigned char *cdb = cmd->t_task.t_task_cdb;

	if (!(cdb[1] & 0x1))
		return target_emulate_inquiry_std(cmd);
@@ -665,8 +665,8 @@ target_emulate_inquiry(struct se_cmd *cmd)
static int
target_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	unsigned char *buf = cmd->t_task->t_task_buf;
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf = cmd->t_task.t_task_buf;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	u32 blocks;

@@ -695,8 +695,8 @@ target_emulate_readcapacity(struct se_cmd *cmd)
static int
target_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	unsigned char *buf = cmd->t_task->t_task_buf;
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf = cmd->t_task.t_task_buf;
	unsigned long long blocks = dev->transport->get_blocks(dev);

	buf[0] = (blocks >> 56) & 0xff;
@@ -830,9 +830,9 @@ target_modesense_dpofua(unsigned char *buf, int type)
static int
target_emulate_modesense(struct se_cmd *cmd, int ten)
{
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	char *cdb = cmd->t_task->t_task_cdb;
	unsigned char *rbuf = cmd->t_task->t_task_buf;
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task.t_task_cdb;
	unsigned char *rbuf = cmd->t_task.t_task_buf;
	int type = dev->transport->get_device_type(dev);
	int offset = (ten) ? 8 : 4;
	int length = 0;
@@ -903,8 +903,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
static int
target_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task->t_task_cdb;
	unsigned char *buf = cmd->t_task->t_task_buf;
	unsigned char *cdb = cmd->t_task.t_task_cdb;
	unsigned char *buf = cmd->t_task.t_task_buf;
	u8 ua_asc = 0, ua_ascq = 0;

	if (cdb[1] & 0x01) {
@@ -964,9 +964,9 @@ static int
target_emulate_unmap(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
	unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf = cmd->t_task.t_task_buf, *ptr = NULL;
	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
	sector_t lba;
	unsigned int size = cmd->data_length, range;
	int ret, offset;
@@ -1011,8 +1011,8 @@ static int
target_emulate_write_same(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	sector_t lba = cmd->t_task->t_task_lba;
	struct se_device *dev = cmd->se_dev;
	sector_t lba = cmd->t_task.t_task_lba;
	unsigned int range;
	int ret;

@@ -1036,11 +1036,11 @@ int
transport_emulate_control_cdb(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_lun->lun_se_dev;
	struct se_device *dev = cmd->se_dev;
	unsigned short service_action;
	int ret = 0;

	switch (cmd->t_task->t_task_cdb[0]) {
	switch (cmd->t_task.t_task_cdb[0]) {
	case INQUIRY:
		ret = target_emulate_inquiry(cmd);
		break;
@@ -1054,13 +1054,13 @@ transport_emulate_control_cdb(struct se_task *task)
		ret = target_emulate_modesense(cmd, 1);
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
		switch (cmd->t_task.t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			ret = target_emulate_readcapacity_16(cmd);
			break;
		default:
			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
				cmd->t_task->t_task_cdb[1] & 0x1f);
				cmd->t_task.t_task_cdb[1] & 0x1f);
			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
		break;
@@ -1085,7 +1085,7 @@ transport_emulate_control_cdb(struct se_task *task)
		break;
	case VARIABLE_LENGTH_CMD:
		service_action =
			get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
			get_unaligned_be16(&cmd->t_task.t_task_cdb[8]);
		switch (service_action) {
		case WRITE_SAME_32:
			if (!dev->transport->do_discard) {
@@ -1124,7 +1124,7 @@ transport_emulate_control_cdb(struct se_task *task)
		break;
	default:
		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
			cmd->t_task->t_task_cdb[0], dev->transport->name);
			cmd->t_task.t_task_cdb[0], dev->transport->name);
		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
	}

+2 −2
Original line number Diff line number Diff line
@@ -2037,7 +2037,7 @@ static ssize_t target_core_dev_show(struct config_item *item,
	if (!(tc_attr->show))
		return -EINVAL;

	return tc_attr->show((void *)se_dev, page);
	return tc_attr->show(se_dev, page);
}

static ssize_t target_core_dev_store(struct config_item *item,
@@ -2053,7 +2053,7 @@ static ssize_t target_core_dev_store(struct config_item *item,
	if (!(tc_attr->store))
		return -EINVAL;

	return tc_attr->store((void *)se_dev, page, count);
	return tc_attr->store(se_dev, page, count);
}

static struct configfs_item_operations target_core_dev_item_ops = {
+78 −89
Original line number Diff line number Diff line
@@ -59,15 +59,12 @@ static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

int transport_get_lun_for_cmd(
	struct se_cmd *se_cmd,
	u32 unpacked_lun)
int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;
	int read_only = 0;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
@@ -76,46 +73,42 @@ int transport_get_lun_for_cmd(
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	deve = se_cmd->se_deve =
			&se_sess->se_node_acl->device_list[unpacked_lun];
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (se_cmd) {
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

			if (se_cmd->data_direction == DMA_TO_DEVICE) {
				if (deve->lun_flags &
						TRANSPORT_LUNFLAGS_READ_ONLY) {
					read_only = 1;
					goto out;
		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
			} else if (se_cmd->data_direction ==
				   DMA_FROM_DEVICE) {
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;
			}
		}

		deve->deve_cmds++;

		se_lun = se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
out:
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	if (!se_lun) {
		if (read_only) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -EACCES;
		} else {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
@@ -139,28 +132,28 @@ out:
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}
#if 0
			printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
				se_cmd->se_tfo->get_fabric_name());
#endif
			se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	{
	struct se_device *dev = se_lun->lun_se_dev;
	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irq(&dev->stats_lock);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
@@ -168,30 +161,22 @@ out:
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irq(&dev->stats_lock);
	}

	/*
	 * Add the iscsi_cmd_t to the struct se_lun's cmd list.  This list is used
	 * for tracking state of struct se_cmds during LUN shutdown events.
	 */
	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
	atomic_set(&se_cmd->t_task->transport_lun_active, 1);
#if 0
	printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
		se_cmd->se_tfo->get_task_tag(se_cmd), se_lun->unpacked_lun);
#endif
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	atomic_set(&se_cmd->t_task.transport_lun_active, 1);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_cmd);
EXPORT_SYMBOL(transport_lookup_cmd_lun);

int transport_get_lun_for_tmr(
	struct se_cmd *se_cmd,
	u32 unpacked_lun)
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_device *dev = NULL;
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
@@ -204,15 +189,16 @@ int transport_get_lun_for_tmr(
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	deve = se_cmd->se_deve =
			&se_sess->se_node_acl->device_list[unpacked_lun];
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
		dev = se_lun->lun_se_dev;
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
/*		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
		se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

@@ -226,21 +212,24 @@ int transport_get_lun_for_tmr(
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}
	se_tmr->tmr_dev = dev;

	spin_lock(&dev->se_tmr_lock);
	list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
	spin_unlock(&dev->se_tmr_lock);
	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock(&se_tmr->tmr_dev->se_tmr_lock);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock(&se_tmr->tmr_dev->se_tmr_lock);

	return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_tmr);
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
@@ -667,10 +656,10 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
	struct se_lun *se_lun;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_task *se_task;
	unsigned char *buf = se_cmd->t_task->t_task_buf;
	unsigned char *buf = se_cmd->t_task.t_task_buf;
	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

	list_for_each_entry(se_task, &se_cmd->t_task->t_task_list, t_list)
	list_for_each_entry(se_task, &se_cmd->t_task.t_task_list, t_list)
		break;

	if (!(se_task)) {
Loading