Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4be0ed42 authored by Linus Torvalds
Browse files

Merge branch '3.1-rc-fixes' of git://linux-iscsi.org/target-pending

* '3.1-rc-fixes' of git://linux-iscsi.org/target-pending:
  iscsi-target: Fix sendpage breakage with proper padding+DataDigest iovec offsets
  iscsi-target: Disable markers + remove dangerous local scope array usage
  target: Skip non hex characters for VPD=0x83 NAA IEEE Registered Extended
  tcm_fc: Work queue based approach instead of managing own thread and event based mechanism
  tcm_fc: Invalidation of DDP context for FCoE target in error conditions
  target: Fix race between multiple invocations of target_qf_do_work()
parents 279b1e0f 40b05497
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -1430,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(
	u8 DataSequenceInOrder = 0;
	u8 DataSequenceInOrder = 0;
	u8 ErrorRecoveryLevel = 0, SessionType = 0;
	u8 ErrorRecoveryLevel = 0, SessionType = 0;
	u8 IFMarker = 0, OFMarker = 0;
	u8 IFMarker = 0, OFMarker = 0;
	u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
	u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
	u32 FirstBurstLength = 0, MaxBurstLength = 0;
	u32 FirstBurstLength = 0, MaxBurstLength = 0;
	struct iscsi_param *param = NULL;
	struct iscsi_param *param = NULL;


+21 −249
Original line number Original line Diff line number Diff line
@@ -874,40 +874,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)
	spin_unlock_bh(&sess->session_usage_lock);
	spin_unlock_bh(&sess->session_usage_lock);
}
}


/*
 *	Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
 *	array counts needed for sync and steering.  Walks the payload length
 *	and counts how many fixed-interval markers fall inside it, recording
 *	the extra iovec and marker-slot requirements in *count.  Always
 *	returns 0.
 */
static int iscsit_determine_sync_and_steering_counts(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	u32 length = count->data_length;
	u32 marker, markint;

	count->sync_and_steering = 1;

	/* Bytes remaining until the next marker on this connection. */
	marker = (count->type == ISCSI_RX_DATA) ?
			conn->of_marker : conn->if_marker;
	/* OFMarkInt/IFMarkInt are negotiated in 32-bit words; convert to bytes. */
	markint = (count->type == ISCSI_RX_DATA) ?
			(conn->conn_ops->OFMarkInt * 4) :
			(conn->conn_ops->IFMarkInt * 4);
	count->ss_iov_count = count->iov_count;

	/*
	 * Each marker inside the transfer costs 3 extra iovecs (the data
	 * split plus two MARKER_SIZE/2 marker halves) and 2 marker-value
	 * slots; see iscsit_do_[rx,tx]_data() which consume these counts.
	 */
	while (length > 0) {
		if (length >= marker) {
			count->ss_iov_count += 3;
			count->ss_marker_count += 2;

			length -= marker;
			marker = markint;
		} else
			length = 0;
	}

	return 0;
}

/*
/*
 *	Setup conn->if_marker and conn->of_marker values based upon
 *	Setup conn->if_marker and conn->of_marker values based upon
 *	the initial marker-less interval. (see iSCSI v19 A.2)
 *	the initial marker-less interval. (see iSCSI v19 A.2)
@@ -1290,7 +1256,7 @@ int iscsit_fe_sendpage_sg(
	struct kvec iov;
	struct kvec iov;
	u32 tx_hdr_size, data_len;
	u32 tx_hdr_size, data_len;
	u32 offset = cmd->first_data_sg_off;
	u32 offset = cmd->first_data_sg_off;
	int tx_sent;
	int tx_sent, iov_off;


send_hdr:
send_hdr:
	tx_hdr_size = ISCSI_HDR_LEN;
	tx_hdr_size = ISCSI_HDR_LEN;
@@ -1310,9 +1276,19 @@ int iscsit_fe_sendpage_sg(
	}
	}


	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
	if (conn->conn_ops->DataDigest)
	/*
	 * Set iov_off used by padding and data digest tx_data() calls below
	 * in order to determine proper offset into cmd->iov_data[]
	 */
	if (conn->conn_ops->DataDigest) {
		data_len -= ISCSI_CRC_LEN;
		data_len -= ISCSI_CRC_LEN;

		if (cmd->padding)
			iov_off = (cmd->iov_data_count - 2);
		else
			iov_off = (cmd->iov_data_count - 1);
	} else {
		iov_off = (cmd->iov_data_count - 1);
	}
	/*
	/*
	 * Perform sendpage() for each page in the scatterlist
	 * Perform sendpage() for each page in the scatterlist
	 */
	 */
@@ -1341,8 +1317,7 @@ int iscsit_fe_sendpage_sg(


send_padding:
send_padding:
	if (cmd->padding) {
	if (cmd->padding) {
		struct kvec *iov_p =
		struct kvec *iov_p = &cmd->iov_data[iov_off++];
			&cmd->iov_data[cmd->iov_data_count-1];


		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
		if (cmd->padding != tx_sent) {
		if (cmd->padding != tx_sent) {
@@ -1356,8 +1331,7 @@ int iscsit_fe_sendpage_sg(


send_datacrc:
send_datacrc:
	if (conn->conn_ops->DataDigest) {
	if (conn->conn_ops->DataDigest) {
		struct kvec *iov_d =
		struct kvec *iov_d = &cmd->iov_data[iov_off];
			&cmd->iov_data[cmd->iov_data_count];


		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
		if (ISCSI_CRC_LEN != tx_sent) {
		if (ISCSI_CRC_LEN != tx_sent) {
@@ -1431,8 +1405,7 @@ static int iscsit_do_rx_data(
	struct iscsi_data_count *count)
	struct iscsi_data_count *count)
{
{
	int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
	int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
	u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
	struct kvec *iov_p;
	struct kvec iov[count->ss_iov_count], *iov_p;
	struct msghdr msg;
	struct msghdr msg;


	if (!conn || !conn->sock || !conn->conn_ops)
	if (!conn || !conn->sock || !conn->conn_ops)
@@ -1440,93 +1413,8 @@ static int iscsit_do_rx_data(


	memset(&msg, 0, sizeof(struct msghdr));
	memset(&msg, 0, sizeof(struct msghdr));


	if (count->sync_and_steering) {
		int size = 0;
		u32 i, orig_iov_count = 0;
		u32 orig_iov_len = 0, orig_iov_loc = 0;
		u32 iov_count = 0, per_iov_bytes = 0;
		u32 *rx_marker, old_rx_marker = 0;
		struct kvec *iov_record;

		memset(&rx_marker_val, 0,
				count->ss_marker_count * sizeof(u32));
		memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));

		iov_record = count->iov;
		orig_iov_count = count->iov_count;
		rx_marker = &conn->of_marker;

		i = 0;
		size = data;
		orig_iov_len = iov_record[orig_iov_loc].iov_len;
		while (size > 0) {
			pr_debug("rx_data: #1 orig_iov_len %u,"
			" orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
			pr_debug("rx_data: #2 rx_marker %u, size"
				" %u\n", *rx_marker, size);

			if (orig_iov_len >= *rx_marker) {
				iov[iov_count].iov_len = *rx_marker;
				iov[iov_count++].iov_base =
					(iov_record[orig_iov_loc].iov_base +
						per_iov_bytes);

				iov[iov_count].iov_len = (MARKER_SIZE / 2);
				iov[iov_count++].iov_base =
					&rx_marker_val[rx_marker_iov++];
				iov[iov_count].iov_len = (MARKER_SIZE / 2);
				iov[iov_count++].iov_base =
					&rx_marker_val[rx_marker_iov++];
				old_rx_marker = *rx_marker;

				/*
				 * OFMarkInt is in 32-bit words.
				 */
				*rx_marker = (conn->conn_ops->OFMarkInt * 4);
				size -= old_rx_marker;
				orig_iov_len -= old_rx_marker;
				per_iov_bytes += old_rx_marker;

				pr_debug("rx_data: #3 new_rx_marker"
					" %u, size %u\n", *rx_marker, size);
			} else {
				iov[iov_count].iov_len = orig_iov_len;
				iov[iov_count++].iov_base =
					(iov_record[orig_iov_loc].iov_base +
						per_iov_bytes);

				per_iov_bytes = 0;
				*rx_marker -= orig_iov_len;
				size -= orig_iov_len;

				if (size)
					orig_iov_len =
					iov_record[++orig_iov_loc].iov_len;

				pr_debug("rx_data: #4 new_rx_marker"
					" %u, size %u\n", *rx_marker, size);
			}
		}
		data += (rx_marker_iov * (MARKER_SIZE / 2));

		iov_p	= &iov[0];
		iov_len	= iov_count;

		if (iov_count > count->ss_iov_count) {
			pr_err("iov_count: %d, count->ss_iov_count:"
				" %d\n", iov_count, count->ss_iov_count);
			return -1;
		}
		if (rx_marker_iov > count->ss_marker_count) {
			pr_err("rx_marker_iov: %d, count->ss_marker"
				"_count: %d\n", rx_marker_iov,
				count->ss_marker_count);
			return -1;
		}
	} else {
	iov_p = count->iov;
	iov_p = count->iov;
	iov_len	= count->iov_count;
	iov_len	= count->iov_count;
	}


	while (total_rx < data) {
	while (total_rx < data) {
		rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
		rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1541,16 +1429,6 @@ static int iscsit_do_rx_data(
				rx_loop, total_rx, data);
				rx_loop, total_rx, data);
	}
	}


	if (count->sync_and_steering) {
		int j;
		for (j = 0; j < rx_marker_iov; j++) {
			pr_debug("rx_data: #5 j: %d, offset: %d\n",
				j, rx_marker_val[j]);
			conn->of_marker_offset = rx_marker_val[j];
		}
		total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
	}

	return total_rx;
	return total_rx;
}
}


@@ -1559,8 +1437,7 @@ static int iscsit_do_tx_data(
	struct iscsi_data_count *count)
	struct iscsi_data_count *count)
{
{
	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
	u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
	struct kvec *iov_p;
	struct kvec iov[count->ss_iov_count], *iov_p;
	struct msghdr msg;
	struct msghdr msg;


	if (!conn || !conn->sock || !conn->conn_ops)
	if (!conn || !conn->sock || !conn->conn_ops)
@@ -1573,98 +1450,8 @@ static int iscsit_do_tx_data(


	memset(&msg, 0, sizeof(struct msghdr));
	memset(&msg, 0, sizeof(struct msghdr));


	if (count->sync_and_steering) {
		int size = 0;
		u32 i, orig_iov_count = 0;
		u32 orig_iov_len = 0, orig_iov_loc = 0;
		u32 iov_count = 0, per_iov_bytes = 0;
		u32 *tx_marker, old_tx_marker = 0;
		struct kvec *iov_record;

		memset(&tx_marker_val, 0,
			count->ss_marker_count * sizeof(u32));
		memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));

		iov_record = count->iov;
		orig_iov_count = count->iov_count;
		tx_marker = &conn->if_marker;

		i = 0;
		size = data;
		orig_iov_len = iov_record[orig_iov_loc].iov_len;
		while (size > 0) {
			pr_debug("tx_data: #1 orig_iov_len %u,"
			" orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
			pr_debug("tx_data: #2 tx_marker %u, size"
				" %u\n", *tx_marker, size);

			if (orig_iov_len >= *tx_marker) {
				iov[iov_count].iov_len = *tx_marker;
				iov[iov_count++].iov_base =
					(iov_record[orig_iov_loc].iov_base +
						per_iov_bytes);

				tx_marker_val[tx_marker_iov] =
						(size - *tx_marker);
				iov[iov_count].iov_len = (MARKER_SIZE / 2);
				iov[iov_count++].iov_base =
					&tx_marker_val[tx_marker_iov++];
				iov[iov_count].iov_len = (MARKER_SIZE / 2);
				iov[iov_count++].iov_base =
					&tx_marker_val[tx_marker_iov++];
				old_tx_marker = *tx_marker;

				/*
				 * IFMarkInt is in 32-bit words.
				 */
				*tx_marker = (conn->conn_ops->IFMarkInt * 4);
				size -= old_tx_marker;
				orig_iov_len -= old_tx_marker;
				per_iov_bytes += old_tx_marker;

				pr_debug("tx_data: #3 new_tx_marker"
					" %u, size %u\n", *tx_marker, size);
				pr_debug("tx_data: #4 offset %u\n",
					tx_marker_val[tx_marker_iov-1]);
			} else {
				iov[iov_count].iov_len = orig_iov_len;
				iov[iov_count++].iov_base
					= (iov_record[orig_iov_loc].iov_base +
						per_iov_bytes);

				per_iov_bytes = 0;
				*tx_marker -= orig_iov_len;
				size -= orig_iov_len;

				if (size)
					orig_iov_len =
					iov_record[++orig_iov_loc].iov_len;

				pr_debug("tx_data: #5 new_tx_marker"
					" %u, size %u\n", *tx_marker, size);
			}
		}

		data += (tx_marker_iov * (MARKER_SIZE / 2));

		iov_p = &iov[0];
		iov_len = iov_count;

		if (iov_count > count->ss_iov_count) {
			pr_err("iov_count: %d, count->ss_iov_count:"
				" %d\n", iov_count, count->ss_iov_count);
			return -1;
		}
		if (tx_marker_iov > count->ss_marker_count) {
			pr_err("tx_marker_iov: %d, count->ss_marker"
				"_count: %d\n", tx_marker_iov,
				count->ss_marker_count);
			return -1;
		}
	} else {
	iov_p = count->iov;
	iov_p = count->iov;
	iov_len = count->iov_count;
	iov_len = count->iov_count;
	}


	while (total_tx < data) {
	while (total_tx < data) {
		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1679,9 +1466,6 @@ static int iscsit_do_tx_data(
					tx_loop, total_tx, data);
					tx_loop, total_tx, data);
	}
	}


	if (count->sync_and_steering)
		total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));

	return total_tx;
	return total_tx;
}
}


@@ -1702,12 +1486,6 @@ int rx_data(
	c.data_length = data;
	c.data_length = data;
	c.type = ISCSI_RX_DATA;
	c.type = ISCSI_RX_DATA;


	if (conn->conn_ops->OFMarker &&
	   (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
		if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
			return -1;
	}

	return iscsit_do_rx_data(conn, &c);
	return iscsit_do_rx_data(conn, &c);
}
}


@@ -1728,12 +1506,6 @@ int tx_data(
	c.data_length = data;
	c.data_length = data;
	c.type = ISCSI_TX_DATA;
	c.type = ISCSI_TX_DATA;


	if (conn->conn_ops->IFMarker &&
	   (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
		if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
			return -1;
	}

	return iscsit_do_tx_data(conn, &c);
	return iscsit_do_tx_data(conn, &c);
}
}


+33 −2
Original line number Original line Diff line number Diff line
@@ -24,6 +24,7 @@
 */
 */


#include <linux/kernel.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi.h>


@@ -154,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
	return 0;
	return 0;
}
}


/*
 * Fill the VENDOR SPECIFIC IDENTIFIER (+ EXTENSION) nibbles of an NAA
 * IEEE Registered Extended designator from the device's unit serial
 * string, skipping any non-hex characters in the serial.
 */
static void
target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off)
{
	unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
	unsigned char *buf = buf_off;
	/* next != 0: the next hex digit fills the LOW nibble of buf[cnt]. */
	int cnt = 0, next = 1;
	/*
	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
	 * to complete the payload.  These are based from VPD=0x80 PRODUCT SERIAL
	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
	 * per device uniqueness.
	 */
	while (*p != '\0') {
		/* 13 bytes = 4-bit NAA remainder + 36 + 64 identifier bits. */
		if (cnt >= 13)
			break;
		/* Skip non-hex characters (e.g. '-' or spaces) in the serial. */
		if (!isxdigit(*p)) {
			p++;
			continue;
		}
		if (next != 0) {
			/* OR into the low nibble; the first byte's high nibble
			 * is pre-set by the caller (the |= preserves it). */
			buf[cnt++] |= hex_to_bin(*p++);
			next = 0;
		} else {
			buf[cnt] = hex_to_bin(*p++) << 4;
			next = 1;
		}
	}
}

/*
/*
 * Device identification VPD, for a complete list of
 * Device identification VPD, for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 * DESIGNATOR TYPEs see spc4r17 Table 459.
@@ -219,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
	 * VENDOR_SPECIFIC_IDENTIFIER and
	 * VENDOR_SPECIFIC_IDENTIFIER and
	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
	 */
	 */
	buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
	target_parse_naa_6h_vendor_specific(dev, &buf[off]);
	hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);


	len = 20;
	len = 20;
	off = (len + 4);
	off = (len + 4);
+4 −5
Original line number Original line Diff line number Diff line
@@ -977,15 +977,17 @@ static void target_qf_do_work(struct work_struct *work)
{
{
	struct se_device *dev = container_of(work, struct se_device,
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;
	struct se_cmd *cmd, *cmd_tmp;


	spin_lock_irq(&dev->qf_cmd_lock);
	spin_lock_irq(&dev->qf_cmd_lock);
	list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);


	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();
		smp_mb__after_atomic_dec();
		spin_unlock_irq(&dev->qf_cmd_lock);


		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -997,10 +999,7 @@ static void target_qf_do_work(struct work_struct *work)
		 * has been added to head of queue
		 * has been added to head of queue
		 */
		 */
		transport_add_cmd_to_queue(cmd, cmd->t_state);
		transport_add_cmd_to_queue(cmd, cmd->t_state);

		spin_lock_irq(&dev->qf_cmd_lock);
	}
	}
	spin_unlock_irq(&dev->qf_cmd_lock);
}
}


unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
+2 −10
Original line number Original line Diff line number Diff line
@@ -98,8 +98,7 @@ struct ft_tpg {
	struct list_head list;		/* linkage in ft_lport_acl tpg_list */
	struct list_head list;		/* linkage in ft_lport_acl tpg_list */
	struct list_head lun_list;	/* head of LUNs */
	struct list_head lun_list;	/* head of LUNs */
	struct se_portal_group se_tpg;
	struct se_portal_group se_tpg;
	struct task_struct *thread;	/* processing thread */
	struct workqueue_struct *workqueue;
	struct se_queue_obj qobj;	/* queue for processing thread */
};
};


struct ft_lport_acl {
struct ft_lport_acl {
@@ -110,16 +109,10 @@ struct ft_lport_acl {
	struct se_wwn fc_lport_wwn;
	struct se_wwn fc_lport_wwn;
};
};


enum ft_cmd_state {
	FC_CMD_ST_NEW = 0,
	FC_CMD_ST_REJ
};

/*
/*
 * Commands
 * Commands
 */
 */
struct ft_cmd {
struct ft_cmd {
	enum ft_cmd_state state;
	u32 lun;                        /* LUN from request */
	u32 lun;                        /* LUN from request */
	struct ft_sess *sess;		/* session held for cmd */
	struct ft_sess *sess;		/* session held for cmd */
	struct fc_seq *seq;		/* sequence in exchange mgr */
	struct fc_seq *seq;		/* sequence in exchange mgr */
@@ -127,7 +120,7 @@ struct ft_cmd {
	struct fc_frame *req_frame;
	struct fc_frame *req_frame;
	unsigned char *cdb;		/* pointer to CDB inside frame */
	unsigned char *cdb;		/* pointer to CDB inside frame */
	u32 write_data_len;		/* data received on writes */
	u32 write_data_len;		/* data received on writes */
	struct se_queue_req se_req;
	struct work_struct work;
	/* Local sense buffer */
	/* Local sense buffer */
	unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
	unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
	u32 was_ddp_setup:1;		/* Set only if ddp is setup */
	u32 was_ddp_setup:1;		/* Set only if ddp is setup */
@@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);
/*
/*
 * other internal functions.
 * other internal functions.
 */
 */
int ft_thread(void *);
void ft_recv_req(struct ft_sess *, struct fc_frame *);
void ft_recv_req(struct ft_sess *, struct fc_frame *);
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
Loading