Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 48efe453 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull SCSI target updates from Nicholas Bellinger:
 "Lots of activity again this round for I/O performance optimizations
  (per-cpu IDA pre-allocation for vhost + iscsi/target), and the
  addition of new fabric independent features to target-core
  (COMPARE_AND_WRITE + EXTENDED_COPY).

  The main highlights include:

   - Support for iscsi-target login multiplexing across individual
     network portals
   - Generic Per-cpu IDA logic (kent + akpm + clameter)
   - Conversion of vhost to use per-cpu IDA pre-allocation for
     descriptors, SGLs and userspace page pointer list
   - Conversion of iscsi-target + iser-target to use per-cpu IDA
     pre-allocation for descriptors
   - Add support for generic COMPARE_AND_WRITE (AtomicTestandSet)
     emulation for virtual backend drivers
   - Add support for generic EXTENDED_COPY (CopyOffload) emulation for
     virtual backend drivers.
   - Add support for fast memory registration mode to iser-target (Vu)

  The patches to add COMPARE_AND_WRITE and EXTENDED_COPY support are of
  particular significance, which make us the first and only open source
  target to support the full set of VAAI primitives.

  Currently Linux clients are lacking upstream support to actually
  utilize these primitives.  However, with server side support now in
  place for folks like MKP + ZAB working on the client, this logic once
  reserved for the highest end of storage arrays, can now be run in VMs
  on their laptops"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (50 commits)
  target/iscsi: Bump versions to v4.1.0
  target: Update copyright ownership/year information to 2013
  iscsi-target: Bump default TCP listen backlog to 256
  target: Fix >= v3.9+ regression in PR APTPL + ALUA metadata write-out
  iscsi-target: Bump default CmdSN Depth to 64
  iscsi-target: Remove unnecessary wait_for_completion in iscsi_get_thread_set
  iscsi-target: Add thread_set->ts_activate_sem + use common deallocate
  iscsi-target: Fix race with thread_pre_handler flush_signals + ISCSI_THREAD_SET_DIE
  target: remove unused including <linux/version.h>
  iser-target: introduce fast memory registration mode (FRWR)
  iser-target: generalize rdma memory registration and cleanup
  iser-target: move rdma wr processing to a shared function
  target: Enable global EXTENDED_COPY setup/release
  target: Add Third Party Copy (3PC) bit in INQUIRY response
  target: Enable EXTENDED_COPY setup in spc_parse_cdb
  target: Add support for EXTENDED_COPY copy offload emulation
  target: Avoid non-existent tg_pt_gp_mem in target_alua_state_check
  target: Add global device list for EXTENDED_COPY
  target: Make helpers non static for EXTENDED_COPY command setup
  target: Make spc_parse_naa_6h_vendor_specific non static
  ...
parents ac4de954 2999ee7f
Loading
Loading
Loading
Loading
+521 −226

File changed.

Preview size limit exceeded, changes collapsed.

+24 −2
Original line number Original line Diff line number Diff line
@@ -5,6 +5,7 @@
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm.h>


#define ISERT_RDMA_LISTEN_BACKLOG	10
#define ISERT_RDMA_LISTEN_BACKLOG	10
#define ISCSI_ISER_SG_TABLESIZE		256


enum isert_desc_type {
enum isert_desc_type {
	ISCSI_TX_CONTROL,
	ISCSI_TX_CONTROL,
@@ -45,15 +46,26 @@ struct iser_tx_desc {
	struct ib_send_wr send_wr;
	struct ib_send_wr send_wr;
} __packed;
} __packed;


struct fast_reg_descriptor {
	struct list_head	list;
	struct ib_mr		*data_mr;
	struct ib_fast_reg_page_list	*data_frpl;
	bool			valid;
};

struct isert_rdma_wr {
struct isert_rdma_wr {
	struct list_head	wr_list;
	struct list_head	wr_list;
	struct isert_cmd	*isert_cmd;
	struct isert_cmd	*isert_cmd;
	enum iser_ib_op_code	iser_ib_op;
	enum iser_ib_op_code	iser_ib_op;
	struct ib_sge		*ib_sge;
	struct ib_sge		*ib_sge;
	struct ib_sge		s_ib_sge;
	int			num_sge;
	int			num_sge;
	struct scatterlist	*sge;
	struct scatterlist	*sge;
	int			send_wr_num;
	int			send_wr_num;
	struct ib_send_wr	*send_wr;
	struct ib_send_wr	*send_wr;
	struct ib_send_wr	s_send_wr;
	u32			cur_rdma_length;
	struct fast_reg_descriptor *fr_desc;
};
};


struct isert_cmd {
struct isert_cmd {
@@ -67,8 +79,7 @@ struct isert_cmd {
	u32			write_va_off;
	u32			write_va_off;
	u32			rdma_wr_num;
	u32			rdma_wr_num;
	struct isert_conn	*conn;
	struct isert_conn	*conn;
	struct iscsi_cmd	iscsi_cmd;
	struct iscsi_cmd	*iscsi_cmd;
	struct ib_sge		*ib_sge;
	struct iser_tx_desc	tx_desc;
	struct iser_tx_desc	tx_desc;
	struct isert_rdma_wr	rdma_wr;
	struct isert_rdma_wr	rdma_wr;
	struct work_struct	comp_work;
	struct work_struct	comp_work;
@@ -106,6 +117,10 @@ struct isert_conn {
	wait_queue_head_t	conn_wait;
	wait_queue_head_t	conn_wait;
	wait_queue_head_t	conn_wait_comp_err;
	wait_queue_head_t	conn_wait_comp_err;
	struct kref		conn_kref;
	struct kref		conn_kref;
	struct list_head	conn_frwr_pool;
	int			conn_frwr_pool_size;
	/* lock to protect frwr_pool */
	spinlock_t		conn_lock;
};
};


#define ISERT_MAX_CQ 64
#define ISERT_MAX_CQ 64
@@ -118,6 +133,7 @@ struct isert_cq_desc {
};
};


struct isert_device {
struct isert_device {
	int			use_frwr;
	int			cqs_used;
	int			cqs_used;
	int			refcount;
	int			refcount;
	int			cq_active_qps[ISERT_MAX_CQ];
	int			cq_active_qps[ISERT_MAX_CQ];
@@ -128,6 +144,12 @@ struct isert_device {
	struct ib_cq		*dev_tx_cq[ISERT_MAX_CQ];
	struct ib_cq		*dev_tx_cq[ISERT_MAX_CQ];
	struct isert_cq_desc	*cq_desc;
	struct isert_cq_desc	*cq_desc;
	struct list_head	dev_node;
	struct list_head	dev_node;
	struct ib_device_attr	dev_attr;
	int			(*reg_rdma_mem)(struct iscsi_conn *conn,
						    struct iscsi_cmd *cmd,
						    struct isert_rdma_wr *wr);
	void			(*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
						  struct isert_conn *isert_conn);
};
};


struct isert_np {
struct isert_np {
+1 −1
Original line number Original line Diff line number Diff line
@@ -10,7 +10,7 @@
 *
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *
 *  Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  modify it under the terms of the GNU General Public License
+24 −35
Original line number Original line Diff line number Diff line
@@ -2,12 +2,9 @@
 * This file contains tcm implementation using v4 configfs fabric infrastructure
 * This file contains tcm implementation using v4 configfs fabric infrastructure
 * for QLogic target mode HBAs
 * for QLogic target mode HBAs
 *
 *
 * (c) Copyright 2010-2011 RisingTide Systems LLC.
 * (c) Copyright 2010-2013 Datera, Inc.
 *
 *
 * Licensed to the Linux Foundation under the General Public License (GPL)
 * Author: Nicholas A. Bellinger <nab@daterainc.com>
 * version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 *
 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
 * the TCM_FC / Open-FCoE.org fabric module.
 * the TCM_FC / Open-FCoE.org fabric module.
@@ -360,6 +357,14 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
	return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
	return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
}
}


static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only;
}

static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
	struct se_portal_group *se_tpg)
	struct se_portal_group *se_tpg)
{
{
@@ -489,38 +494,13 @@ static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
	return 0;
	return 0;
}
}


/*
 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
 * that data is coming from the target (eg handling a READ).  However,
 * this is just the opposite of what we have to tell the DMA mapping
 * layer -- eg when handling a READ, the HBA will have to DMA the data
 * out of memory so it can send it to the initiator, which means we
 * need to use DMA_TO_DEVICE when we map the data.
 */
static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_BIDI)
		return DMA_BIDIRECTIONAL;

	switch (se_cmd->data_direction) {
	case DMA_TO_DEVICE:
		return DMA_FROM_DEVICE;
	case DMA_FROM_DEVICE:
		return DMA_TO_DEVICE;
	case DMA_NONE:
	default:
		return DMA_NONE;
	}
}

static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);
				struct qla_tgt_cmd, se_cmd);


	cmd->bufflen = se_cmd->data_length;
	cmd->bufflen = se_cmd->data_length;
	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);


	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;
	cmd->sg = se_cmd->t_data_sg;
@@ -656,7 +636,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
				struct qla_tgt_cmd, se_cmd);
				struct qla_tgt_cmd, se_cmd);


	cmd->bufflen = se_cmd->data_length;
	cmd->bufflen = se_cmd->data_length;
	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);


	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg_cnt = se_cmd->t_data_nents;
@@ -680,7 +660,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
	cmd->sg = NULL;
	cmd->sg = NULL;
	cmd->sg_cnt = 0;
	cmd->sg_cnt = 0;
	cmd->offset = 0;
	cmd->offset = 0;
	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);


	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
@@ -939,11 +919,19 @@ DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);


/*
 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only
 */
DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only);
DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
	&tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
	&tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
	&tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
	&tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
	&tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
	&tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
	&tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
	&tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
	&tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr,
	NULL,
	NULL,
};
};


@@ -1042,6 +1030,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
	QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
	QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
	QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
	QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
	QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
	QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
	QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1;


	ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
	ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -1736,7 +1725,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
					tcm_qla2xxx_check_demo_write_protect,
					tcm_qla2xxx_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect =
	.tpg_check_prod_mode_write_protect =
					tcm_qla2xxx_check_prod_write_protect,
					tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
@@ -1784,7 +1773,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_true,
	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_true,
	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
	.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
	.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
	.tpg_check_demo_mode_login_only	= tcm_qla2xxx_check_true,
	.tpg_check_demo_mode_login_only	= tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
+1 −0
Original line number Original line Diff line number Diff line
@@ -29,6 +29,7 @@ struct tcm_qla2xxx_tpg_attrib {
	int cache_dynamic_acls;
	int cache_dynamic_acls;
	int demo_mode_write_protect;
	int demo_mode_write_protect;
	int prod_mode_write_protect;
	int prod_mode_write_protect;
	int demo_mode_login_only;
};
};


struct tcm_qla2xxx_tpg {
struct tcm_qla2xxx_tpg {
Loading