Commit 312e0c24 authored by Dan Williams

isci: unify can_queue tracking on the tci_pool, uplevel tag assignment



The tci_pool tracks our outstanding command slots, which are also the 'index'
portion of our tags.  Grabbing the tag early in ->lldd_execute_task lets us
drop the isci_host_can_queue() and ->was_tag_assigned_by_user infrastructure.
->was_tag_assigned_by_user required the task context to be duplicated in a
request-local buffer.  With the tci established early we can build the
task_context directly into its final location and skip a memcpy.

With the task context buffer at a known address at request construction, we
have the opportunity/obligation to also fix sgl handling.  This rework feels
like it belongs in another patch, but the sgl handling and task_context are too
intertwined.
1/ fix the 'ab' pair embedded in the task context to point to the 'cd' pair in
   the task context (previously we were prematurely linking to the staging
   buffer).
2/ fix the broken iteration of pio sgls that assumes all sgls are relative to
   the request, and does a dangerous-looking reverse lookup of physical
   address to virtual address.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 9274f45e
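
For orientation, an isci tag packs a small per-slot sequence number together with the task context index (tci) into a single u16; the ISCI_TAG/ISCI_TAG_SEQ/ISCI_TAG_TCI macros used throughout the diff below implement the split. A minimal sketch of that encoding, assuming the sequence occupies the tag's top nibble (the old code's "remove the sequence nibble" comment suggests this; the driver's headers are authoritative for the exact widths):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's constants. */
#define SCI_MAX_IO_REQUESTS	256	/* power of two: low bits of the tag */
#define SCI_MAX_SEQ		16	/* sequence fits in the top nibble */

#define ISCI_TAG(seq, tci)	((uint16_t)((seq) << 12 | (tci)))
#define ISCI_TAG_SEQ(tag)	(((tag) >> 12) & (SCI_MAX_SEQ - 1))
#define ISCI_TAG_TCI(tag)	((tag) & (SCI_MAX_IO_REQUESTS - 1))

int main(void)
{
	uint16_t tag = ISCI_TAG(5, 42);

	/* The tci indexes the hardware task context table; the sequence
	 * number distinguishes successive uses of the same slot. */
	printf("tag=0x%04x seq=%u tci=%u\n", (unsigned)tag,
	       (unsigned)ISCI_TAG_SEQ(tag), (unsigned)ISCI_TAG_TCI(tag));
	return 0;
}
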
+64 −201
@@ -1018,33 +1018,11 @@ done:
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 }

-static void isci_tci_free(struct isci_host *ihost, u16 tci)
-{
-	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
-
-	ihost->tci_pool[tail] = tci;
-	ihost->tci_tail = tail + 1;
-}
-
-static u16 isci_tci_alloc(struct isci_host *ihost)
-{
-	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
-	u16 tci = ihost->tci_pool[head];
-
-	ihost->tci_head = head + 1;
-	return tci;
-}
-
 static u16 isci_tci_active(struct isci_host *ihost)
 {
 	return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
 }

-static u16 isci_tci_space(struct isci_host *ihost)
-{
-	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
-}
-
 static enum sci_status scic_controller_start(struct scic_sds_controller *scic,
 					     u32 timeout)
 {
@@ -1205,6 +1183,11 @@ static void isci_host_completion_routine(unsigned long data)
 				task->task_done(task);
 			}
 		}
+
+		spin_lock_irq(&isci_host->scic_lock);
+		isci_free_tag(isci_host, request->sci.io_tag);
+		spin_unlock_irq(&isci_host->scic_lock);
+
 		/* Free the request object. */
 		isci_request_free(isci_host, request);
 	}
@@ -1242,6 +1225,7 @@ static void isci_host_completion_routine(unsigned long data)
 			 * of pending requests.
 			 */
 			list_del_init(&request->dev_node);
+			isci_free_tag(isci_host, request->sci.io_tag);
 			spin_unlock_irq(&isci_host->scic_lock);

 			/* Free the request object. */
@@ -2375,6 +2359,7 @@ static int scic_controller_mem_init(struct scic_sds_controller *scic)
 	if (!scic->task_context_table)
 		return -ENOMEM;

+	scic->task_context_dma = dma;
 	writel(lower_32_bits(dma), &scic->smu_registers->host_task_table_lower);
 	writel(upper_32_bits(dma), &scic->smu_registers->host_task_table_upper);

@@ -2409,11 +2394,9 @@ int isci_host_init(struct isci_host *isci_host)

 	spin_lock_init(&isci_host->state_lock);
 	spin_lock_init(&isci_host->scic_lock);
-	spin_lock_init(&isci_host->queue_lock);
 	init_waitqueue_head(&isci_host->eventq);

 	isci_host_change_state(isci_host, isci_starting);
-	isci_host->can_queue = ISCI_CAN_QUEUE_VAL;

 	status = scic_controller_construct(&isci_host->sci, scu_base(isci_host),
 					   smu_base(isci_host));
@@ -2611,51 +2594,6 @@ void scic_sds_controller_post_request(
 	writel(request, &scic->smu_registers->post_context_port);
 }

-/**
- * This method will copy the soft copy of the task context into the physical
- *    memory accessible by the controller.
- * @scic: This parameter specifies the controller for which to copy
- *    the task context.
- * @sci_req: This parameter specifies the request for which the task
- *    context is being copied.
- *
- * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
- * the physical memory version of the task context. Thus, all subsequent
- * updates to the task context are performed in the TC table (i.e. DMAable
- * memory). none
- */
-void scic_sds_controller_copy_task_context(
-	struct scic_sds_controller *scic,
-	struct scic_sds_request *sci_req)
-{
-	struct scu_task_context *task_context_buffer;
-
-	task_context_buffer = scic_sds_controller_get_task_context_buffer(
-		scic, sci_req->io_tag);
-
-	memcpy(task_context_buffer,
-	       sci_req->task_context_buffer,
-	       offsetof(struct scu_task_context, sgl_snapshot_ac));
-
-	/*
-	 * Now that the soft copy of the TC has been copied into the TC
-	 * table accessible by the silicon.  Thus, any further changes to
-	 * the TC (e.g. TC termination) occur in the appropriate location. */
-	sci_req->task_context_buffer = task_context_buffer;
-}
-
-struct scu_task_context *scic_sds_controller_get_task_context_buffer(struct scic_sds_controller *scic,
-								     u16 io_tag)
-{
-	u16 tci = ISCI_TAG_TCI(io_tag);
-
-	if (tci < scic->task_context_entries) {
-		return &scic->task_context_table[tci];
-	}
-
-	return NULL;
-}
-
 struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag)
 {
 	u16 task_index;
@@ -2801,6 +2739,60 @@ void scic_sds_controller_release_frame(
 			&scic->scu_registers->sdma.unsolicited_frame_get_pointer);
 }

+void isci_tci_free(struct isci_host *ihost, u16 tci)
+{
+	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
+
+	ihost->tci_pool[tail] = tci;
+	ihost->tci_tail = tail + 1;
+}
+
+static u16 isci_tci_alloc(struct isci_host *ihost)
+{
+	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
+	u16 tci = ihost->tci_pool[head];
+
+	ihost->tci_head = head + 1;
+	return tci;
+}
+
+static u16 isci_tci_space(struct isci_host *ihost)
+{
+	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
+u16 isci_alloc_tag(struct isci_host *ihost)
+{
+	if (isci_tci_space(ihost)) {
+		u16 tci = isci_tci_alloc(ihost);
+		u8 seq = ihost->sci.io_request_sequence[tci];
+
+		return ISCI_TAG(seq, tci);
+	}
+
+	return SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
+{
+	struct scic_sds_controller *scic = &ihost->sci;
+	u16 tci = ISCI_TAG_TCI(io_tag);
+	u16 seq = ISCI_TAG_SEQ(io_tag);
+
+	/* prevent tail from passing head */
+	if (isci_tci_active(ihost) == 0)
+		return SCI_FAILURE_INVALID_IO_TAG;
+
+	if (seq == scic->io_request_sequence[tci]) {
+		scic->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
+
+		isci_tci_free(ihost, tci);
+
+		return SCI_SUCCESS;
+	}
+	return SCI_FAILURE_INVALID_IO_TAG;
+}
+
 /**
  * scic_controller_start_io() - This method is called by the SCI user to
  *    send/start an IO request. If the method invocation is successful, then
@@ -2811,27 +2803,11 @@ void scic_sds_controller_release_frame(
  *    IO request.
  * @io_request: the handle to the io request object to start.
  * @io_tag: This parameter specifies a previously allocated IO tag that the
- *    user desires to be utilized for this request. This parameter is optional.
- *     The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value
- *    for this parameter.
- *
- * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
- * to ensure that each of the methods that may allocate or free available IO
- * tags are handled in a mutually exclusive manner.  This method is one of said
- * methods requiring proper critical code section protection (e.g. semaphore,
- * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags.  As a
- * result, it is expected the user will have set the NCQ tag field in the host
- * to device register FIS prior to calling this method.  There is also a
- * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking
- * the scic_controller_start_io() method. scic_controller_allocate_tag() for
- * more information on allocating a tag. Indicate if the controller
- * successfully started the IO request. SCI_SUCCESS if the IO request was
- * successfully started. Determine the failure situations and return values.
+ *    user desires to be utilized for this request.
  */
 enum sci_status scic_controller_start_io(struct scic_sds_controller *scic,
 					 struct scic_sds_remote_device *rdev,
-					 struct scic_sds_request *req,
-					 u16 io_tag)
+					 struct scic_sds_request *req)
 {
 	enum sci_status status;

@@ -2902,17 +2878,6 @@ enum sci_status scic_controller_terminate_request(
  * @remote_device: The handle to the remote device object for which to complete
  *    the IO request.
  * @io_request: the handle to the io request object to complete.
- *
- * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
- * to ensure that each of the methods that may allocate or free available IO
- * tags are handled in a mutually exclusive manner.  This method is one of said
- * methods requiring proper critical code section protection (e.g. semaphore,
- * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
- * Core user, using the scic_controller_allocate_io_tag() method, then it is
- * the responsibility of the caller to invoke the scic_controller_free_io_tag()
- * method to free the tag (i.e. this method will not free the IO tag). Indicate
- * if the controller successfully completed the IO request. SCI_SUCCESS if the
- * completion process was successful.
  */
 enum sci_status scic_controller_complete_io(
 	struct scic_sds_controller *scic,
@@ -2963,31 +2928,11 @@ enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
  * @remote_device: the handle to the remote device object for which to start
  *    the task management request.
  * @task_request: the handle to the task request object to start.
- * @io_tag: This parameter specifies a previously allocated IO tag that the
- *    user desires to be utilized for this request.  Note this not the io_tag
- *    of the request being managed.  It is to be utilized for the task request
- *    itself. This parameter is optional.  The user is allowed to supply
- *    SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter.
- *
- * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
- * to ensure that each of the methods that may allocate or free available IO
- * tags are handled in a mutually exclusive manner.  This method is one of said
- * methods requiring proper critical code section protection (e.g. semaphore,
- * spin-lock, etc.). - The user must synchronize this task with completion
- * queue processing.  If they are not synchronized then it is possible for the
- * io requests that are being managed by the task request can complete before
- * starting the task request. scic_controller_allocate_tag() for more
- * information on allocating a tag. Indicate if the controller successfully
- * started the IO request. SCI_TASK_SUCCESS if the task request was
- * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is
- * returned if there is/are task(s) outstanding that require termination or
- * completion before this request can succeed.
  */
 enum sci_task_status scic_controller_start_task(
 	struct scic_sds_controller *scic,
 	struct scic_sds_remote_device *rdev,
-	struct scic_sds_request *req,
-	u16 task_tag)
+	struct scic_sds_request *req)
 {
 	enum sci_status status;

@@ -3022,85 +2967,3 @@ enum sci_task_status scic_controller_start_task(

 	return status;
 }
-
-/**
- * scic_controller_allocate_io_tag() - This method will allocate a tag from the
- *    pool of free IO tags. Direct allocation of IO tags by the SCI Core user
- *    is optional. The scic_controller_start_io() method will allocate an IO
- *    tag if this method is not utilized and the tag is not supplied to the IO
- *    construct routine.  Direct allocation of IO tags may provide additional
- *    performance improvements in environments capable of supporting this usage
- *    model.  Additionally, direct allocation of IO tags also provides
- *    additional flexibility to the SCI Core user.  Specifically, the user may
- *    retain IO tags across the lives of multiple IO requests.
- * @controller: the handle to the controller object for which to allocate the
- *    tag.
- *
- * IO tags are a protected resource.  It is incumbent upon the SCI Core user to
- * ensure that each of the methods that may allocate or free available IO tags
- * are handled in a mutually exclusive manner.  This method is one of said
- * methods requiring proper critical code section protection (e.g. semaphore,
- * spin-lock, etc.). An unsigned integer representing an available IO tag.
- * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no
- * currently available tags to be allocated. All return other values indicate a
- * legitimate tag.
- */
-u16 scic_controller_allocate_io_tag(struct scic_sds_controller *scic)
-{
-	struct isci_host *ihost = scic_to_ihost(scic);
-
-	if (isci_tci_space(ihost)) {
-		u16 tci = isci_tci_alloc(ihost);
-		u8 seq = scic->io_request_sequence[tci];
-
-		return ISCI_TAG(seq, tci);
-	}
-
-	return SCI_CONTROLLER_INVALID_IO_TAG;
-}
-
-/**
- * scic_controller_free_io_tag() - This method will free an IO tag to the pool
- *    of free IO tags. This method provides the SCI Core user more flexibility
- *    with regards to IO tags.  The user may desire to keep an IO tag after an
- *    IO request has completed, because they plan on re-using the tag for a
- *    subsequent IO request.  This method is only legal if the tag was
- *    allocated via scic_controller_allocate_io_tag().
- * @controller: This parameter specifies the handle to the controller object
- *    for which to free/return the tag.
- * @io_tag: This parameter represents the tag to be freed to the pool of
- *    available tags.
- *
- * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
- * to ensure that each of the methods that may allocate or free available IO
- * tags are handled in a mutually exclusive manner.  This method is one of said
- * methods requiring proper critical code section protection (e.g. semaphore,
- * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
- * Core user, using the scic_controller_allocate_io_tag() method, then it is
- * the responsibility of the caller to invoke this method to free the tag. This
- * method returns an indication of whether the tag was successfully put back
- * (freed) to the pool of available tags. SCI_SUCCESS This return value
- * indicates the tag was successfully placed into the pool of available IO
- * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag
- * is not a valid IO tag value.
- */
-enum sci_status scic_controller_free_io_tag(struct scic_sds_controller *scic,
-					    u16 io_tag)
-{
-	struct isci_host *ihost = scic_to_ihost(scic);
-	u16 tci = ISCI_TAG_TCI(io_tag);
-	u16 seq = ISCI_TAG_SEQ(io_tag);
-
-	/* prevent tail from passing head */
-	if (isci_tci_active(ihost) == 0)
-		return SCI_FAILURE_INVALID_IO_TAG;
-
-	if (seq == scic->io_request_sequence[tci]) {
-		scic->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
-
-		isci_tci_free(ihost, ISCI_TAG_TCI(io_tag));
-
-		return SCI_SUCCESS;
-	}
-	return SCI_FAILURE_INVALID_IO_TAG;
-}
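
The functions added in the hunk above implement the unified scheme: a power-of-two circular buffer of free tcis (isci_tci_alloc/isci_tci_free, counted with CIRC_CNT/CIRC_SPACE) plus a per-tci sequence number that lets isci_free_tag() reject stale or duplicate frees. A standalone sketch of the same mechanism, with illustrative pool size, seeding, and INVALID_TAG value (the real constants and locking live in the driver):

#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE	8	/* stands in for SCI_MAX_IO_REQUESTS */
#define MAX_SEQ		16	/* stands in for SCI_MAX_SEQ */
#define INVALID_TAG	0xffff

#define TAG(seq, tci)	((uint16_t)((seq) << 12 | (tci)))
#define TAG_SEQ(tag)	(((tag) >> 12) & (MAX_SEQ - 1))
#define TAG_TCI(tag)	((tag) & (POOL_SIZE - 1))

static uint16_t tci_pool[POOL_SIZE];	/* ring of free tcis */
static uint16_t tci_head, tci_tail;
static uint8_t seq_of[POOL_SIZE];	/* mirrors io_request_sequence[] */

/* CIRC_CNT/CIRC_SPACE equivalents for a power-of-two ring. */
static unsigned int tci_active(void)
{
	return (tci_head - tci_tail) & (POOL_SIZE - 1);
}

static unsigned int tci_space(void)
{
	return (tci_tail - tci_head - 1) & (POOL_SIZE - 1);
}

static uint16_t alloc_tag(void)
{
	uint16_t head = tci_head & (POOL_SIZE - 1);
	uint16_t tci;

	if (!tci_space())
		return INVALID_TAG;
	tci = tci_pool[head];
	tci_head = head + 1;
	return TAG(seq_of[tci], tci);
}

static int free_tag(uint16_t tag)
{
	uint16_t tci = TAG_TCI(tag);
	uint16_t tail = tci_tail & (POOL_SIZE - 1);

	/* prevent tail from passing head, and reject stale sequences */
	if (tci_active() == 0 || TAG_SEQ(tag) != seq_of[tci])
		return -1;
	seq_of[tci] = (seq_of[tci] + 1) & (MAX_SEQ - 1);
	tci_pool[tail] = tci;
	tci_tail = tail + 1;
	return 0;
}

int main(void)
{
	uint16_t i, tag;

	for (i = 0; i < POOL_SIZE; i++) {	/* seed: every tci starts free */
		tci_pool[i] = i;
		tci_tail++;
	}

	tag = alloc_tag();
	printf("allocated tag 0x%04x\n", (unsigned)tag);
	printf("first free:  %d\n", free_tag(tag));	/* 0: accepted */
	printf("second free: %d\n", free_tag(tag));	/* -1: stale sequence */
	return 0;
}
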
+6 −49
@@ -192,6 +192,7 @@ struct scic_sds_controller {
 	 * context table.  This data is shared between the hardware and software.
 	 */
 	struct scu_task_context *task_context_table;
+	dma_addr_t task_context_dma;

 	/**
 	 * This field is a pointer to the memory allocated by the driver for the
@@ -302,12 +303,8 @@ struct isci_host {
 	struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
 	struct sas_ha_struct sas_ha;

-	int can_queue;
-	spinlock_t queue_lock;
 	spinlock_t state_lock;
-
 	struct pci_dev *pdev;
-
 	enum isci_status status;
 	#define IHOST_START_PENDING 0
 	#define IHOST_STOP_PENDING 1
@@ -451,36 +448,6 @@ static inline void isci_host_change_state(struct isci_host *isci_host,

 }

-static inline int isci_host_can_queue(struct isci_host *isci_host, int num)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&isci_host->queue_lock, flags);
-	if ((isci_host->can_queue - num) < 0) {
-		dev_dbg(&isci_host->pdev->dev,
-			"%s: isci_host->can_queue = %d\n",
-			__func__,
-			isci_host->can_queue);
-		ret = -SAS_QUEUE_FULL;
-
-	} else
-		isci_host->can_queue -= num;
-
-	spin_unlock_irqrestore(&isci_host->queue_lock, flags);
-
-	return ret;
-}
-
-static inline void isci_host_can_dequeue(struct isci_host *isci_host, int num)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&isci_host->queue_lock, flags);
-	isci_host->can_queue += num;
-	spin_unlock_irqrestore(&isci_host->queue_lock, flags);
-}
-
 static inline void wait_for_start(struct isci_host *ihost)
 {
 	wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
@@ -646,10 +613,6 @@ union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffe
 struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
 					     u16 io_tag);

-struct scu_task_context *scic_sds_controller_get_task_context_buffer(
-	struct scic_sds_controller *scic,
-	u16 io_tag);
-
 void scic_sds_controller_power_control_queue_insert(
 	struct scic_sds_controller *scic,
 	struct scic_sds_phy *sci_phy);
@@ -681,6 +644,9 @@ void scic_sds_controller_register_setup(struct scic_sds_controller *scic);
 enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req);
 int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
 void isci_host_scan_start(struct Scsi_Host *);
+u16 isci_alloc_tag(struct isci_host *ihost);
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
+void isci_tci_free(struct isci_host *ihost, u16 tci);

 int isci_host_init(struct isci_host *);

@@ -708,14 +674,12 @@ void scic_controller_disable_interrupts(
 enum sci_status scic_controller_start_io(
 	struct scic_sds_controller *scic,
 	struct scic_sds_remote_device *remote_device,
-	struct scic_sds_request *io_request,
-	u16 io_tag);
+	struct scic_sds_request *io_request);

 enum sci_task_status scic_controller_start_task(
 	struct scic_sds_controller *scic,
 	struct scic_sds_remote_device *remote_device,
-	struct scic_sds_request *task_request,
-	u16 io_tag);
+	struct scic_sds_request *task_request);

 enum sci_status scic_controller_terminate_request(
 	struct scic_sds_controller *scic,
@@ -727,13 +691,6 @@ enum sci_status scic_controller_complete_io(
 	struct scic_sds_remote_device *remote_device,
 	struct scic_sds_request *io_request);

-u16 scic_controller_allocate_io_tag(
-	struct scic_sds_controller *scic);
-
-enum sci_status scic_controller_free_io_tag(
-	struct scic_sds_controller *scic,
-	u16 io_tag);
-
 void scic_sds_port_configuration_agent_construct(
 	struct scic_sds_port_configuration_agent *port_agent);

+23 −38
@@ -695,35 +695,21 @@ static void scic_sds_port_construct_dummy_rnc(struct scic_sds_port *sci_port, u1
  */
 static void scic_sds_port_construct_dummy_task(struct scic_sds_port *sci_port, u16 tag)
 {
+	struct scic_sds_controller *scic = sci_port->owning_controller;
 	struct scu_task_context *task_context;

-	task_context = scic_sds_controller_get_task_context_buffer(sci_port->owning_controller, tag);
-
+	task_context = &scic->task_context_table[ISCI_TAG_TCI(tag)];
 	memset(task_context, 0, sizeof(struct scu_task_context));

-	task_context->abort = 0;
-	task_context->priority = 0;
 	task_context->initiator_request = 1;
 	task_context->connection_rate = 1;
-	task_context->protocol_engine_index = 0;
 	task_context->logical_port_index = sci_port->physical_port_index;
 	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
 	task_context->task_index = ISCI_TAG_TCI(tag);
 	task_context->valid = SCU_TASK_CONTEXT_VALID;
 	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
-
 	task_context->remote_node_index = sci_port->reserved_rni;
-	task_context->command_code = 0;
-
-	task_context->link_layer_control = 0;
 	task_context->do_not_dma_ssp_good_response = 1;
-	task_context->strict_ordering = 0;
-	task_context->control_frame = 0;
-	task_context->timeout_enable = 0;
-	task_context->block_guard_enable = 0;
-
-	task_context->address_modifier = 0;
-
 	task_context->task_phase = 0x01;
 }

@@ -731,15 +717,15 @@ static void scic_sds_port_destroy_dummy_resources(struct scic_sds_port *sci_port
 {
 	struct scic_sds_controller *scic = sci_port->owning_controller;

-	if (sci_port->reserved_tci != SCU_DUMMY_INDEX)
-		scic_controller_free_io_tag(scic, sci_port->reserved_tci);
+	if (sci_port->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
+		isci_free_tag(scic_to_ihost(scic), sci_port->reserved_tag);

 	if (sci_port->reserved_rni != SCU_DUMMY_INDEX)
 		scic_sds_remote_node_table_release_remote_node_index(&scic->available_remote_nodes,
 								     1, sci_port->reserved_rni);

 	sci_port->reserved_rni = SCU_DUMMY_INDEX;
-	sci_port->reserved_tci = SCU_DUMMY_INDEX;
+	sci_port->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
 }

 /**
@@ -1119,18 +1105,17 @@ scic_sds_port_suspend_port_task_scheduler(struct scic_sds_port *port)
  */
 static void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port)
 {
-	u32 command;
-	struct scu_task_context *task_context;
 	struct scic_sds_controller *scic = sci_port->owning_controller;
-	u16 tci = sci_port->reserved_tci;
-
-	task_context = scic_sds_controller_get_task_context_buffer(scic, tci);
+	u16 tag = sci_port->reserved_tag;
+	struct scu_task_context *tc;
+	u32 command;

-	task_context->abort = 0;
+	tc = &scic->task_context_table[ISCI_TAG_TCI(tag)];
+	tc->abort = 0;

 	command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
 		  sci_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
-		  tci;
+		  ISCI_TAG_TCI(tag);

 	scic_sds_controller_post_request(scic, command);
 }
@@ -1145,17 +1130,16 @@ static void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port)
 static void scic_sds_port_abort_dummy_request(struct scic_sds_port *sci_port)
 {
 	struct scic_sds_controller *scic = sci_port->owning_controller;
-	u16 tci = sci_port->reserved_tci;
+	u16 tag = sci_port->reserved_tag;
 	struct scu_task_context *tc;
 	u32 command;

-	tc = scic_sds_controller_get_task_context_buffer(scic, tci);
-
+	tc = &scic->task_context_table[ISCI_TAG_TCI(tag)];
 	tc->abort = 1;

 	command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
 		  sci_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
-		  tci;
+		  ISCI_TAG_TCI(tag);

 	scic_sds_controller_post_request(scic, command);
 }
@@ -1333,15 +1317,16 @@ enum sci_status scic_sds_port_start(struct scic_sds_port *sci_port)
 		sci_port->reserved_rni = rni;
 	}

-	if (sci_port->reserved_tci == SCU_DUMMY_INDEX) {
-		/* Allocate a TCI and remove the sequence nibble */
-		u16 tci = scic_controller_allocate_io_tag(scic);
+	if (sci_port->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+		struct isci_host *ihost = scic_to_ihost(scic);
+		u16 tag;

-		if (tci != SCU_DUMMY_INDEX)
-			scic_sds_port_construct_dummy_task(sci_port, tci);
-		else
+		tag = isci_alloc_tag(ihost);
+		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
 			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
-		sci_port->reserved_tci = tci;
+		else
+			scic_sds_port_construct_dummy_task(sci_port, tag);
+		sci_port->reserved_tag = tag;
 	}

 	if (status == SCI_SUCCESS) {
@@ -1859,7 +1844,7 @@ void scic_sds_port_construct(struct scic_sds_port *sci_port, u8 index,
 	sci_port->assigned_device_count = 0;

 	sci_port->reserved_rni = SCU_DUMMY_INDEX;
-	sci_port->reserved_tci = SCU_DUMMY_INDEX;
+	sci_port->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;

 	sci_init_timer(&sci_port->timer, port_timeout);

+1 −1
@@ -108,7 +108,7 @@ struct scic_sds_port {
 	u8 active_phy_mask;

 	u16 reserved_rni;
-	u16 reserved_tci;
+	u16 reserved_tag;

 	/**
 	 * This field contains the count of the io requests started on this port
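
Two pieces above work together: port.c now indexes the task context table directly by tci (task_context_table[ISCI_TAG_TCI(tag)]), and host.c/host.h record the table's bus address in the new task_context_dma field. With a contiguous table, the CPU and hardware views of a slot are both plain array arithmetic, which is what lets a request build its task context in place instead of staging and copying it. A sketch of that arithmetic with placeholder types and a made-up bus address (the driver's real use is in the collapsed request code below):

#include <stdint.h>
#include <stdio.h>

/* Placeholder layout; the real struct scu_task_context is much richer. */
struct scu_task_context {
	uint32_t words[64];
};

struct ctrl {
	struct scu_task_context *task_context_table;	/* CPU address */
	uint64_t task_context_dma;	/* bus address of entry 0 */
};

/* CPU-side view of a slot: direct indexing by tci. */
static struct scu_task_context *tc_virt(struct ctrl *c, uint16_t tci)
{
	return &c->task_context_table[tci];
}

/* Hardware-side (bus) address of the same slot: base + tci * entry size. */
static uint64_t tc_dma(struct ctrl *c, uint16_t tci)
{
	return c->task_context_dma + (uint64_t)tci * sizeof(struct scu_task_context);
}

int main(void)
{
	static struct scu_task_context table[4];
	struct ctrl c = { table, 0x80000000ull };	/* fake bus address */

	printf("tci 2: virt=%p dma=0x%llx\n",
	       (void *)tc_virt(&c, 2), (unsigned long long)tc_dma(&c, 2));
	return 0;
}
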
+163 −306

File changed; preview size limit exceeded, changes collapsed.
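
The collapsed file carries the request-construction and sgl rework described in items 1/ and 2/ of the commit message. As a rough illustration of the linkage fix in item 1/: a hardware sgl pair embeds two elements plus the bus address of the next pair, so the 'ab' pair inside the task context must link to the final, DMA-visible location of the 'cd' pair rather than to a staging copy that will be thrown away. The layout below is invented for the sketch; the real scu_sgl_element_pair is defined in the driver's headers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Invented layout: two data elements plus a link to the next pair. */
struct sgl_element {
	uint64_t addr;
	uint32_t len;
};

struct sgl_pair {
	struct sgl_element a, b;
	uint64_t next_pair_dma;	/* bus address of the following pair */
};

struct task_ctx {
	struct sgl_pair ab;	/* first pair, embedded in the task context */
	struct sgl_pair cd;	/* second pair, also embedded */
};

int main(void)
{
	struct task_ctx tc = { 0 };
	uint64_t tc_dma = 0x80000000ull;	/* fake bus address of this TC */

	/* The fix: link 'ab' to where 'cd' actually lives in DMA-visible
	 * memory, i.e. inside the final task context, not a staging buffer
	 * whose contents will be memcpy'd away. */
	tc.ab.next_pair_dma = tc_dma + offsetof(struct task_ctx, cd);

	printf("ab links to cd at 0x%llx\n",
	       (unsigned long long)tc.ab.next_pair_dma);
	return 0;
}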