*/
#include "isci.h"
-#include "scic_remote_device.h"
-#include "scic_io_request.h"
-#include "scic_task_request.h"
-#include "scic_port.h"
#include "task.h"
#include "request.h"
#include "sata.h"
#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "sas.h"
+/**
+ * This method returns the sgl element pair for the specified sgl_pair index.
+ * @sci_req: This parameter specifies the IO request for which to retrieve
+ * the Scatter-Gather List element pair.
+ * @sgl_pair_index: This parameter specifies the index into the SGL element
+ * pair to be retrieved.
+ *
+ * This method returns a pointer to a struct scu_sgl_element_pair.
+ */
+static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
+ struct scic_sds_request *sci_req,
+ u32 sgl_pair_index
+ ) {
+ struct scu_task_context *task_context;
+
+ task_context = (struct scu_task_context *)sci_req->task_context_buffer;
+
+ if (sgl_pair_index == 0) {
+ return &task_context->sgl_pair_ab;
+ } else if (sgl_pair_index == 1) {
+ return &task_context->sgl_pair_cd;
+ }
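+ /* pairs beyond the first two live in the request's sg_table */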
-static enum sci_status isci_request_ssp_request_construct(
- struct isci_request *request)
-{
- enum sci_status status;
-
- dev_dbg(&request->isci_host->pdev->dev,
- "%s: request = %p\n",
- __func__,
- request);
- status = scic_io_request_construct_basic_ssp(
- request->sci_request_handle
- );
- return status;
+ return &sci_req->sg_table[sgl_pair_index - 2];
}
-static enum sci_status isci_request_stp_request_construct(
- struct isci_request *request)
+/**
+ * This function will build the SGL for an IO request.
+ * @sds_request: This parameter specifies the IO request for which to build
+ * the Scatter-Gather List.
+ *
+ */
+static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
- struct sas_task *task = isci_request_access_task(request);
- enum sci_status status;
- struct host_to_dev_fis *register_fis;
+ struct isci_request *isci_request = sci_req_to_ireq(sds_request);
+ struct isci_host *isci_host = isci_request->isci_host;
+ struct sas_task *task = isci_request_access_task(isci_request);
+ struct scatterlist *sg = NULL;
+ dma_addr_t dma_addr;
+ u32 sg_idx = 0;
+ struct scu_sgl_element_pair *scu_sg = NULL;
+ struct scu_sgl_element_pair *prev_sg = NULL;
- dev_dbg(&request->isci_host->pdev->dev,
- "%s: request = %p\n",
- __func__,
- request);
+ if (task->num_scatter > 0) {
+ sg = task->scatter;
- /* Get the host_to_dev_fis from the core and copy
- * the fis from the task into it.
- */
- register_fis = isci_sata_task_to_fis_copy(task);
+ while (sg) {
+ scu_sg = scic_sds_request_get_sgl_element_pair(
+ sds_request,
+ sg_idx);
- status = scic_io_request_construct_basic_sata(
- request->sci_request_handle
- );
+ SCU_SGL_COPY(scu_sg->A, sg);
- /* Set the ncq tag in the fis, from the queue
- * command in the task.
- */
- if (isci_sata_is_task_ncq(task)) {
+ sg = sg_next(sg);
- isci_sata_set_ncq_tag(
- register_fis,
- task
- );
+ if (sg) {
+ SCU_SGL_COPY(scu_sg->B, sg);
+ sg = sg_next(sg);
+ } else
+ SCU_SGL_ZERO(scu_sg->B);
+
+ if (prev_sg) {
+ dma_addr =
+ scic_io_request_get_dma_addr(
+ sds_request,
+ scu_sg);
+
+ prev_sg->next_pair_upper =
+ upper_32_bits(dma_addr);
+ prev_sg->next_pair_lower =
+ lower_32_bits(dma_addr);
+ }
+
+ prev_sg = scu_sg;
+ sg_idx++;
+ }
+ } else { /* handle when no sg */
+ scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
+ sg_idx);
+
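+ /* with no SG list, task->scatter points at a single contiguous buffer; map it directly */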
+ dma_addr = dma_map_single(&isci_host->pdev->dev,
+ task->scatter,
+ task->total_xfer_len,
+ task->data_dir);
+
+ isci_request->zero_scatter_daddr = dma_addr;
+
+ scu_sg->A.length = task->total_xfer_len;
+ scu_sg->A.address_upper = upper_32_bits(dma_addr);
+ scu_sg->A.address_lower = lower_32_bits(dma_addr);
}
- return status;
+ if (scu_sg) {
+ scu_sg->next_pair_upper = 0;
+ scu_sg->next_pair_lower = 0;
+ }
}
-/**
- * isci_smp_request_build() - This function builds the smp request object.
- * @isci_host: This parameter specifies the ISCI host object
- * @request: This parameter points to the isci_request object allocated in the
- * request construct function.
- * @sci_device: This parameter is the handle for the sci core's remote device
- * object that is the destination for this request.
- *
- * SCI_SUCCESS on successfull completion, or specific failure code.
- */
-static enum sci_status isci_smp_request_build(
- struct isci_request *request)
+static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
- enum sci_status status = SCI_FAILURE;
- struct sas_task *task = isci_request_access_task(request);
+ struct ssp_cmd_iu *cmd_iu;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ cmd_iu = &sci_req->ssp.cmd;
+
+ memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
+ cmd_iu->add_cdb_len = 0;
+ cmd_iu->_r_a = 0;
+ cmd_iu->_r_b = 0;
+ cmd_iu->en_fburst = 0; /* unsupported */
+ cmd_iu->task_prio = task->ssp_task.task_prio;
+ cmd_iu->task_attr = task->ssp_task.task_attr;
+ cmd_iu->_r_c = 0;
+
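+ /* copy the CDB, byte swapping each 32-bit word */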
+ sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
+ sizeof(task->ssp_task.cdb) / sizeof(u32));
+}
- void *command_iu_address =
- scic_io_request_get_command_iu_address(
- request->sci_request_handle
- );
+static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
+{
+ struct ssp_task_iu *task_iu;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
- dev_dbg(&request->isci_host->pdev->dev,
- "%s: request = %p\n",
- __func__,
- request);
- dev_dbg(&request->isci_host->pdev->dev,
- "%s: smp_req len = %d\n",
- __func__,
- task->smp_task.smp_req.length);
+ task_iu = &sci_req->ssp.tmf;
- /* copy the smp_command to the address; */
- sg_copy_to_buffer(&task->smp_task.smp_req, 1,
- (char *)command_iu_address,
- sizeof(struct smp_request)
- );
+ memset(task_iu, 0, sizeof(struct ssp_task_iu));
- status = scic_io_request_construct_smp(request->sci_request_handle);
- if (status != SCI_SUCCESS)
- dev_warn(&request->isci_host->pdev->dev,
- "%s: scic_io_request_construct_smp failed with "
- "status = %d\n",
- __func__,
- status);
+ memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
- return status;
+ task_iu->task_func = isci_tmf->tmf_code;
+ task_iu->task_tag =
+ (ireq->ttype == tmf_task) ?
+ isci_tmf->io_tag :
+ SCI_CONTROLLER_INVALID_IO_TAG;
}
/**
- * isci_io_request_build() - This function builds the io request object.
- * @isci_host: This parameter specifies the ISCI host object
- * @request: This parameter points to the isci_request object allocated in the
- * request construct function.
- * @sci_device: This parameter is the handle for the sci core's remote device
- * object that is the destination for this request.
+ * This method will fill in the SCU Task Context for any type of SSP request.
+ * @sds_request: the SSP request for which to construct the task context.
+ * @task_context: the SCU task context buffer to be constructed.
*
- * SCI_SUCCESS on successfull completion, or specific failure code.
*/
-static enum sci_status isci_io_request_build(
- struct isci_host *isci_host,
- struct isci_request *request,
- struct isci_remote_device *isci_device)
+static void scu_ssp_reqeust_construct_task_context(
+ struct scic_sds_request *sds_request,
+ struct scu_task_context *task_context)
{
- struct smp_discover_response_protocols dev_protocols;
- enum sci_status status = SCI_SUCCESS;
- struct sas_task *task = isci_request_access_task(request);
- struct scic_sds_remote_device *sci_device = to_sci_dev(isci_device);
-
- dev_dbg(&isci_host->pdev->dev,
- "%s: isci_device = 0x%p; request = %p, "
- "num_scatter = %d\n",
- __func__,
- isci_device,
- request,
- task->num_scatter);
-
- /* map the sgl addresses, if present.
- * libata does the mapping for sata devices
- * before we get the request.
- */
- if (task->num_scatter &&
- !sas_protocol_ata(task->task_proto) &&
- !(SAS_PROTOCOL_SMP & task->task_proto)) {
-
- request->num_sg_entries = dma_map_sg(
- &isci_host->pdev->dev,
- task->scatter,
- task->num_scatter,
- task->data_dir
- );
-
- if (request->num_sg_entries == 0)
- return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ dma_addr_t dma_addr;
+ struct scic_sds_remote_device *target_device;
+ struct scic_sds_port *target_port;
+
+ target_device = scic_sds_request_get_device(sds_request);
+ target_port = scic_sds_request_get_port(sds_request);
+
+ /* Fill in the TC with its required data */
+ task_context->abort = 0;
+ task_context->priority = 0;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = target_device->connection_rate;
+ task_context->protocol_engine_index =
+ scic_sds_controller_get_protocol_engine_group(controller);
+ task_context->logical_port_index =
+ scic_sds_port_get_index(target_port);
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ task_context->remote_node_index =
+ scic_sds_remote_device_get_index(sds_request->target_device);
+ task_context->command_code = 0;
+
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 0;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ task_context->address_modifier = 0;
+
+ /* task_context->type.ssp.tag = sci_req->io_tag; */
+ task_context->task_phase = 0x01;
+
+ if (sds_request->was_tag_assigned_by_user) {
+ /*
+ * Build the task context now since we have already read
+ * the data
+ */
+ sds_request->post_context =
+ (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (scic_sds_controller_get_protocol_engine_group(
+ controller) <<
+ SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (scic_sds_port_get_index(target_port) <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ scic_sds_io_tag_get_index(sds_request->io_tag));
+ } else {
+ /*
+ * Build the task context now since we have already read
+ * the data
+ *
+ * I/O tag index is not assigned because we have to wait
+ * until we get a TCi
+ */
+ sds_request->post_context =
+ (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (scic_sds_controller_get_protocol_engine_group(
+ owning_controller) <<
+ SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (scic_sds_port_get_index(target_port) <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
}
- /* build the common request object. For now,
- * we will let the core allocate the IO tag.
+ /*
+ * Copy the physical address for the command buffer to the
+ * SCU Task Context
*/
- status = scic_io_request_construct(
- isci_host->core_controller,
- sci_device,
- SCI_CONTROLLER_INVALID_IO_TAG,
- request,
- request->sci_request_mem_ptr,
- (struct scic_sds_request **)&request->sci_request_handle
- );
-
- if (status != SCI_SUCCESS) {
- dev_warn(&isci_host->pdev->dev,
- "%s: failed request construct\n",
- __func__);
- return SCI_FAILURE;
- }
+ dma_addr = scic_io_request_get_dma_addr(sds_request,
+ &sds_request->ssp.cmd);
- sci_object_set_association(request->sci_request_handle, request);
+ task_context->command_iu_upper = upper_32_bits(dma_addr);
+ task_context->command_iu_lower = lower_32_bits(dma_addr);
- /* Determine protocol and call the appropriate basic constructor */
- scic_remote_device_get_protocols(sci_device, &dev_protocols);
- if (dev_protocols.u.bits.attached_ssp_target)
- status = isci_request_ssp_request_construct(request);
- else if (dev_protocols.u.bits.attached_stp_target)
- status = isci_request_stp_request_construct(request);
- else if (dev_protocols.u.bits.attached_smp_target)
- status = isci_smp_request_build(request);
- else {
- dev_warn(&isci_host->pdev->dev,
- "%s: unknown protocol\n", __func__);
- return SCI_FAILURE;
- }
+ /*
+ * Copy the physical address for the response buffer to the
+ * SCU Task Context
+ */
+ dma_addr = scic_io_request_get_dma_addr(sds_request,
+ &sds_request->ssp.rsp);
- return SCI_SUCCESS;
+ task_context->response_iu_upper = upper_32_bits(dma_addr);
+ task_context->response_iu_lower = lower_32_bits(dma_addr);
}
-
/**
- * isci_request_alloc_core() - This function gets the request object from the
- * isci_host dma cache.
- * @isci_host: This parameter specifies the ISCI host object
- * @isci_request: This parameter will contain the pointer to the new
- * isci_request object.
- * @isci_device: This parameter is the pointer to the isci remote device object
- * that is the destination for this request.
- * @gfp_flags: This parameter specifies the os allocation flags.
+ * This method will fill in the SCU Task Context for an SSP IO request.
+ * @sci_req: the SSP IO request for which to construct the task context.
+ * @dir: the DMA data direction for the request.
+ * @len: the transfer length in bytes.
*
- * SCI_SUCCESS on successfull completion, or specific failure code.
*/
-static int isci_request_alloc_core(
- struct isci_host *isci_host,
- struct isci_request **isci_request,
- struct isci_remote_device *isci_device,
- gfp_t gfp_flags)
+static void scu_ssp_io_request_construct_task_context(
+ struct scic_sds_request *sci_req,
+ enum dma_data_direction dir,
+ u32 len)
{
- int ret = 0;
- dma_addr_t handle;
- struct isci_request *request;
+ struct scu_task_context *task_context;
+ task_context = scic_sds_request_get_task_context(sci_req);
- /* get pointer to dma memory. This actually points
- * to both the isci_remote_device object and the
- * sci object. The isci object is at the beginning
- * of the memory allocated here.
- */
- request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle);
- if (!request) {
- dev_warn(&isci_host->pdev->dev,
- "%s: dma_pool_alloc returned NULL\n", __func__);
- return -ENOMEM;
+ scu_ssp_reqeust_construct_task_context(sci_req, task_context);
+
+ task_context->ssp_command_iu_length =
+ sizeof(struct ssp_cmd_iu) / sizeof(u32);
+ task_context->type.ssp.frame_type = SSP_COMMAND;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ case DMA_NONE:
+ default:
+ task_context->task_type = SCU_TASK_TYPE_IOREAD;
+ break;
+ case DMA_TO_DEVICE:
+ task_context->task_type = SCU_TASK_TYPE_IOWRITE;
+ break;
}
- /* initialize the request object. */
- spin_lock_init(&request->state_lock);
- request->sci_request_mem_ptr = ((u8 *)request) +
- sizeof(struct isci_request);
- request->request_daddr = handle;
- request->isci_host = isci_host;
- request->isci_device = isci_device;
- request->io_request_completion = NULL;
+ task_context->transfer_length_bytes = len;
- request->request_alloc_size = isci_host->dma_pool_alloc_size;
- request->num_sg_entries = 0;
+ if (task_context->transfer_length_bytes > 0)
+ scic_sds_request_build_sgl(sci_req);
+}
- request->complete_in_target = false;
+/**
+ * This method will fill in the SCU Task Context for an SSP Task request.
+ * The following important settings are utilized:
+ *   -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
+ *      request is issued ahead of other tasks destined for the same
+ *      Remote Node.
+ *   -# task_type == SCU_TASK_TYPE_RAW_FRAME.  This indicates that a raw
+ *      frame is used to transmit the task management IU.
+ *   -# control_frame == 1.  This ensures that the proper endianness is set
+ *      so that the bytes are transmitted in the right order for a task
+ *      frame.
+ * @sci_req: This parameter specifies the task request object being
+ * constructed.
+ *
+ */
+static void scu_ssp_task_request_construct_task_context(
+ struct scic_sds_request *sci_req)
+{
+ struct scu_task_context *task_context;
- INIT_LIST_HEAD(&request->completed_node);
- INIT_LIST_HEAD(&request->dev_node);
+ task_context = scic_sds_request_get_task_context(sci_req);
- *isci_request = request;
- isci_request_change_state(request, allocated);
+ scu_ssp_reqeust_construct_task_context(sci_req, task_context);
- return ret;
+ task_context->control_frame = 1;
+ task_context->priority = SCU_TASK_PRIORITY_HIGH;
+ task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
+ task_context->transfer_length_bytes = 0;
+ task_context->type.ssp.frame_type = SSP_TASK;
+ task_context->ssp_command_iu_length =
+ sizeof(struct ssp_task_iu) / sizeof(u32);
}
-static int isci_request_alloc_io(
- struct isci_host *isci_host,
- struct sas_task *task,
- struct isci_request **isci_request,
- struct isci_remote_device *isci_device,
- gfp_t gfp_flags)
+/**
+ * This method will fill in the SCU Task Context for any type of SATA
+ * request. This is called from the various SATA constructors.
+ * @sci_req: The general IO request object which is to be used in
+ * constructing the SCU task context.
+ * @task_context: The buffer pointer for the SCU task context which is being
+ * constructed.
+ *
+ * The general io request construction is complete. The buffer assignment for
+ * the command buffer is complete. Revisit task context construction to
+ * determine what is common for SSP/SMP/STP task context structures.
+ */
+static void scu_sata_reqeust_construct_task_context(
+ struct scic_sds_request *sci_req,
+ struct scu_task_context *task_context)
{
- int retval = isci_request_alloc_core(isci_host, isci_request,
- isci_device, gfp_flags);
+ dma_addr_t dma_addr;
+ struct scic_sds_remote_device *target_device;
+ struct scic_sds_port *target_port;
+
+ target_device = scic_sds_request_get_device(sci_req);
+ target_port = scic_sds_request_get_port(sci_req);
+
+ /* Fill in the TC with its required data */
+ task_context->abort = 0;
+ task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = target_device->connection_rate;
+ task_context->protocol_engine_index =
+ scic_sds_controller_get_protocol_engine_group(controller);
+ task_context->logical_port_index =
+ scic_sds_port_get_index(target_port);
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ task_context->remote_node_index =
+ scic_sds_remote_device_get_index(sci_req->target_device);
+ task_context->command_code = 0;
+
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 0;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ task_context->address_modifier = 0;
+ task_context->task_phase = 0x01;
+
+ task_context->ssp_command_iu_length =
+ (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
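+ /* the first FIS dword is carried in the TC itself (type.words[0] below),
+ * so it is excluded from this length
+ */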
+
+ /* Set the first word of the H2D REG FIS */
+ task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;
+
+ if (sci_req->was_tag_assigned_by_user) {
+ /*
+ * Build the task context now since we have already read
+ * the data
+ */
+ sci_req->post_context =
+ (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (scic_sds_controller_get_protocol_engine_group(
+ controller) <<
+ SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (scic_sds_port_get_index(target_port) <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ scic_sds_io_tag_get_index(sci_req->io_tag));
+ } else {
+ /*
+ * Build the task context now since we have already read
+ * the data.
+ * I/O tag index is not assigned because we have to wait
+ * until we get a TCi.
+ */
+ sci_req->post_context =
+ (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (scic_sds_controller_get_protocol_engine_group(
+ controller) <<
+ SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (scic_sds_port_get_index(target_port) <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
+ }
- if (!retval) {
- (*isci_request)->ttype_ptr.io_task_ptr = task;
- (*isci_request)->ttype = io_task;
+ /*
+ * Copy the physical address for the command buffer to the SCU Task
+ * Context. We must offset the command buffer by 4 bytes because the
+ * first 4 bytes are transferred in the body of the TC.
+ */
+ dma_addr = scic_io_request_get_dma_addr(sci_req,
+ ((char *) &sci_req->stp.cmd) +
+ sizeof(u32));
- task->lldd_task = *isci_request;
- }
- return retval;
+ task_context->command_iu_upper = upper_32_bits(dma_addr);
+ task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+ /* SATA Requests do not have a response buffer */
+ task_context->response_iu_upper = 0;
+ task_context->response_iu_lower = 0;
}
+
+
/**
- * isci_request_alloc_tmf() - This function gets the request object from the
- * isci_host dma cache and initializes the relevant fields as a sas_task.
- * @isci_host: This parameter specifies the ISCI host object
- * @sas_task: This parameter is the task struct from the upper layer driver.
- * @isci_request: This parameter will contain the pointer to the new
- * isci_request object.
- * @isci_device: This parameter is the pointer to the isci remote device object
- * that is the destination for this request.
- * @gfp_flags: This parameter specifies the os allocation flags.
+ * scu_stp_raw_request_construct_task_context -
+ * @stp_req: This parameter specifies the STP request object for which to
+ * construct a RAW command frame task context.
+ * @task_context: This parameter specifies the SCU specific task context buffer
+ * to construct.
*
- * SCI_SUCCESS on successfull completion, or specific failure code.
+ * This method performs the operations common to all SATA/STP requests
+ * utilizing the raw frame method.
*/
-int isci_request_alloc_tmf(
- struct isci_host *isci_host,
- struct isci_tmf *isci_tmf,
- struct isci_request **isci_request,
- struct isci_remote_device *isci_device,
- gfp_t gfp_flags)
+static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req,
+ struct scu_task_context *task_context)
{
- int retval = isci_request_alloc_core(isci_host, isci_request,
- isci_device, gfp_flags);
+ struct scic_sds_request *sci_req = to_sci_req(stp_req);
- if (!retval) {
+ scu_sata_reqeust_construct_task_context(sci_req, task_context);
- (*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf;
- (*isci_request)->ttype = tmf_task;
+ task_context->control_frame = 0;
+ task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+ task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
+ task_context->type.stp.fis_type = FIS_REGH2D;
+ task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
+}
+
+static enum sci_status
+scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
+ bool copy_rx_frame)
+{
+ struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
+ struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;
+
+ scu_stp_raw_request_construct_task_context(stp_req,
+ sci_req->task_context_buffer);
+
+ pio->current_transfer_bytes = 0;
+ pio->ending_error = 0;
+ pio->ending_status = 0;
+
+ pio->request_current.sgl_offset = 0;
+ pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;
+
+ if (copy_rx_frame) {
+ scic_sds_request_build_sgl(sci_req);
+ /* Since the IO request copy of the TC contains the same data as
+ * the actual TC, this pointer is valid for either.
+ */
+ pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
+ } else {
+ /* The user does not want the data copied to the SGL buffer location */
+ pio->request_current.sgl_pair = NULL;
}
- return retval;
+
+ return SCI_SUCCESS;
}
/**
- * isci_request_signal_device_reset() - This function will set the "device
- * needs target reset" flag in the given sas_tasks' task_state_flags, and
- * then cause the task to be added into the SCSI error handler queue which
- * will eventually be escalated to a target reset.
*
+ * @sci_req: This parameter specifies the request to be constructed as an
+ * optimized request.
+ * @optimized_task_type: This parameter specifies whether the request is to be
+ * a UDMA request or an NCQ request (e.g. SCU_TASK_TYPE_DMA_IN for UDMA,
+ * SCU_TASK_TYPE_FPDMAQ_READ for NCQ).
+ * @len: This parameter specifies the transfer length in bytes.
+ * @dir: This parameter specifies the DMA data direction for the request.
*
+ * This method will perform request construction common to all types of STP
+ * requests that are optimized by the silicon (i.e. UDMA, NCQ).
*/
-static void isci_request_signal_device_reset(
- struct isci_request *isci_request)
+static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
+ u8 optimized_task_type,
+ u32 len,
+ enum dma_data_direction dir)
{
- unsigned long flags;
- struct sas_task *task = isci_request_access_task(isci_request);
+ struct scu_task_context *task_context = sci_req->task_context_buffer;
- dev_dbg(&isci_request->isci_host->pdev->dev,
- "%s: request=%p, task=%p\n", __func__, isci_request, task);
+ /* Build the STP task context structure */
+ scu_sata_reqeust_construct_task_context(sci_req, task_context);
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
+ /* Copy over the SGL elements */
+ scic_sds_request_build_sgl(sci_req);
- /* Cause this task to be scheduled in the SCSI error handler
- * thread.
- */
- sas_task_abort(task);
-}
+ /* Copy over the number of bytes to be transferred */
+ task_context->transfer_length_bytes = len;
+
+ if (dir == DMA_TO_DEVICE) {
+ /*
+ * The difference between the DMA IN and DMA OUT request task type
+ * values are consistent with the difference between FPDMA READ
+ * and FPDMA WRITE values. Add the supplied task type parameter
+ * to this difference to set the task type properly for this
+ * DATA OUT (WRITE) case. */
+ task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
+ - SCU_TASK_TYPE_DMA_IN);
+ } else {
+ /*
+ * For the DATA IN (READ) case, simply save the supplied
+ * optimized task type. */
+ task_context->task_type = optimized_task_type;
+ }
+}
+
+
+
+static enum sci_status
+scic_io_request_construct_sata(struct scic_sds_request *sci_req,
+ u32 len,
+ enum dma_data_direction dir,
+ bool copy)
+{
+ enum sci_status status = SCI_SUCCESS;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ /* check for management protocols */
+ if (ireq->ttype == tmf_task) {
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+ if (tmf->tmf_code == isci_tmf_sata_srst_high ||
+ tmf->tmf_code == isci_tmf_sata_srst_low) {
+ scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
+ sci_req->task_context_buffer);
+ return SCI_SUCCESS;
+ } else {
+ dev_err(scic_to_dev(sci_req->owning_controller),
+ "%s: Request 0x%p received un-handled SAT "
+ "management protocol 0x%x.\n",
+ __func__, sci_req, tmf->tmf_code);
+
+ return SCI_FAILURE;
+ }
+ }
+
+ if (!sas_protocol_ata(task->task_proto)) {
+ dev_err(scic_to_dev(sci_req->owning_controller),
+ "%s: Non-ATA protocol in SATA path: 0x%x\n",
+ __func__,
+ task->task_proto);
+ return SCI_FAILURE;
+
+ }
+
+ /* non data */
+ if (task->data_dir == DMA_NONE) {
+ scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
+ sci_req->task_context_buffer);
+ return SCI_SUCCESS;
+ }
+
+ /* NCQ */
+ if (task->ata_task.use_ncq) {
+ scic_sds_stp_optimized_request_construct(sci_req,
+ SCU_TASK_TYPE_FPDMAQ_READ,
+ len, dir);
+ return SCI_SUCCESS;
+ }
+
+ /* DMA */
+ if (task->ata_task.dma_xfer) {
+ scic_sds_stp_optimized_request_construct(sci_req,
+ SCU_TASK_TYPE_DMA_IN,
+ len, dir);
+ return SCI_SUCCESS;
+ } else /* PIO */
+ return scic_sds_stp_pio_request_construct(sci_req, copy);
+
+ return status;
+}
+
+static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
+{
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ sci_req->protocol = SCIC_SSP_PROTOCOL;
+
+ scu_ssp_io_request_construct_task_context(sci_req,
+ task->data_dir,
+ task->total_xfer_len);
+
+ scic_sds_io_request_build_ssp_command_iu(sci_req);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status scic_task_request_construct_ssp(
+ struct scic_sds_request *sci_req)
+{
+ /* Construct the SSP Task SCU Task Context */
+ scu_ssp_task_request_construct_task_context(sci_req);
+
+ /* Fill in the SSP Task IU */
+ scic_sds_task_request_build_ssp_task_iu(sci_req);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
+{
+ enum sci_status status;
+ bool copy = false;
+ struct isci_request *isci_request = sci_req_to_ireq(sci_req);
+ struct sas_task *task = isci_request_access_task(isci_request);
+
+ sci_req->protocol = SCIC_STP_PROTOCOL;
+
+ copy = (task->data_dir == DMA_NONE) ? false : true;
+
+ status = scic_io_request_construct_sata(sci_req,
+ task->total_xfer_len,
+ task->data_dir,
+ copy);
+
+ if (status == SCI_SUCCESS)
+ sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
+
+ return status;
+}
+
+enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
+{
+ enum sci_status status = SCI_SUCCESS;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
+
+ /* check for management protocols */
+ if (ireq->ttype == tmf_task) {
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+ if (tmf->tmf_code == isci_tmf_sata_srst_high ||
+ tmf->tmf_code == isci_tmf_sata_srst_low) {
+ scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
+ sci_req->task_context_buffer);
+ } else {
+ dev_err(scic_to_dev(sci_req->owning_controller),
+ "%s: Request 0x%p received un-handled SAT "
+ "Protocol 0x%x.\n",
+ __func__, sci_req, tmf->tmf_code);
+
+ return SCI_FAILURE;
+ }
+ }
+
+ if (status != SCI_SUCCESS)
+ return status;
+ sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
+
+ return status;
+}
+
+/**
+ * sci_req_tx_bytes - bytes transferred when reply underruns request
+ * @sci_req: request that was terminated early
+ */
+#define SCU_TASK_CONTEXT_SRAM 0x200000
+static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
+{
+ struct scic_sds_controller *scic = sci_req->owning_controller;
+ u32 ret_val = 0;
+
+ if (readl(&scic->smu_registers->address_modifier) == 0) {
+ void __iomem *scu_reg_base = scic->scu_registers;
+
+ /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
+ * BAR1 is the scu_registers
+ * 0x20002C = 0x200000 + 0x2c
+ * = start of task context SRAM + offset of (type.ssp.data_offset)
+ * TCi is the io_tag of struct scic_sds_request
+ */
+ ret_val = readl(scu_reg_base +
+ (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
+ ((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag)));
+ }
+
+ return ret_val;
+}
+
+enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req)
+{
+ struct scic_sds_controller *scic = sci_req->owning_controller;
+ struct scu_task_context *task_context;
+ enum sci_base_request_states state;
+
+ if (sci_req->device_sequence !=
+ scic_sds_remote_device_get_sequence(sci_req->target_device))
+ return SCI_FAILURE;
+
+ state = sci_req->sm.current_state_id;
+ if (state != SCI_REQ_CONSTRUCTED) {
+ dev_warn(scic_to_dev(scic),
+ "%s: SCIC IO Request requested to start while in wrong "
+ "state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ /* if necessary, allocate a TCi for the io request object and then,
+ * if necessary, copy the constructed TC data into the actual TC buffer.
+ * If everything is successful the post context field is updated with
+ * the TCi so the controller can post the request to the hardware.
+ */
+ if (sci_req->io_tag == SCI_CONTROLLER_INVALID_IO_TAG)
+ sci_req->io_tag = scic_controller_allocate_io_tag(scic);
+
+ /* Record the IO Tag in the request */
+ if (sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
+ task_context = sci_req->task_context_buffer;
+
+ task_context->task_index = scic_sds_io_tag_get_index(sci_req->io_tag);
+
+ switch (task_context->protocol_type) {
+ case SCU_TASK_CONTEXT_PROTOCOL_SMP:
+ case SCU_TASK_CONTEXT_PROTOCOL_SSP:
+ /* SSP/SMP Frame */
+ task_context->type.ssp.tag = sci_req->io_tag;
+ task_context->type.ssp.target_port_transfer_tag =
+ 0xFFFF;
+ break;
+
+ case SCU_TASK_CONTEXT_PROTOCOL_STP:
+ /* STP/SATA Frame
+ * task_context->type.stp.ncq_tag = sci_req->ncq_tag;
+ */
+ break;
+
+ case SCU_TASK_CONTEXT_PROTOCOL_NONE:
+ /* TODO: When do we set no protocol type? */
+ break;
+
+ default:
+ /* This should never happen since we build the IO
+ * requests */
+ break;
+ }
+
+ /*
+ * Check to see if we need to copy the task context buffer
+ * or whether we have been building directly into the task
+ * context buffer */
+ if (sci_req->was_tag_assigned_by_user == false)
+ scic_sds_controller_copy_task_context(scic, sci_req);
+
+ /* Add to the post_context the io tag value */
+ sci_req->post_context |= scic_sds_io_tag_get_index(sci_req->io_tag);
+
+ /* Everything is good go ahead and change state */
+ sci_change_state(&sci_req->sm, SCI_REQ_STARTED);
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+}
+
+enum sci_status
+scic_sds_io_request_terminate(struct scic_sds_request *sci_req)
+{
+ enum sci_base_request_states state;
+
+ state = sci_req->sm.current_state_id;
+
+ switch (state) {
+ case SCI_REQ_CONSTRUCTED:
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_TASK_ABORT,
+ SCI_FAILURE_IO_TERMINATED);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_STARTED:
+ case SCI_REQ_TASK_WAIT_TC_COMP:
+ case SCI_REQ_SMP_WAIT_RESP:
+ case SCI_REQ_SMP_WAIT_TC_COMP:
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ case SCI_REQ_STP_UDMA_WAIT_D2H:
+ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ case SCI_REQ_STP_NON_DATA_WAIT_D2H:
+ case SCI_REQ_STP_PIO_WAIT_H2D:
+ case SCI_REQ_STP_PIO_WAIT_FRAME:
+ case SCI_REQ_STP_PIO_DATA_IN:
+ case SCI_REQ_STP_PIO_DATA_OUT:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
+ sci_change_state(&sci_req->sm, SCI_REQ_ABORTING);
+ return SCI_SUCCESS;
+ case SCI_REQ_TASK_WAIT_TC_RESP:
+ sci_change_state(&sci_req->sm, SCI_REQ_ABORTING);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_ABORTING:
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_COMPLETED:
+ default:
+ dev_warn(scic_to_dev(sci_req->owning_controller),
+ "%s: SCIC IO Request requested to abort while in wrong "
+ "state %d\n",
+ __func__,
+ sci_req->sm.current_state_id);
+ break;
+ }
+
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req)
+{
+ enum sci_base_request_states state;
+ struct scic_sds_controller *scic = sci_req->owning_controller;
+
+ state = sci_req->sm.current_state_id;
+ if (WARN_ONCE(state != SCI_REQ_COMPLETED,
+ "isci: request completion from wrong state (%d)\n", state))
+ return SCI_FAILURE_INVALID_STATE;
+
+ if (!sci_req->was_tag_assigned_by_user)
+ scic_controller_free_io_tag(scic, sci_req->io_tag);
+
+ if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
+ scic_sds_controller_release_frame(scic,
+ sci_req->saved_rx_frame_index);
+
+ /* XXX can we just stop the machine and remove the 'final' state? */
+ sci_change_state(&sci_req->sm, SCI_REQ_FINAL);
+ return SCI_SUCCESS;
+}
+
+enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
+ u32 event_code)
+{
+ enum sci_base_request_states state;
+ struct scic_sds_controller *scic = sci_req->owning_controller;
+
+ state = sci_req->sm.current_state_id;
+
+ if (state != SCI_REQ_STP_PIO_DATA_IN) {
+ dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n",
+ __func__, event_code, state);
+
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ switch (scu_get_event_specifier(event_code)) {
+ case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
+ /* We are waiting for data and the SCU has signaled R_ERR on the data frame.
+ * Go back to waiting for the D2H Register FIS
+ */
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ return SCI_SUCCESS;
+ default:
+ dev_err(scic_to_dev(scic),
+ "%s: pio request unexpected event %#x\n",
+ __func__, event_code);
+
+ /* TODO Should we fail the PIO request when we get an
+ * unexpected event?
+ */
+ return SCI_FAILURE;
+ }
+}
+
+/*
+ * This function copies response data for requests returning response data
+ * instead of sense data.
+ * @sci_req: This parameter specifies the request object for which to copy
+ * the response data.
+ */
+static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
+{
+ void *resp_buf;
+ u32 len;
+ struct ssp_response_iu *ssp_response;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
+ struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+ ssp_response = &sci_req->ssp.rsp;
+
+ resp_buf = &isci_tmf->resp.resp_iu;
+
+ len = min_t(u32,
+ SSP_RESP_IU_MAX_SIZE,
+ be32_to_cpu(ssp_response->response_data_len));
+
+ memcpy(resp_buf, ssp_response->resp_data, len);
+}
+
+static enum sci_status
+request_started_state_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
+{
+ struct ssp_response_iu *resp_iu;
+ u8 datapres;
+
+ /* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
+ * to determine the SDMA status
+ */
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
+ /* There are times when the SCU hardware will return an early
+ * response because the io request specified more data than is
+ * returned by the target device (mode pages, inquiry data,
+ * etc.). We must check the response status to see if this is
+ * truly a failed request or a good request that just got
+ * completed early.
+ */
+ struct ssp_response_iu *resp = &sci_req->ssp.rsp;
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_swab32_cpy(&sci_req->ssp.rsp,
+ &sci_req->ssp.rsp,
+ word_cnt);
+
+ if (resp->status == 0) {
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS_IO_DONE_EARLY);
+ } else {
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_CHECK_RESPONSE,
+ SCI_FAILURE_IO_RESPONSE_VALID);
+ }
+ break;
+ }
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_swab32_cpy(&sci_req->ssp.rsp,
+ &sci_req->ssp.rsp,
+ word_cnt);
+
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_CHECK_RESPONSE,
+ SCI_FAILURE_IO_RESPONSE_VALID);
+ break;
+ }
+
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
+ /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
+ * guaranteed to be received before this completion status is
+ * posted?
+ */
+ resp_iu = &sci_req->ssp.rsp;
+ datapres = resp_iu->datapres;
+
+ if (datapres == 1 || datapres == 2) {
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_CHECK_RESPONSE,
+ SCI_FAILURE_IO_RESPONSE_VALID);
+ } else
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
+ break;
+ /* only stp device gets suspended. */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
+ if (sci_req->protocol == SCIC_STP_PROTOCOL) {
+ scic_sds_request_set_status(sci_req,
+ SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT,
+ SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
+ } else {
+ scic_sds_request_set_status(sci_req,
+ SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT,
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ }
+ break;
+
+ /* both stp/ssp device gets suspended */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
+ scic_sds_request_set_status(sci_req,
+ SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT,
+ SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
+ break;
+
+ /* neither ssp nor stp gets suspended. */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
+ default:
+ scic_sds_request_set_status(
+ sci_req,
+ SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT,
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ break;
+ }
+
+ /*
+ * TODO: This is probably wrong for ACK/NAK timeout conditions
+ */
+
+ /* In all cases we will treat this as the completion of the IO req. */
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+request_aborting_state_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
+ case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
+ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT,
+ SCI_FAILURE_IO_TERMINATED);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ /* Unless we get some strange error wait for the task abort to complete
+ * TODO: Should there be a state change for this completion?
+ */
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+ /* Currently, the decision is to simply allow the task request
+ * to timeout if the task IU wasn't received successfully.
+ * There is a potential for receiving multiple task responses if
+ * we decide to send the task IU again.
+ */
+ dev_warn(scic_to_dev(sci_req->owning_controller),
+ "%s: TaskRequest:0x%p CompletionCode:%x - "
+ "ACK/NAK timeout\n", __func__, sci_req,
+ completion_code);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+ break;
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ scic_sds_request_set_status(sci_req,
+ SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_response_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ /* In the AWAIT RESPONSE state, any TC completion is
+ * unexpected. but if the TC has success status, we
+ * complete the IO anyway.
+ */
+ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+ /* These statuses have been seen in a specific LSI
+ * expander, which sometimes is not able to send an smp
+ * response within 2 ms. This causes our hardware to break
+ * the connection and set the TC completion with one of
+ * these SMP_XXX_XX_ERR statuses. For this type of error,
+ * we ask the scic user to retry the request.
+ */
+ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR,
+ SCI_FAILURE_RETRY_REQUIRED);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be complete. If a NAK
+ * was received, then it is up to the user to retry the request
+ */
+ scic_sds_request_set_status(sci_req,
+ SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ scic_sds_request_set_status(sci_req,
+ SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req,
+ u16 ncq_tag)
+{
+ /* Note: this could be made to return an error to the user if the
+ * user attempts to set the NCQ tag in the wrong state.
+ */
+ req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
+}
+
+/**
+ *
+ * @stp_req: The STP request whose current SGL position is to be advanced.
+ *
+ * Get the next SGL element from the request:
+ *   - Check on which SGL element pair we are working.
+ *   - If working on SGL pair element A, advance to element B.
+ *   - Otherwise, check to see if there are more SGL element pairs for this
+ *     IO request; if so, advance to the next pair and return element A.
+ *
+ * Returns a pointer to the next struct scu_sgl_element, or NULL if there
+ * are no more elements.
+ */
+static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
+{
+ struct scu_sgl_element *current_sgl;
+ struct scic_sds_request *sci_req = to_sci_req(stp_req);
+ struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
+
+ if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
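+ /* a zeroed element B marks the end of the list (see scic_sds_request_build_sgl) */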
+ if (pio_sgl->sgl_pair->B.address_lower == 0 &&
+ pio_sgl->sgl_pair->B.address_upper == 0) {
+ current_sgl = NULL;
+ } else {
+ pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
+ current_sgl = &pio_sgl->sgl_pair->B;
+ }
+ } else {
+ if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
+ pio_sgl->sgl_pair->next_pair_upper == 0) {
+ current_sgl = NULL;
+ } else {
+ u64 phys_addr;
+
+ phys_addr = pio_sgl->sgl_pair->next_pair_upper;
+ phys_addr <<= 32;
+ phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
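+ /* translate the physical next-pair address back to a driver virtual address */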
+
+ pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
+ pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
+ current_sgl = &pio_sgl->sgl_pair->A;
+ }
+ }
+
+ return current_sgl;
+}
+
+static enum sci_status
+stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ scic_sds_request_set_status(sci_req,
+ SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
+
+/* Transmit a DATA FIS from (current sgl + offset) for the input
+ * parameter length. The current sgl and offset are already stored in the
+ * IO request.
+ */
+static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
+ struct scic_sds_request *sci_req,
+ u32 length)
+{
+ struct scic_sds_controller *scic = sci_req->owning_controller;
+ struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
+ struct scu_task_context *task_context;
+ struct scu_sgl_element *current_sgl;
+
+ /* Recycle the TC and reconstruct it for sending out a DATA FIS
+ * containing the data from current_sgl+offset for the input length
+ */
+ task_context = scic_sds_controller_get_task_context_buffer(scic,
+ sci_req->io_tag);
+
+ if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
+ current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
+ else
+ current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
+
+ /* update the TC */
+ task_context->command_iu_upper = current_sgl->address_upper;
+ task_context->command_iu_lower = current_sgl->address_lower;
+ task_context->transfer_length_bytes = length;
+ task_context->type.stp.fis_type = FIS_DATA;
+
+ /* send the new TC out. */
+ return scic_controller_continue_io(sci_req);
+}
+
+static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
+{
+
+ struct scu_sgl_element *current_sgl;
+ u32 sgl_offset;
+ u32 remaining_bytes_in_current_sgl = 0;
+ enum sci_status status = SCI_SUCCESS;
+ struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
+
+ sgl_offset = stp_req->type.pio.request_current.sgl_offset;
+
+ if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
+ current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
+ remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
+ } else {
+ current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
+ remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
+ }
+
+
+ if (stp_req->type.pio.pio_transfer_bytes > 0) {
+ if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
+ /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
+ status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
+ if (status == SCI_SUCCESS) {
+ stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
+
+ /* update the current sgl, sgl_offset and save for future */
+ current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
+ sgl_offset = 0;
+ }
+ } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
+ /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
+ status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
+
+ if (status == SCI_SUCCESS) {
+ /* Sgl offset will be adjusted and saved for future */
+ sgl_offset += stp_req->type.pio.pio_transfer_bytes;
+ current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
+ stp_req->type.pio.pio_transfer_bytes = 0;
+ }
+ }
+ }
+
+ if (status == SCI_SUCCESS) {
+ stp_req->type.pio.request_current.sgl_offset = sgl_offset;
+ }
+
+ return status;
+}
+
+/**
+ *
+ * @stp_request: The request that is used for the SGL processing.
+ * @data_buffer: The buffer of data to be copied.
+ * @length: The length of the data transfer.
+ *
+ * Copy the data from the buffer for the specified length to the data
+ * region specified by the IO request SGL.
+ */
+static enum sci_status
+scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
+ u8 *data_buf, u32 len)
+{
+ struct scic_sds_request *sci_req;
+ struct isci_request *ireq;
+ u8 *src_addr;
+ int copy_len;
+ struct sas_task *task;
+ struct scatterlist *sg;
+ void *kaddr;
+ int total_len = len;
+
+ sci_req = to_sci_req(stp_req);
+ ireq = sci_req_to_ireq(sci_req);
+ task = isci_request_access_task(ireq);
+ src_addr = data_buf;
+
+ if (task->num_scatter > 0) {
+ sg = task->scatter;
+
+ while (total_len > 0) {
+ struct page *page = sg_page(sg);
+
+ copy_len = min_t(int, total_len, sg_dma_len(sg));
+ kaddr = kmap_atomic(page, KM_IRQ0);
+ memcpy(kaddr + sg->offset, src_addr, copy_len);
+ kunmap_atomic(kaddr, KM_IRQ0);
+ total_len -= copy_len;
+ src_addr += copy_len;
+ sg = sg_next(sg);
+ }
+ } else {
+ BUG_ON(task->total_xfer_len < total_len);
+ memcpy(task->scatter, src_addr, total_len);
+ }
+
+ return SCI_SUCCESS;
+}
/**
- * isci_request_execute() - This function allocates the isci_request object,
- * all fills in some common fields.
- * @isci_host: This parameter specifies the ISCI host object
- * @sas_task: This parameter is the task struct from the upper layer driver.
- * @isci_request: This parameter will contain the pointer to the new
- * isci_request object.
- * @gfp_flags: This parameter specifies the os allocation flags.
*
- * SCI_SUCCESS on successfull completion, or specific failure code.
+ * @sci_req: The PIO DATA IN request that is to receive the data.
+ * @data_buffer: The buffer to copy from.
+ *
+ * Copy the data buffer to the io request data region.
*/
-int isci_request_execute(
- struct isci_host *isci_host,
- struct sas_task *task,
- struct isci_request **isci_request,
- gfp_t gfp_flags)
+static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
+ struct scic_sds_stp_request *sci_req,
+ u8 *data_buffer)
+{
+ enum sci_status status;
+
+ /*
+ * If there is less than 1K remaining in the transfer request
+ * copy just the data for the transfer */
+ if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
+ status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
+ sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
+
+ if (status == SCI_SUCCESS)
+ sci_req->type.pio.pio_transfer_bytes = 0;
+ } else {
+ /* We are transferring the whole frame so copy */
+ status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
+ sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
+
+ if (status == SCI_SUCCESS)
+ sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
+ }
+
+ return status;
+}
+
+static enum sci_status
+stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ scic_sds_request_set_status(sci_req,
+ SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status
+pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+ bool all_frames_transferred = false;
+ struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ /* Transmit data */
+ if (stp_req->type.pio.pio_transfer_bytes != 0) {
+ status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
+ if (status == SCI_SUCCESS) {
+ if (stp_req->type.pio.pio_transfer_bytes == 0)
+ all_frames_transferred = true;
+ }
+ } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
+ /*
+			 * this will happen if all the data was written the
+			 * first time, right after the PIO Setup FIS was received
+ */
+ all_frames_transferred = true;
+ }
+
+ /* all data transferred. */
+ if (all_frames_transferred) {
+ /*
+			 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
+			 * and wait for a PIO_SETUP or D2H Register FIS. */
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ }
+ break;
+
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ scic_sds_request_set_status(
+ sci_req,
+ SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static void scic_sds_stp_request_udma_complete_request(
+ struct scic_sds_request *request,
+ u32 scu_status,
+ enum sci_status sci_status)
+{
+ scic_sds_request_set_status(request, scu_status, sci_status);
+ sci_change_state(&request->sm, SCI_REQ_COMPLETED);
+}
+
+static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
+ u32 frame_index)
+{
+ struct scic_sds_controller *scic = sci_req->owning_controller;
+ struct dev_to_host_fis *frame_header;
+ enum sci_status status;
+ u32 *frame_buffer;
+
+ status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if ((status == SCI_SUCCESS) &&
+ (frame_header->fis_type == FIS_REGD2H)) {
+ scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
+ frame_header,
+ frame_buffer);
+ }
+
+ scic_sds_controller_release_frame(scic, frame_index);
+
+ return status;
+}
+
+enum sci_status
+scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
+ u32 frame_index)
+{
+ struct scic_sds_controller *scic = sci_req->owning_controller;
+ struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
+ enum sci_base_request_states state;
+ enum sci_status status;
+ ssize_t word_cnt;
+
+ state = sci_req->sm.current_state_id;
+ switch (state) {
+ case SCI_REQ_STARTED: {
+ struct ssp_frame_hdr ssp_hdr;
+ void *frame_header;
+
+ scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
+ frame_index,
+ &frame_header);
+
+ word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
+ sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
+
+ if (ssp_hdr.frame_type == SSP_RESPONSE) {
+ struct ssp_response_iu *resp_iu;
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
+ frame_index,
+ (void **)&resp_iu);
+
+ sci_swab32_cpy(&sci_req->ssp.rsp, resp_iu, word_cnt);
+
+ resp_iu = &sci_req->ssp.rsp;
+
+ if (resp_iu->datapres == 0x01 ||
+ resp_iu->datapres == 0x02) {
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_CHECK_RESPONSE,
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ } else
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
+ } else {
+ /* not a response frame, why did it get forwarded? */
+ dev_err(scic_to_dev(scic),
+ "%s: SCIC IO Request 0x%p received unexpected "
+ "frame %d type 0x%02x\n", __func__, sci_req,
+ frame_index, ssp_hdr.frame_type);
+ }
+
+ /*
+ * In any case we are done with this frame buffer return it to
+ * the controller
+ */
+ scic_sds_controller_release_frame(scic, frame_index);
+
+ return SCI_SUCCESS;
+ }
+
+ case SCI_REQ_TASK_WAIT_TC_RESP:
+ scic_sds_io_request_copy_response(sci_req);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+		scic_sds_controller_release_frame(scic, frame_index);
+ return SCI_SUCCESS;
+
+ case SCI_REQ_SMP_WAIT_RESP: {
+ struct smp_resp *rsp_hdr = &sci_req->smp.rsp;
+ void *frame_header;
+
+ scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
+ frame_index,
+ &frame_header);
+
+ /* byte swap the header. */
+ word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
+ sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
+
+ if (rsp_hdr->frame_type == SMP_RESPONSE) {
+ void *smp_resp;
+
+ scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
+ frame_index,
+ &smp_resp);
+
+ word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) /
+ sizeof(u32);
+
+ sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
+ smp_resp, word_cnt);
+
+ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_SMP_WAIT_TC_COMP);
+ } else {
+ /*
+ * This was not a response frame why did it get
+ * forwarded?
+ */
+ dev_err(scic_to_dev(scic),
+ "%s: SCIC SMP Request 0x%p received unexpected "
+ "frame %d type 0x%02x\n",
+ __func__,
+ sci_req,
+ frame_index,
+ rsp_hdr->frame_type);
+
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ }
+
+ scic_sds_controller_release_frame(scic, frame_index);
+
+ return SCI_SUCCESS;
+ }
+
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ return scic_sds_stp_request_udma_general_frame_handler(sci_req,
+ frame_index);
+
+ case SCI_REQ_STP_UDMA_WAIT_D2H:
+		/* Use the general frame handler to copy the response data */
+ status = scic_sds_stp_request_udma_general_frame_handler(sci_req,
+ frame_index);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ scic_sds_stp_request_udma_complete_request(sci_req,
+ SCU_TASK_DONE_CHECK_RESPONSE,
+ SCI_FAILURE_IO_RESPONSE_VALID);
+
+ return SCI_SUCCESS;
+
+ case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(scic_to_dev(scic),
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_REGD2H:
+ scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ /* The command has completed with error */
+ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
+ SCI_FAILURE_IO_RESPONSE_VALID);
+ break;
+
+ default:
+ dev_warn(scic_to_dev(scic),
+ "%s: IO Request:0x%p Frame Id:%d protocol "
+ "violation occurred\n", __func__, stp_req,
+ frame_index);
+
+ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
+ SCI_FAILURE_PROTOCOL_VIOLATION);
+ break;
+ }
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+
+ /* Frame has been decoded return it to the controller */
+ scic_sds_controller_release_frame(scic, frame_index);
+
+ return status;
+ }
+
+ case SCI_REQ_STP_PIO_WAIT_FRAME: {
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(scic_to_dev(scic),
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__, stp_req, frame_index, status);
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_PIO_SETUP:
+ /* Get from the frame buffer the PIO Setup Data */
+ scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+			/* Get the data from the PIO Setup. The SCU hardware
+			 * returns the first word in the frame_header and the
+			 * rest of the data in the frame buffer, so we need to
+			 * back up one dword
+ */
+
+			/* transfer_count: first 16 bits in the 4th dword */
+ stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
+
+ /* ending_status: 4th byte in the 3rd dword */
+ stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
+
+ scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
+
+ /* The next state is dependent on whether the
+ * request was PIO Data-in or Data out
+ */
+ if (task->data_dir == DMA_FROM_DEVICE) {
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_IN);
+ } else if (task->data_dir == DMA_TO_DEVICE) {
+ /* Transmit data */
+ status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
+ if (status != SCI_SUCCESS)
+ break;
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_OUT);
+ }
+ break;
+
+ case FIS_SETDEVBITS:
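+			/* Remain in the wait-for-frame state and wait for
+			 * the next FIS.
+			 */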
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ break;
+
+ case FIS_REGD2H:
+ if (frame_header->status & ATA_BUSY) {
+ /*
+ * Now why is the drive sending a D2H Register
+ * FIS when it is still busy? Do nothing since
+ * we are still in the right state.
+ */
+ dev_dbg(scic_to_dev(scic),
+ "%s: SCIC PIO Request 0x%p received "
+ "D2H Register FIS with BSY status "
+ "0x%x\n",
+ __func__,
+ stp_req,
+ frame_header->status);
+ break;
+ }
+
+ scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+			scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_CHECK_RESPONSE,
+ SCI_FAILURE_IO_RESPONSE_VALID);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ /* FIXME: what do we do here? */
+ break;
+ }
+
+ /* Frame is decoded return it to the controller */
+ scic_sds_controller_release_frame(scic, frame_index);
+
+ return status;
+ }
+
+ case SCI_REQ_STP_PIO_DATA_IN: {
+ struct dev_to_host_fis *frame_header;
+ struct sata_fis_data *frame_buffer;
+
+ status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(scic_to_dev(scic),
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+ return status;
+ }
+
+ if (frame_header->fis_type != FIS_DATA) {
+ dev_err(scic_to_dev(scic),
+ "%s: SCIC PIO Request 0x%p received frame %d "
+ "with fis type 0x%02x when expecting a data "
+ "fis.\n",
+ __func__,
+ stp_req,
+ frame_index,
+ frame_header->fis_type);
+
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_GOOD,
+ SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+
+ /* Frame is decoded return it to the controller */
+ scic_sds_controller_release_frame(scic, frame_index);
+ return status;
+ }
+
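+		/* If there is no SGL to receive into, hold on to the frame:
+		 * save its index for later retrieval and do not release it
+		 * here.  Otherwise copy the payload out and release the frame.
+		 */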
+ if (stp_req->type.pio.request_current.sgl_pair == NULL) {
+ sci_req->saved_rx_frame_index = frame_index;
+ stp_req->type.pio.pio_transfer_bytes = 0;
+ } else {
+ scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
+ (u8 *)frame_buffer);
+
+ /* Frame is decoded return it to the controller */
+ scic_sds_controller_release_frame(scic, frame_index);
+ }
+
+ /* Check for the end of the transfer, are there more
+ * bytes remaining for this data transfer
+ */
+ if (status != SCI_SUCCESS ||
+ stp_req->type.pio.pio_transfer_bytes != 0)
+ return status;
+
+ if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_CHECK_RESPONSE,
+ SCI_FAILURE_IO_RESPONSE_VALID);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ } else {
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ }
+ return status;
+ }
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
+ frame_index,
+ (void **)&frame_header);
+ if (status != SCI_SUCCESS) {
+ dev_err(scic_to_dev(scic),
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_REGD2H:
+ scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ /* The command has completed with error */
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_CHECK_RESPONSE,
+ SCI_FAILURE_IO_RESPONSE_VALID);
+ break;
+
+ default:
+ dev_warn(scic_to_dev(scic),
+ "%s: IO Request:0x%p Frame Id:%d protocol "
+ "violation occurred\n",
+ __func__,
+ stp_req,
+ frame_index);
+
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_UNEXP_FIS,
+ SCI_FAILURE_PROTOCOL_VIOLATION);
+ break;
+ }
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+
+ /* Frame has been decoded return it to the controller */
+ scic_sds_controller_release_frame(scic, frame_index);
+
+ return status;
+ }
+ case SCI_REQ_ABORTING:
+ /*
+ * TODO: Is it even possible to get an unsolicited frame in the
+ * aborting state?
+ */
+ scic_sds_controller_release_frame(scic, frame_index);
+ return SCI_SUCCESS;
+
+ default:
+ dev_warn(scic_to_dev(scic),
+ "%s: SCIC IO Request given unexpected frame %x while "
+ "in state %d\n",
+ __func__,
+ frame_index,
+ state);
+
+ scic_sds_controller_release_frame(scic, frame_index);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ scic_sds_stp_request_udma_complete_request(sci_req,
+ SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+		/* We must check the response buffer to see if the D2H
+ * Register FIS was received before we got the TC
+ * completion.
+ */
+ if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
+ scic_sds_remote_device_suspend(sci_req->target_device,
+ SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+
+ scic_sds_stp_request_udma_complete_request(sci_req,
+ SCU_TASK_DONE_CHECK_RESPONSE,
+ SCI_FAILURE_IO_RESPONSE_VALID);
+ } else {
+ /* If we have an error completion status for the
+ * TC then we can expect a D2H register FIS from
+ * the device so we must change state to wait
+ * for it
+ */
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
+ }
+ break;
+
+ /* TODO Check to see if any of these completion status need to
+ * wait for the device to host register fis.
+ */
+ /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
+ * - this comes only for B0
+ */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
+ scic_sds_remote_device_suspend(sci_req->target_device,
+ SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+ /* Fall through to the default case */
+ default:
+ /* All other completion status cause the IO to be complete. */
+ scic_sds_stp_request_udma_complete_request(sci_req,
+ SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status
+stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
+ break;
+
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ scic_sds_request_set_status(sci_req,
+ SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be complete. If
+ * a NAK was received, then it is up to the user to retry the
+ * request.
+ */
+ scic_sds_request_set_status(sci_req,
+ SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status
+scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req,
+ u32 completion_code)
{
- int ret = 0;
- struct scic_sds_remote_device *sci_device;
- enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
- struct isci_remote_device *isci_device;
- struct isci_request *request;
- unsigned long flags;
+ enum sci_base_request_states state;
+ struct scic_sds_controller *scic = sci_req->owning_controller;
- isci_device = isci_dev_from_domain_dev(task->dev);
- sci_device = to_sci_dev(isci_device);
+ state = sci_req->sm.current_state_id;
- /* do common allocation and init of request object. */
- ret = isci_request_alloc_io(
- isci_host,
- task,
- &request,
- isci_device,
- gfp_flags
- );
+ switch (state) {
+ case SCI_REQ_STARTED:
+ return request_started_state_tc_event(sci_req, completion_code);
- if (ret)
- goto out;
+ case SCI_REQ_TASK_WAIT_TC_COMP:
+ return ssp_task_request_await_tc_event(sci_req,
+ completion_code);
- status = isci_io_request_build(isci_host, request, isci_device);
- if (status == SCI_SUCCESS) {
+ case SCI_REQ_SMP_WAIT_RESP:
+ return smp_request_await_response_tc_event(sci_req,
+ completion_code);
- spin_lock_irqsave(&isci_host->scic_lock, flags);
+ case SCI_REQ_SMP_WAIT_TC_COMP:
+ return smp_request_await_tc_event(sci_req, completion_code);
- /* send the request, let the core assign the IO TAG. */
- status = scic_controller_start_io(
- isci_host->core_controller,
- sci_device,
- request->sci_request_handle,
- SCI_CONTROLLER_INVALID_IO_TAG
- );
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ return stp_request_udma_await_tc_event(sci_req,
+ completion_code);
- if (status == SCI_SUCCESS ||
- status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ return stp_request_non_data_await_h2d_tc_event(sci_req,
+ completion_code);
- /* Either I/O started OK, or the core has signaled that
- * the device needs a target reset.
- *
- * In either case, hold onto the I/O for later.
- *
- * Update it's status and add it to the list in the
- * remote device object.
- */
- isci_request_change_state(request, started);
- list_add(&request->dev_node,
- &isci_device->reqs_in_process);
-
- if (status ==
- SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
- /* Signal libsas that we need the SCSI error
- * handler thread to work on this I/O and that
- * we want a device reset.
- */
- isci_request_signal_device_reset(request);
+ case SCI_REQ_STP_PIO_WAIT_H2D:
+ return stp_request_pio_await_h2d_completion_tc_event(sci_req,
+ completion_code);
- /* Change the status, since we are holding
- * the I/O until it is managed by the SCSI
- * error handler.
- */
- status = SCI_SUCCESS;
- }
- } else
- dev_warn(&isci_host->pdev->dev,
- "%s: failed request start\n",
- __func__);
+ case SCI_REQ_STP_PIO_DATA_OUT:
+ return pio_data_out_tx_done_tc_event(sci_req, completion_code);
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+ return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req,
+ completion_code);
- } else
- dev_warn(&isci_host->pdev->dev,
- "%s: request_construct failed - status = 0x%x\n",
- __func__,
- status);
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+ return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req,
+ completion_code);
- out:
- if (status != SCI_SUCCESS) {
+ case SCI_REQ_ABORTING:
+ return request_aborting_state_tc_event(sci_req,
+ completion_code);
- /* release dma memory on failure. */
- isci_request_free(isci_host, request);
- request = NULL;
- ret = SCI_FAILURE;
+ default:
+ dev_warn(scic_to_dev(scic),
+ "%s: SCIC IO Request given task completion "
+ "notification %x while in wrong state %d\n",
+ __func__,
+ completion_code,
+ state);
+ return SCI_FAILURE_INVALID_STATE;
}
-
- *isci_request = request;
- return ret;
}
-
/**
* isci_request_process_response_iu() - This function sets the status and
* response iu, in the task struct, from the request object for the upper
{
unsigned int cstatus;
- cstatus = scic_request_get_controller_status(
- request->sci_request_handle
- );
+ cstatus = request->sci.scu_status;
dev_dbg(&request->isci_host->pdev->dev,
"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
/* Task in the target is not done. */
*response_ptr = SAS_TASK_UNDELIVERED;
*status_ptr = SAM_STAT_TASK_ABORTED;
- request->complete_in_target = false;
- *complete_to_host_ptr = isci_perform_error_io_completion;
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ request->complete_in_target = true;
+
+ *complete_to_host_ptr = isci_perform_normal_io_completion;
+ } else {
+ request->complete_in_target = false;
+
+ *complete_to_host_ptr = isci_perform_error_io_completion;
+ }
break;
}
}
{
struct sas_task *task = isci_request_access_task(request);
- isci_task_set_completion_status(task, response, status,
- task_notification_selection);
-
+ task_notification_selection
+ = isci_task_set_completion_status(task, response, status,
+ task_notification_selection);
/* Tasks aborted specifically by a call to the lldd_abort_task
* function should not be completed to the host in the regular path.
/* Normal notification (task_done) */
dev_dbg(&host->pdev->dev,
- "%s: Normal - task = %p, response=%d, status=%d\n",
+ "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
__func__,
task,
- response,
- status);
+ task->task_status.resp, response,
+ task->task_status.stat, status);
/* Add to the completed list. */
list_add(&request->completed_node,
&host->requests_to_complete);
+
+ /* Take the request off the device's pending request list. */
+ list_del_init(&request->dev_node);
break;
case isci_perform_aborted_io_completion:
- /*
- * No notification because this request is already
- * in the abort path.
+ /* No notification to libsas because this request is
+ * already in the abort path.
*/
dev_warn(&host->pdev->dev,
- "%s: Aborted - task = %p, response=%d, status=%d\n",
+ "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
__func__,
task,
- response,
- status);
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+
+ /* Wake up whatever process was waiting for this
+ * request to complete.
+ */
+ WARN_ON(request->io_request_completion == NULL);
+
+ if (request->io_request_completion != NULL) {
+
+ /* Signal whoever is waiting that this
+ * request is complete.
+ */
+ complete(request->io_request_completion);
+ }
break;
case isci_perform_error_io_completion:
/* Use sas_task_abort */
dev_warn(&host->pdev->dev,
- "%s: Error - task = %p, response=%d, status=%d\n",
+ "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
__func__,
task,
- response,
- status);
+ task->task_status.resp, response,
+ task->task_status.stat, status);
/* Add to the aborted list. */
list_add(&request->completed_node,
- &host->requests_to_abort);
+ &host->requests_to_errorback);
break;
default:
dev_warn(&host->pdev->dev,
- "%s: Unknown - task = %p, response=%d, status=%d\n",
+ "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
__func__,
task,
- response,
- status);
+ task->task_status.resp, response,
+ task->task_status.stat, status);
- /* Add to the aborted list. */
+ /* Add to the error to libsas list. */
list_add(&request->completed_node,
- &host->requests_to_abort);
+ &host->requests_to_errorback);
break;
}
}
-/**
- * isci_request_io_request_complete() - This function is called by the sci core
- * when an io request completes.
- * @isci_host: This parameter specifies the ISCI host object
- * @request: This parameter is the completed isci_request object.
- * @completion_status: This parameter specifies the completion status from the
- * sci core.
- *
- * none.
- */
-void isci_request_io_request_complete(
- struct isci_host *isci_host,
- struct isci_request *request,
- enum sci_io_status completion_status)
+static void isci_request_io_request_complete(struct isci_host *isci_host,
+ struct isci_request *request,
+ enum sci_io_status completion_status)
{
struct sas_task *task = isci_request_access_task(request);
struct ssp_response_iu *resp_iu;
void *resp_buf;
unsigned long task_flags;
- unsigned long state_flags;
- struct completion *io_request_completion;
struct isci_remote_device *isci_device = request->isci_device;
enum service_response response = SAS_TASK_UNDELIVERED;
enum exec_status status = SAS_ABORTED_TASK;
task->data_dir,
completion_status);
- spin_lock_irqsave(&request->state_lock, state_flags);
+ spin_lock(&request->state_lock);
request_status = isci_request_get_state(request);
- spin_unlock_irqrestore(&request->state_lock, state_flags);
/* Decode the request status. Note that if the request has been
* aborted by a task management function, we don't care
complete_to_host = isci_perform_aborted_io_completion;
/* This was an aborted request. */
+
+ spin_unlock(&request->state_lock);
break;
case aborting:
complete_to_host = isci_perform_aborted_io_completion;
/* This was an aborted request. */
+
+ spin_unlock(&request->state_lock);
break;
case terminating:
else
status = SAS_ABORTED_TASK;
- complete_to_host = isci_perform_normal_io_completion;
+ complete_to_host = isci_perform_aborted_io_completion;
/* This was a terminated request. */
+
+ spin_unlock(&request->state_lock);
+ break;
+
+ case dead:
+ /* This was a terminated request that timed-out during the
+ * termination process. There is no task to complete to
+ * libsas.
+ */
+ complete_to_host = isci_perform_normal_io_completion;
+ spin_unlock(&request->state_lock);
break;
default:
+ /* The request is done from an SCU HW perspective. */
+ request->status = completed;
+
+ spin_unlock(&request->state_lock);
+
/* This is an active request being completed from the core. */
switch (completion_status) {
task);
if (sas_protocol_ata(task->task_proto)) {
- resp_buf
- = scic_stp_io_request_get_d2h_reg_address(
- request->sci_request_handle
- );
+ resp_buf = &request->sci.stp.rsp;
isci_request_process_stp_response(task,
- resp_buf
- );
-
+ resp_buf);
} else if (SAS_PROTOCOL_SSP == task->task_proto) {
/* crack the iu response buffer. */
- resp_iu
- = scic_io_request_get_response_iu_address(
- request->sci_request_handle
- );
-
+ resp_iu = &request->sci.ssp.rsp;
isci_request_process_response_iu(task, resp_iu,
- &isci_host->pdev->dev
- );
+ &isci_host->pdev->dev);
} else if (SAS_PROTOCOL_SMP == task->task_proto) {
request->complete_in_target = true;
if (task->task_proto == SAS_PROTOCOL_SMP) {
-
- u8 *command_iu_address
- = scic_io_request_get_command_iu_address(
- request->sci_request_handle
- );
+ void *rsp = &request->sci.smp.rsp;
dev_dbg(&isci_host->pdev->dev,
"%s: SMP protocol completion\n",
sg_copy_from_buffer(
&task->smp_task.smp_resp, 1,
- command_iu_address
- + sizeof(struct smp_request),
- sizeof(struct smp_resp)
- );
+ rsp, sizeof(struct smp_resp));
} else if (completion_status
== SCI_IO_SUCCESS_IO_DONE_EARLY) {
* There is a possibility that less data than
* the maximum was transferred.
*/
- u32 transferred_length
- = scic_io_request_get_number_of_bytes_transferred(
- request->sci_request_handle);
+ u32 transferred_length = sci_req_tx_bytes(&request->sci);
task->task_status.residual
= task->total_xfer_len - transferred_length;
task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
spin_unlock_irqrestore(&task->task_state_lock, task_flags);
+ /* Fail the I/O. */
+ response = SAS_TASK_UNDELIVERED;
+ status = SAM_STAT_TASK_ABORTED;
+
complete_to_host = isci_perform_error_io_completion;
request->complete_in_target = false;
break;
+ case SCI_FAILURE_RETRY_REQUIRED:
+
+ /* Fail the I/O so it can be retried. */
+ response = SAS_TASK_UNDELIVERED;
+ if ((isci_device->status == isci_stopping) ||
+ (isci_device->status == isci_stopped))
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_normal_io_completion;
+ request->complete_in_target = true;
+ break;
+
+
default:
/* Catch any otherwise unhandled error codes here. */
dev_warn(&isci_host->pdev->dev,
else
status = SAS_ABORTED_TASK;
- complete_to_host = isci_perform_error_io_completion;
- request->complete_in_target = false;
+ if (SAS_PROTOCOL_SMP == task->task_proto) {
+ request->complete_in_target = true;
+ complete_to_host = isci_perform_normal_io_completion;
+ } else {
+ request->complete_in_target = false;
+ complete_to_host = isci_perform_error_io_completion;
+ }
break;
}
break;
);
/* complete the io request to the core. */
- scic_controller_complete_io(
- isci_host->core_controller,
- to_sci_dev(isci_device),
- request->sci_request_handle
- );
- /* NULL the request handle so it cannot be completed or
+ scic_controller_complete_io(&isci_host->sci,
+ &isci_device->sci,
+ &request->sci);
+	/* Set the 'terminated' flag so the request cannot be completed or
* terminated again, and to cause any calls into abort
* task to recognize the already completed case.
*/
- request->sci_request_handle = NULL;
+ request->terminated = true;
+
+ isci_host_can_dequeue(isci_host, 1);
+}
+
+static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm)
+{
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
+ struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
+ struct sas_task *task;
+
+ /* XXX as hch said always creating an internal sas_task for tmf
+ * requests would simplify the driver
+ */
+ task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
- /* Only remove the request from the remote device list
- * of pending requests if we have not requested error
- * handling on this request.
+	/* all unaccelerated request types (non-SSP, non-NCQ) are handled with
+ * substates
*/
- if (complete_to_host != isci_perform_error_io_completion)
- list_del_init(&request->dev_node);
+ if (!task && dev->dev_type == SAS_END_DEV) {
+ sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
+ } else if (!task &&
+ (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
+ isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
+ sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
+ } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
+ sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
+ } else if (task && sas_protocol_ata(task->task_proto) &&
+ !task->ata_task.use_ncq) {
+ u32 state;
+
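+		/* Non-NCQ ATA: pick the initial STP substate from the
+		 * transfer mode (non-data, UDMA, or PIO).
+		 */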
+ if (task->data_dir == DMA_NONE)
+ state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
+ else if (task->ata_task.dma_xfer)
+ state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
+ else /* PIO */
+ state = SCI_REQ_STP_PIO_WAIT_H2D;
+
+ sci_change_state(sm, state);
+ }
+}
+
+static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm)
+{
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
+ struct scic_sds_controller *scic = sci_req->owning_controller;
+ struct isci_host *ihost = scic_to_ihost(scic);
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
+
+ /* Tell the SCI_USER that the IO request is complete */
+ if (sci_req->is_task_management_request == false)
+ isci_request_io_request_complete(ihost, ireq,
+ sci_req->sci_status);
+ else
+ isci_task_request_complete(ihost, ireq, sci_req->sci_status);
+}
+
+static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
+
+ /* Setting the abort bit in the Task Context is required by the silicon. */
+ sci_req->task_context_buffer->abort = 1;
+}
+
+static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
+
+ scic_sds_remote_device_set_working_request(sci_req->target_device,
+ sci_req);
+}
+
+static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
+
+ scic_sds_remote_device_set_working_request(sci_req->target_device,
+ sci_req);
+}
+
+static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
+
+ scic_sds_remote_device_set_working_request(sci_req->target_device,
+ sci_req);
+}
+
+static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
+ struct scu_task_context *task_context;
+ struct host_to_dev_fis *h2d_fis;
+ enum sci_status status;
+
+ /* Clear the SRST bit */
+ h2d_fis = &sci_req->stp.cmd;
+ h2d_fis->control = 0;
+
+ /* Clear the TC control bit */
+ task_context = scic_sds_controller_get_task_context_buffer(
+ sci_req->owning_controller, sci_req->io_tag);
+ task_context->control_frame = 0;
+
+ status = scic_controller_continue_io(sci_req);
+ WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
+}
+
+static const struct sci_base_state scic_sds_request_state_table[] = {
+ [SCI_REQ_INIT] = { },
+ [SCI_REQ_CONSTRUCTED] = { },
+ [SCI_REQ_STARTED] = {
+ .enter_state = scic_sds_request_started_state_enter,
+ },
+ [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
+ .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
+ },
+ [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
+ [SCI_REQ_STP_PIO_WAIT_H2D] = {
+ .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
+ },
+ [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
+ [SCI_REQ_STP_PIO_DATA_IN] = { },
+ [SCI_REQ_STP_PIO_DATA_OUT] = { },
+ [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
+ [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
+ .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
+ },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
+ .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
+ },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
+ [SCI_REQ_TASK_WAIT_TC_COMP] = { },
+ [SCI_REQ_TASK_WAIT_TC_RESP] = { },
+ [SCI_REQ_SMP_WAIT_RESP] = { },
+ [SCI_REQ_SMP_WAIT_TC_COMP] = { },
+ [SCI_REQ_COMPLETED] = {
+ .enter_state = scic_sds_request_completed_state_enter,
+ },
+ [SCI_REQ_ABORTING] = {
+ .enter_state = scic_sds_request_aborting_state_enter,
+ },
+ [SCI_REQ_FINAL] = { },
+};
+
+static void
+scic_sds_general_request_construct(struct scic_sds_controller *scic,
+ struct scic_sds_remote_device *sci_dev,
+ u16 io_tag,
+ struct scic_sds_request *sci_req)
+{
+ sci_init_sm(&sci_req->sm, scic_sds_request_state_table, SCI_REQ_INIT);
+
+ sci_req->io_tag = io_tag;
+ sci_req->owning_controller = scic;
+ sci_req->target_device = sci_dev;
+ sci_req->protocol = SCIC_NO_PROTOCOL;
+ sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
+ sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
+
+ sci_req->sci_status = SCI_SUCCESS;
+ sci_req->scu_status = 0;
+ sci_req->post_context = 0xFFFFFFFF;
+
+ sci_req->is_task_management_request = false;
+
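+	/* When no IO tag is supplied, stage the task context in the request's
+	 * own buffer; otherwise use the controller's task context slot for
+	 * the given tag.
+	 */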
+ if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+ sci_req->was_tag_assigned_by_user = false;
+ sci_req->task_context_buffer = &sci_req->tc;
+ } else {
+ sci_req->was_tag_assigned_by_user = true;
+
+ sci_req->task_context_buffer =
+ scic_sds_controller_get_task_context_buffer(scic, io_tag);
+ }
+}
+
+static enum sci_status
+scic_io_request_construct(struct scic_sds_controller *scic,
+ struct scic_sds_remote_device *sci_dev,
+ u16 io_tag, struct scic_sds_request *sci_req)
+{
+ struct domain_device *dev = sci_dev_to_domain(sci_dev);
+ enum sci_status status = SCI_SUCCESS;
+
+ /* Build the common part of the request */
+ scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
+
+ if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+ return SCI_FAILURE_INVALID_REMOTE_DEVICE;
+
+ if (dev->dev_type == SAS_END_DEV)
+ /* pass */;
+ else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+ memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
+ else if (dev_is_expander(dev))
+ memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd));
+ else
+ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
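+	/* Clear the task context up to, but not including, the SGL pairs,
+	 * which are filled in later when the SGL is built.
+	 */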
+ memset(sci_req->task_context_buffer, 0,
+ offsetof(struct scu_task_context, sgl_pair_ab));
+
+ return status;
+}
+
+enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
+ struct scic_sds_remote_device *sci_dev,
+ u16 io_tag, struct scic_sds_request *sci_req)
+{
+ struct domain_device *dev = sci_dev_to_domain(sci_dev);
+ enum sci_status status = SCI_SUCCESS;
+
+ /* Build the common part of the request */
+ scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
+
+ if (dev->dev_type == SAS_END_DEV ||
+ dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ sci_req->is_task_management_request = true;
+ memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
+ } else
+ status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ return status;
+}
+
+static enum sci_status isci_request_ssp_request_construct(
+ struct isci_request *request)
+{
+ enum sci_status status;
+
+ dev_dbg(&request->isci_host->pdev->dev,
+ "%s: request = %p\n",
+ __func__,
+ request);
+ status = scic_io_request_construct_basic_ssp(&request->sci);
+ return status;
+}
+
+static enum sci_status isci_request_stp_request_construct(
+ struct isci_request *request)
+{
+ struct sas_task *task = isci_request_access_task(request);
+ enum sci_status status;
+ struct host_to_dev_fis *register_fis;
+
+ dev_dbg(&request->isci_host->pdev->dev,
+ "%s: request = %p\n",
+ __func__,
+ request);
+
+ /* Get the host_to_dev_fis from the core and copy
+ * the fis from the task into it.
+ */
+ register_fis = isci_sata_task_to_fis_copy(task);
+
+ status = scic_io_request_construct_basic_sata(&request->sci);
+
+ /* Set the ncq tag in the fis, from the queue
+ * command in the task.
+ */
+ if (isci_sata_is_task_ncq(task)) {
+
+ isci_sata_set_ncq_tag(
+ register_fis,
+ task
+ );
+ }
+
+ return status;
+}
+
+/*
+ * This function will fill in the SCU Task Context for a SMP request. The
+ * following important settings are utilized:
+ *   - task_type == SCU_TASK_TYPE_SMP. This simply indicates that a normal
+ *     request type (i.e. non-raw frame) is being utilized to perform task
+ *     management.
+ *   - control_frame == 1. This ensures that the proper endianness is set so
+ *     that the bytes are transmitted in the right order for an SMP request
+ *     frame.
+ * @sci_req: This parameter specifies the smp request object being
+ * constructed.
+ *
+ */
+static void
+scu_smp_request_construct_task_context(struct scic_sds_request *sci_req,
+ ssize_t req_len)
+{
+ dma_addr_t dma_addr;
+ struct scic_sds_remote_device *sci_dev;
+ struct scic_sds_port *sci_port;
+ struct scu_task_context *task_context;
+ ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32);
+ /* byte swap the smp request. */
+ sci_swab32_cpy(&sci_req->smp.cmd, &sci_req->smp.cmd,
+ word_cnt);
- /* Save possible completion ptr. */
- io_request_completion = request->io_request_completion;
+ task_context = scic_sds_request_get_task_context(sci_req);
- if (io_request_completion) {
+ sci_dev = scic_sds_request_get_device(sci_req);
+ sci_port = scic_sds_request_get_port(sci_req);
+
+ /*
+	 * Fill in the TC with its required data
+ * 00h
+ */
+ task_context->priority = 0;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = sci_dev->connection_rate;
+ task_context->protocol_engine_index =
+ scic_sds_controller_get_protocol_engine_group(scic);
+ task_context->logical_port_index = scic_sds_port_get_index(sci_port);
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
+ task_context->abort = 0;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ /* 04h */
+ task_context->remote_node_index = sci_dev->rnc.remote_node_index;
+ task_context->command_code = 0;
+ task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
+
+ /* 08h */
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 1;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ /* 0ch */
+ task_context->address_modifier = 0;
+
+ /* 10h */
+ task_context->ssp_command_iu_length = req_len;
+
+ /* 14h */
+ task_context->transfer_length_bytes = 0;
+
+ /*
+ * 18h ~ 30h, protocol specific
+	 * since the command IU has been built by the framework at this point,
+	 * we just copy the first dword from the command IU to this location. */
+ memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32));
+
+ /*
+ * 40h
+ * "For SMP you could program it to zero. We would prefer that way
+ * so that done code will be consistent." - Venki
+ */
+ task_context->task_phase = 0;
- /* This is inherantly a regular I/O request,
- * since we are currently in the regular
- * I/O completion callback function.
- * Signal whoever is waiting that this
- * request is complete.
+ if (sci_req->was_tag_assigned_by_user) {
+ /*
+ * Build the task context now since we have already read
+ * the data
+ */
+ sci_req->post_context =
+ (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (scic_sds_controller_get_protocol_engine_group(scic) <<
+ SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (scic_sds_port_get_index(sci_port) <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ scic_sds_io_tag_get_index(sci_req->io_tag));
+ } else {
+ /*
+ * Build the task context now since we have already read
+ * the data.
+ * I/O tag index is not assigned because we have to wait
+ * until we get a TCi.
*/
- complete(io_request_completion);
+ sci_req->post_context =
+ (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (scic_sds_controller_get_protocol_engine_group(scic) <<
+ SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (scic_sds_port_get_index(sci_port) <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
}
- isci_host_can_dequeue(isci_host, 1);
+ /*
+	 * Copy the physical address of the command buffer into the SCU Task
+	 * Context; the command buffer should not contain the command header.
+ */
+ dma_addr = scic_io_request_get_dma_addr(sci_req,
+ ((char *) &sci_req->smp.cmd) +
+ sizeof(u32));
+
+ task_context->command_iu_upper = upper_32_bits(dma_addr);
+ task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+ /* SMP response comes as UF, so no need to set response IU address. */
+ task_context->response_iu_upper = 0;
+ task_context->response_iu_lower = 0;
}
-/**
- * isci_request_io_request_get_transfer_length() - This function is called by
- * the sci core to retrieve the transfer length for a given request.
- * @request: This parameter is the isci_request object.
+static enum sci_status
+scic_io_request_construct_smp(struct scic_sds_request *sci_req)
+{
+ struct smp_req *smp_req = &sci_req->smp.cmd;
+
+ sci_req->protocol = SCIC_SMP_PROTOCOL;
+
+ /*
+	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
+ * functions under SAS 2.0, a zero request length really indicates
+ * a non-zero default length.
+ */
+ if (smp_req->req_len == 0) {
+ switch (smp_req->func) {
+ case SMP_DISCOVER:
+ case SMP_REPORT_PHY_ERR_LOG:
+ case SMP_REPORT_PHY_SATA:
+ case SMP_REPORT_ROUTE_INFO:
+ smp_req->req_len = 2;
+ break;
+ case SMP_CONF_ROUTE_INFO:
+ case SMP_PHY_CONTROL:
+ case SMP_PHY_TEST_FUNCTION:
+ smp_req->req_len = 9;
+ break;
+ /* Default - zero is a valid default for 2.0. */
+ }
+ }
+
+ scu_smp_request_construct_task_context(sci_req, smp_req->req_len);
+
+ sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+/*
+ * isci_smp_request_build() - This function builds the smp request.
+ * @ireq: This parameter points to the isci_request allocated in the
+ * request construct function.
*
- * length of transfer for specified request.
+ * SCI_SUCCESS on successful completion, or specific failure code.
*/
-u32 isci_request_io_request_get_transfer_length(struct isci_request *request)
+static enum sci_status isci_smp_request_build(struct isci_request *ireq)
{
- struct sas_task *task = isci_request_access_task(request);
+ enum sci_status status = SCI_FAILURE;
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct scic_sds_request *sci_req = &ireq->sci;
- dev_dbg(&request->isci_host->pdev->dev,
- "%s: total_xfer_len: %d\n",
+ dev_dbg(&ireq->isci_host->pdev->dev,
+ "%s: request = %p\n", __func__, ireq);
+
+ dev_dbg(&ireq->isci_host->pdev->dev,
+ "%s: smp_req len = %d\n",
__func__,
- task->total_xfer_len);
- return task->total_xfer_len;
-}
+ task->smp_task.smp_req.length);
+	/* copy the smp command into the request's command buffer */
+ sg_copy_to_buffer(&task->smp_task.smp_req, 1,
+ &sci_req->smp.cmd,
+ sizeof(struct smp_req));
-/**
- * isci_request_io_request_get_data_direction() - This function is called by
- * the sci core to retrieve the data direction for a given request.
- * @request: This parameter is the isci_request object.
- *
- * data direction for specified request.
- */
-enum dma_data_direction isci_request_io_request_get_data_direction(
- struct isci_request *request)
-{
- struct sas_task *task = isci_request_access_task(request);
+ status = scic_io_request_construct_smp(sci_req);
+ if (status != SCI_SUCCESS)
+ dev_warn(&ireq->isci_host->pdev->dev,
+ "%s: failed with status = %d\n",
+ __func__,
+ status);
- return task->data_dir;
+ return status;
}
/**
- * isci_request_sge_get_address_field() - This function is called by the sci
- * core to retrieve the address field contents for a given sge.
- * @request: This parameter is the isci_request object.
- * @sge_address: This parameter is the sge.
+ * isci_io_request_build() - This function builds the io request object.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @request: This parameter points to the isci_request object allocated in the
+ * request construct function.
+ * @sci_device: This parameter is the handle for the sci core's remote device
+ * object that is the destination for this request.
*
- * physical address in the specified sge.
+ * SCI_SUCCESS on successfull completion, or specific failure code.
*/
-dma_addr_t isci_request_sge_get_address_field(
+static enum sci_status isci_io_request_build(
+ struct isci_host *isci_host,
struct isci_request *request,
- void *sge_address)
+ struct isci_remote_device *isci_device)
{
+ enum sci_status status = SCI_SUCCESS;
struct sas_task *task = isci_request_access_task(request);
- dma_addr_t ret;
- struct isci_host *isci_host = isci_host_from_sas_ha(
- task->dev->port->ha);
+ struct scic_sds_remote_device *sci_device = &isci_device->sci;
dev_dbg(&isci_host->pdev->dev,
- "%s: request = %p, sge_address = %p\n",
+ "%s: isci_device = 0x%p; request = %p, "
+ "num_scatter = %d\n",
__func__,
+ isci_device,
request,
- sge_address);
-
- if (task->data_dir == PCI_DMA_NONE)
- return 0;
+ task->num_scatter);
- /* the case where num_scatter == 0 is special, in that
- * task->scatter is the actual buffer address, not an sgl.
- * so a map single is required here.
+ /* map the sgl addresses, if present.
+ * libata does the mapping for sata devices
+ * before we get the request.
*/
- if ((task->num_scatter == 0) &&
- !sas_protocol_ata(task->task_proto)) {
- ret = dma_map_single(
+ if (task->num_scatter &&
+ !sas_protocol_ata(task->task_proto) &&
+ !(SAS_PROTOCOL_SMP & task->task_proto)) {
+
+ request->num_sg_entries = dma_map_sg(
&isci_host->pdev->dev,
task->scatter,
- task->total_xfer_len,
+ task->num_scatter,
task->data_dir
);
- request->zero_scatter_daddr = ret;
- } else
- ret = sg_dma_address(((struct scatterlist *)sge_address));
- dev_dbg(&isci_host->pdev->dev,
- "%s: bus address = %lx\n",
- __func__,
- (unsigned long)ret);
+ if (request->num_sg_entries == 0)
+ return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ }
- return ret;
-}
+ /* build the common request object. For now,
+ * we will let the core allocate the IO tag.
+ */
+ status = scic_io_request_construct(&isci_host->sci, sci_device,
+ SCI_CONTROLLER_INVALID_IO_TAG,
+ &request->sci);
+
+ if (status != SCI_SUCCESS) {
+ dev_warn(&isci_host->pdev->dev,
+ "%s: failed request construct\n",
+ __func__);
+ return SCI_FAILURE;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SMP:
+ status = isci_smp_request_build(request);
+ break;
+ case SAS_PROTOCOL_SSP:
+ status = isci_request_ssp_request_construct(request);
+ break;
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ status = isci_request_stp_request_construct(request);
+ break;
+ default:
+ dev_warn(&isci_host->pdev->dev,
+ "%s: unknown protocol\n", __func__);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+}
/**
- * isci_request_sge_get_length_field() - This function is called by the sci
- * core to retrieve the length field contents for a given sge.
- * @request: This parameter is the isci_request object.
- * @sge_address: This parameter is the sge.
+ * isci_request_alloc_core() - This function gets the request object from the
+ * isci_host dma cache.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @isci_request: This parameter will contain the pointer to the new
+ * isci_request object.
+ * @isci_device: This parameter is the pointer to the isci remote device object
+ * that is the destination for this request.
+ * @gfp_flags: This parameter specifies the os allocation flags.
*
- * length field value in the specified sge.
+ * SCI_SUCCESS on successful completion, or specific failure code.
*/
-u32 isci_request_sge_get_length_field(
- struct isci_request *request,
- void *sge_address)
+static int isci_request_alloc_core(
+ struct isci_host *isci_host,
+ struct isci_request **isci_request,
+ struct isci_remote_device *isci_device,
+ gfp_t gfp_flags)
{
- struct sas_task *task = isci_request_access_task(request);
- int ret;
-
- dev_dbg(&request->isci_host->pdev->dev,
- "%s: request = %p, sge_address = %p\n",
- __func__,
- request,
- sge_address);
+ int ret = 0;
+ dma_addr_t handle;
+ struct isci_request *request;
- if (task->data_dir == PCI_DMA_NONE)
- return 0;
- /* the case where num_scatter == 0 is special, in that
- * task->scatter is the actual buffer address, not an sgl.
- * so we return total_xfer_len here.
+	/* get pointer to dma memory. This actually points
+	 * to both the isci_request object and the embedded
+	 * sci request object. The isci object is at the beginning
+ * of the memory allocated here.
*/
- if (task->num_scatter == 0)
- ret = task->total_xfer_len;
- else
- ret = sg_dma_len((struct scatterlist *)sge_address);
+ request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle);
+ if (!request) {
+ dev_warn(&isci_host->pdev->dev,
+ "%s: dma_pool_alloc returned NULL\n", __func__);
+ return -ENOMEM;
+ }
- dev_dbg(&request->isci_host->pdev->dev,
- "%s: len = %d\n",
- __func__,
- ret);
+ /* initialize the request object. */
+ spin_lock_init(&request->state_lock);
+ request->request_daddr = handle;
+ request->isci_host = isci_host;
+ request->isci_device = isci_device;
+ request->io_request_completion = NULL;
+ request->terminated = false;
+
+ request->num_sg_entries = 0;
+
+ request->complete_in_target = false;
+
+ INIT_LIST_HEAD(&request->completed_node);
+ INIT_LIST_HEAD(&request->dev_node);
+
+ *isci_request = request;
+ isci_request_change_state(request, allocated);
return ret;
}
-
-/**
- * isci_request_ssp_io_request_get_cdb_address() - This function is called by
- * the sci core to retrieve the cdb address for a given request.
- * @request: This parameter is the isci_request object.
- *
- * cdb address for specified request.
- */
-void *isci_request_ssp_io_request_get_cdb_address(
- struct isci_request *request)
+static int isci_request_alloc_io(
+ struct isci_host *isci_host,
+ struct sas_task *task,
+ struct isci_request **isci_request,
+ struct isci_remote_device *isci_device,
+ gfp_t gfp_flags)
{
- struct sas_task *task = isci_request_access_task(request);
+ int retval = isci_request_alloc_core(isci_host, isci_request,
+ isci_device, gfp_flags);
- dev_dbg(&request->isci_host->pdev->dev,
- "%s: request->task->ssp_task.cdb = %p\n",
- __func__,
- task->ssp_task.cdb);
- return task->ssp_task.cdb;
-}
+ if (!retval) {
+ (*isci_request)->ttype_ptr.io_task_ptr = task;
+ (*isci_request)->ttype = io_task;
+ task->lldd_task = *isci_request;
+ }
+ return retval;
+}
/**
- * isci_request_ssp_io_request_get_cdb_length() - This function is called by
- * the sci core to retrieve the cdb length for a given request.
- * @request: This parameter is the isci_request object.
+ * isci_request_alloc_tmf() - This function gets the request object from the
+ * isci_host dma cache and initializes the relevant fields as a sas_task.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @sas_task: This parameter is the task struct from the upper layer driver.
+ * @isci_request: This parameter will contain the pointer to the new
+ * isci_request object.
+ * @isci_device: This parameter is the pointer to the isci remote device object
+ * that is the destination for this request.
+ * @gfp_flags: This parameter specifies the os allocation flags.
*
- * cdb length for specified request.
+ * SCI_SUCCESS on successful completion, or specific failure code.
*/
-u32 isci_request_ssp_io_request_get_cdb_length(
- struct isci_request *request)
+int isci_request_alloc_tmf(
+ struct isci_host *isci_host,
+ struct isci_tmf *isci_tmf,
+ struct isci_request **isci_request,
+ struct isci_remote_device *isci_device,
+ gfp_t gfp_flags)
{
- return 16;
-}
+ int retval = isci_request_alloc_core(isci_host, isci_request,
+ isci_device, gfp_flags);
+
+ if (!retval) {
+ (*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf;
+ (*isci_request)->ttype = tmf_task;
+ }
+ return retval;
+}
/**
- * isci_request_ssp_io_request_get_lun() - This function is called by the sci
- * core to retrieve the lun for a given request.
- * @request: This parameter is the isci_request object.
+ * isci_request_execute() - This function allocates the isci_request object,
+ * and fills in some common fields.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @sas_task: This parameter is the task struct from the upper layer driver.
+ * @isci_request: This parameter will contain the pointer to the new
+ * isci_request object.
+ * @gfp_flags: This parameter specifies the os allocation flags.
*
- * lun for specified request.
+ * SCI_SUCCESS on successful completion, or specific failure code.
*/
-u32 isci_request_ssp_io_request_get_lun(
- struct isci_request *request)
+int isci_request_execute(
+ struct isci_host *isci_host,
+ struct sas_task *task,
+ struct isci_request **isci_request,
+ gfp_t gfp_flags)
{
- struct sas_task *task = isci_request_access_task(request);
-
-#ifdef DEBUG
- int i;
-
- for (i = 0; i < 8; i++)
- dev_dbg(&request->isci_host->pdev->dev,
- "%s: task->ssp_task.LUN[%d] = %x\n",
- __func__, i, task->ssp_task.LUN[i]);
+ int ret = 0;
+ struct scic_sds_remote_device *sci_device;
+ enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+ struct isci_remote_device *isci_device;
+ struct isci_request *request;
+ unsigned long flags;
-#endif
+ isci_device = task->dev->lldd_dev;
+ sci_device = &isci_device->sci;
- return task->ssp_task.LUN[0];
-}
+ /* do common allocation and init of request object. */
+ ret = isci_request_alloc_io(
+ isci_host,
+ task,
+ &request,
+ isci_device,
+ gfp_flags
+ );
+ if (ret)
+ goto out;
-/**
- * isci_request_ssp_io_request_get_task_attribute() - This function is called
- * by the sci core to retrieve the task attribute for a given request.
- * @request: This parameter is the isci_request object.
- *
- * task attribute for specified request.
- */
-u32 isci_request_ssp_io_request_get_task_attribute(
- struct isci_request *request)
-{
- struct sas_task *task = isci_request_access_task(request);
+ status = isci_io_request_build(isci_host, request, isci_device);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&isci_host->pdev->dev,
+ "%s: request_construct failed - status = 0x%x\n",
+ __func__,
+ status);
+ goto out;
+ }
- dev_dbg(&request->isci_host->pdev->dev,
- "%s: request->task->ssp_task.task_attr = %x\n",
- __func__,
- task->ssp_task.task_attr);
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
- return task->ssp_task.task_attr;
-}
+ /* send the request, let the core assign the IO TAG. */
+ status = scic_controller_start_io(&isci_host->sci, sci_device,
+ &request->sci,
+ SCI_CONTROLLER_INVALID_IO_TAG);
+ if (status != SCI_SUCCESS &&
+ status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ dev_warn(&isci_host->pdev->dev,
+ "%s: failed request start (0x%x)\n",
+ __func__, status);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ goto out;
+ }
+ /* Either I/O started OK, or the core has signaled that
+ * the device needs a target reset.
+ *
+ * In either case, hold onto the I/O for later.
+ *
+	 * Update its status and add it to the list in the
+ * remote device object.
+ */
+ list_add(&request->dev_node, &isci_device->reqs_in_process);
-/**
- * isci_request_ssp_io_request_get_command_priority() - This function is called
- * by the sci core to retrieve the command priority for a given request.
- * @request: This parameter is the isci_request object.
- *
- * command priority for specified request.
- */
-u32 isci_request_ssp_io_request_get_command_priority(
- struct isci_request *request)
-{
- struct sas_task *task = isci_request_access_task(request);
+ if (status == SCI_SUCCESS) {
+ /* Save the tag for possible task mgmt later. */
+ request->io_tag = request->sci.io_tag;
+ isci_request_change_state(request, started);
+ } else {
+ /* The request did not really start in the
+		 * hardware, so set the 'terminated' flag here so
+		 * no terminations will be attempted on it.
+ */
+ request->terminated = true;
+ isci_request_change_state(request, completed);
+ }
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ if (status ==
+ SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ /* Signal libsas that we need the SCSI error
+ * handler thread to work on this I/O and that
+ * we want a device reset.
+ */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* Cause this task to be scheduled in the SCSI error
+ * handler thread.
+ */
+ isci_execpath_callback(isci_host, task,
+ sas_task_abort);
+
+ /* Change the status, since we are holding
+ * the I/O until it is managed by the SCSI
+ * error handler.
+ */
+ status = SCI_SUCCESS;
+ }
- dev_dbg(&request->isci_host->pdev->dev,
- "%s: request->task->ssp_task.task_prio = %x\n",
- __func__,
- task->ssp_task.task_prio);
+ out:
+ if (status != SCI_SUCCESS) {
+ /* release dma memory on failure. */
+ isci_request_free(isci_host, request);
+ request = NULL;
+ ret = SCI_FAILURE;
+ }
- return task->ssp_task.task_prio;
+ *isci_request = request;
+ return ret;
}