/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
							int idx)
{
	if (idx == 0)
		return &ireq->tc->sgl_pair_ab;
	else if (idx == 1)
		return &ireq->tc->sgl_pair_cd;
	else if (idx < 0)
		return NULL;
	else
		return &ireq->sg_table[idx - 2];
}
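
/*
 * A sketch of the SGL pair layout implemented above (indices are the
 * caller's logical pair numbers):
 *
 *   idx 0  -> ireq->tc->sgl_pair_ab   (embedded in the task context)
 *   idx 1  -> ireq->tc->sgl_pair_cd   (embedded in the task context)
 *   idx >1 -> ireq->sg_table[idx - 2] (external, DMA-mapped pair table)
 *
 * to_sgl_element_pair_dma() below resolves the same index to a bus address
 * so that pairs can be chained through next_pair_{upper,lower}.
 */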

static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
					  struct isci_request *ireq, u32 idx)
{
	u32 offset;

	if (idx == 0) {
		offset = (void *) &ireq->tc->sgl_pair_ab -
			 (void *) &ihost->task_context_table[0];
		return ihost->task_context_dma + offset;
	} else if (idx == 1) {
		offset = (void *) &ireq->tc->sgl_pair_cd -
			 (void *) &ihost->task_context_table[0];
		return ihost->task_context_dma + offset;
	}

	return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
}

static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
{
	e->length = sg_dma_len(sg);
	e->address_upper = upper_32_bits(sg_dma_address(sg));
	e->address_lower = lower_32_bits(sg_dma_address(sg));
	e->address_modifier = 0;
}
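
/*
 * Descriptive note: the upper/lower split programs a 64-bit bus address
 * through the SCU's two 32-bit SGL address words; address_modifier is not
 * used by this driver and is always cleared.
 */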

static void sci_request_build_sgl(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->isci_host;
	struct sas_task *task = isci_request_access_task(ireq);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg   = NULL;
	struct scu_sgl_element_pair *prev_sg  = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = to_sgl_element_pair(ireq, sg_idx);
			init_sgl_element(&scu_sg->A, sg);
			sg = sg_next(sg);
			if (sg) {
				init_sgl_element(&scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				memset(&scu_sg->B, 0, sizeof(scu_sg->B));

			if (prev_sg) {
				dma_addr = to_sgl_element_pair_dma(ihost,
								   ireq,
								   sg_idx);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = to_sgl_element_pair(ireq, sg_idx);

		dma_addr = dma_map_single(&ihost->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		ireq->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
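
/*
 * Worked example (illustrative): a five-entry scatterlist yields three
 * pairs -- sgl_pair_ab holds entries 0/1, sgl_pair_cd holds entries 2/3,
 * and sg_table[0] holds entry 4 with a zeroed B element.  Each pair's
 * next_pair_{upper,lower} carries the bus address of its successor, and
 * the final pair's link is zeroed above to terminate the chain.
 */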

static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
{
	struct ssp_cmd_iu *cmd_iu;
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &ireq->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}

static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
{
	struct ssp_task_iu *task_iu;
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &ireq->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(test_bit(IREQ_TMF, &ireq->flags)) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * This method will fill in the SCU Task Context for any type of SSP request.
 * @ireq: the request whose task context is being constructed
 * @task_context: the SCU task context buffer to be filled in
 */
static void scu_ssp_reqeust_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = ireq->io_tag; */
	task_context->task_phase = 0x01;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}
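
/*
 * Descriptive note: the post_context word assembled above is the doorbell
 * value eventually written to the SCU post queue for this request -- the
 * request-type bits, protocol engine group, logical port, and the task
 * context index (TCI) from io_tag are OR'd into one word.
 * sci_request_start() ORs the TCI in again before the request is posted.
 */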

/**
 * This method will fill in the SCU Task Context for an SSP IO request.
 * @ireq: the IO request whose task context is being constructed
 * @dir: the DMA data direction of the IO
 * @len: the IO transfer length in bytes
 */
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
						      enum dma_data_direction dir,
						      u32 len)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_ssp_reqeust_construct_task_context(ireq, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		sci_request_build_sgl(ireq);
}

/**
 * This method will fill in the SCU Task Context for an SSP Task request.
 * The following important settings are utilized:
 *
 *   -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *      request is issued ahead of other tasks destined for the same
 *      Remote Node.
 *   -# task_type == SCU_TASK_TYPE_RAW_FRAME.  The task IU is transmitted
 *      as a raw task frame rather than as a normal IO request.
 *   -# control_frame == 1.  This ensures that the proper endianness is
 *      set so that the bytes are transmitted in the right order for a
 *      task frame.
 *
 * @ireq: This parameter specifies the task request object being
 *    constructed.
 */
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_ssp_reqeust_construct_task_context(ireq, task_context);

	task_context->control_frame                = 1;
	task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes        = 0;
	task_context->type.ssp.frame_type          = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * This method will fill in the SCU Task Context for any type of SATA
 *    request.  It is called from the various SATA constructors.
 * @ireq: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * On return, the general IO request construction is complete and the buffer
 * assignment for the command buffer is complete.  TODO: Revisit task context
 * construction to determine what is common for SSP/SMP/STP task context
 * structures.
 */
static void scu_sata_reqeust_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context.  We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq,
						((char *) &ireq->stp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}

static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_sata_reqeust_construct_task_context(ireq, task_context);

	task_context->control_frame         = 0;
	task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type     = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}
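
/*
 * Note on the length arithmetic above: the first dword of the H2D register
 * FIS travels inside the task context itself (type.words[0], set in
 * scu_sata_reqeust_construct_task_context()), so the raw frame only needs
 * to transfer the remaining sizeof(struct host_to_dev_fis) - sizeof(u32)
 * bytes from the DMA'd command buffer.
 */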

static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
							  bool copy_rx_frame)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;

	scu_stp_raw_request_construct_task_context(ireq);

	stp_req->status = 0;
	stp_req->sgl.offset = 0;
	stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		sci_request_build_sgl(ireq);
		stp_req->sgl.index = 0;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		stp_req->sgl.index = -1;
	}

	return SCI_SUCCESS;
}

/**
 * @ireq: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    a UDMA request or an NCQ request (the callers pass SCU_TASK_TYPE_DMA_IN
 *    for UDMA and SCU_TASK_TYPE_FPDMAQ_READ for NCQ).
 * @len: the transfer length in bytes
 * @dir: the DMA data direction
 *
 * This method performs request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
 */
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
						     u8 optimized_task_type,
						     u32 len,
						     enum dma_data_direction dir)
{
	struct scu_task_context *task_context = ireq->tc;

	/* Build the STP task context structure */
	scu_sata_reqeust_construct_task_context(ireq, task_context);

	/* Copy over the SGL elements */
	sci_request_build_sgl(ireq);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task type
		 * values is consistent with the difference between FPDMA READ
		 * and FPDMA WRITE values.  Add the supplied task type parameter
		 * to this difference to set the task type properly for this
		 * DATA OUT (WRITE) case. */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type. */
		task_context->task_type = optimized_task_type;
	}
}
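
/*
 * Illustrative example (assuming the task-type encoding keeps the read and
 * write values evenly spaced, as the comment above states): for an NCQ
 * write, optimized_task_type == SCU_TASK_TYPE_FPDMAQ_READ combined with
 * dir == DMA_TO_DEVICE yields
 *
 *   SCU_TASK_TYPE_FPDMAQ_READ + (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN)
 *
 * i.e. the corresponding FPDMA WRITE task type.
 */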

static void sci_atapi_construct(struct isci_request *ireq)
{
	struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
	struct sas_task *task;

	/* To simplify the implementation we take advantage of the
	 * silicon's partial acceleration of atapi protocol (dma data
	 * transfers), so we promote all commands to dma protocol.  This
	 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
	 */
	h2d_fis->features |= ATAPI_PKT_DMA;

	scu_stp_raw_request_construct_task_context(ireq);

	task = isci_request_access_task(ireq);
	if (task->data_dir == DMA_NONE)
		task->total_xfer_len = 0;

	/* clear the response so we can detect arrival of an
	 * unsolicited d2h fis
	 */
	ireq->stp.rsp.fis_type = 0;
}

static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
			       u32 len,
			       enum dma_data_direction dir,
			       bool copy)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct domain_device *dev = ireq->target_device->domain_dev;

	/* check for management protocols */
	if (test_bit(IREQ_TMF, &ireq->flags)) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(ireq);
			return SCI_SUCCESS;
		} else {
			dev_err(&ireq->owning_controller->pdev->dev,
				"%s: Request 0x%p received unhandled SAT "
				"management protocol 0x%x.\n",
				__func__, ireq, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(&ireq->owning_controller->pdev->dev,
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* ATAPI */
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
	    task->ata_task.fis.command == ATA_CMD_PACKET) {
		sci_atapi_construct(ireq);
		return SCI_SUCCESS;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(ireq);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		sci_stp_optimized_request_construct(ireq,
							 SCU_TASK_TYPE_FPDMAQ_READ,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		sci_stp_optimized_request_construct(ireq,
							 SCU_TASK_TYPE_DMA_IN,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* PIO */
	return sci_stp_pio_request_construct(ireq, copy);
}

static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(ireq,
						  task->data_dir,
						  task->total_xfer_len);

	sci_io_request_build_ssp_command_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status sci_task_request_construct_ssp(
	struct isci_request *ireq)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(ireq);

	/* Fill in the SSP Task IU */
	sci_task_request_build_ssp_task_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
{
	enum sci_status status;
	bool copy;
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SCIC_STP_PROTOCOL;

	copy = (task->data_dir != DMA_NONE);

	status = sci_io_request_construct_sata(ireq,
						task->total_xfer_len,
						task->data_dir,
						copy);

	if (status == SCI_SUCCESS)
		sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return status;
}

enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
{
	/* check for management protocols */
	if (test_bit(IREQ_TMF, &ireq->flags)) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(ireq);
		} else {
			dev_err(&ireq->owning_controller->pdev->dev,
				"%s: Request 0x%p received unhandled SAT "
				"protocol 0x%x.\n",
				__func__, ireq, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @ireq: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;
	u32 ret_val = 0;

	if (readl(&ihost->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = ihost->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct sci_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
	}

	return ret_val;
}
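
/*
 * Worked example (hypothetical tag): for ISCI_TAG_TCI(ireq->io_tag) == 2,
 * the readl() above lands at
 *
 *   scu_reg_base + 0x200000
 *                + offsetof(struct scu_task_context, type.ssp.data_offset)
 *                + 2 * sizeof(struct scu_task_context)
 *
 * i.e. the data_offset field of the third shadow task context in SCU SRAM,
 * which reflects the bytes transferred when a reply underruns the request.
 */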

enum sci_status sci_request_start(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct scu_task_context *tc = ireq->tc;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (state != SCI_REQ_CONSTRUCTED) {
		dev_warn(&ihost->pdev->dev,
			"%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

	switch (tc->protocol_type) {
	case SCU_TASK_CONTEXT_PROTOCOL_SMP:
	case SCU_TASK_CONTEXT_PROTOCOL_SSP:
		/* SSP/SMP Frame */
		tc->type.ssp.tag = ireq->io_tag;
		tc->type.ssp.target_port_transfer_tag = 0xFFFF;
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_STP:
		/* STP/SATA Frame
		 * tc->type.stp.ncq_tag = ireq->ncq_tag;
		 */
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_NONE:
		/* TODO: When do we set no protocol type? */
		break;

	default:
		/* This should never happen since we build the IO
		 * requests */
		break;
	}

	/* Add to the post_context the io tag value */
	ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

	/* Everything is good go ahead and change state */
	sci_change_state(&ireq->sm, SCI_REQ_STARTED);

	return SCI_SUCCESS;
}

enum sci_status
sci_io_request_terminate(struct isci_request *ireq)
{
	enum sci_base_request_states state;

	state = ireq->sm.current_state_id;

	switch (state) {
	case SCI_REQ_CONSTRUCTED:
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_STARTED:
	case SCI_REQ_TASK_WAIT_TC_COMP:
	case SCI_REQ_SMP_WAIT_RESP:
	case SCI_REQ_SMP_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_D2H:
	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
	case SCI_REQ_STP_NON_DATA_WAIT_D2H:
	case SCI_REQ_STP_PIO_WAIT_H2D:
	case SCI_REQ_STP_PIO_WAIT_FRAME:
	case SCI_REQ_STP_PIO_DATA_IN:
	case SCI_REQ_STP_PIO_DATA_OUT:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
	case SCI_REQ_ATAPI_WAIT_H2D:
	case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
	case SCI_REQ_ATAPI_WAIT_D2H:
	case SCI_REQ_ATAPI_WAIT_TC_COMP:
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		return SCI_SUCCESS;
	case SCI_REQ_TASK_WAIT_TC_RESP:
		/* The task frame was already confirmed to have been
		 * sent by the SCU HW.  Since the state machine is
		 * now only waiting for the task response itself,
		 * abort the request and complete it immediately
		 * and don't wait for the task response.
		 */
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_ABORTING:
		/* If a request has a termination requested twice, return
		 * a failure indication, since HW confirmation of the first
		 * abort is still outstanding.
		 */
	case SCI_REQ_COMPLETED:
	default:
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n",
			 __func__,
			 ireq->sm.current_state_id);
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status sci_request_complete(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
		      "isci: request completion from wrong state (%d)\n", state))
		return SCI_FAILURE_INVALID_STATE;

	if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
		sci_controller_release_frame(ihost,
						  ireq->saved_rx_frame_index);

	/* XXX can we just stop the machine and remove the 'final' state? */
	sci_change_state(&ireq->sm, SCI_REQ_FINAL);
	return SCI_SUCCESS;
}

enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
						  u32 event_code)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;

	if (state != SCI_REQ_STP_PIO_DATA_IN) {
		dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
			 __func__, event_code, state);

		return SCI_FAILURE_INVALID_STATE;
	}

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/* We are waiting for data and the SCU has R_ERR the data frame.
		 * Go back to waiting for the D2H Register FIS
		 */
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		return SCI_SUCCESS;
	default:
		dev_err(&ihost->pdev->dev,
			"%s: pio request unexpected event %#x\n",
			__func__, event_code);

		/* TODO Should we fail the PIO request when we get an
		 * unexpected event?
		 */
		return SCI_FAILURE;
	}
}

/*
 * This function copies response data for requests returning response data
 *    instead of sense data.
 * @ireq: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void sci_io_request_copy_response(struct isci_request *ireq)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &ireq->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
	 * to determine the SDMA status.
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &ireq->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		} else {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &ireq->ssp.rsp;
		datapres = resp_iu->datapres;

		if (datapres == 1 || datapres == 2) {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		} else {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS;
		}
		break;
	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (ireq->protocol == SCIC_STP_PROTOCOL) {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		} else {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
	return SCI_SUCCESS;
}

static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error wait for the task abort to complete
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses if
		 * we decide to send the task IU again.
		 */
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, ireq,
			 completion_code);

		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
				    u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* In the AWAIT RESPONSE state, any TC completion is
		 * unexpected.  but if the TC has success status, we
		 * complete the IO anyway.
		 */
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/* These statuses have been seen with a specific LSI
		 * expander, which sometimes is not able to send an smp
		 * response within 2 ms.  This causes our hardware to break
		 * the connection and set the TC completion with one of
		 * these SMP_XXX_XX_ERR statuses.  For this type of error,
		 * we ask the ihost user to retry the request.
		 */
		ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
		ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be complete.  If a NAK
		 * was received, then it is up to the user to retry the request
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
			   u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
{
	struct scu_sgl_element *sgl;
	struct scu_sgl_element_pair *sgl_pair;
	struct isci_request *ireq = to_ireq(stp_req);
	struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;

	sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
	if (!sgl_pair)
		sgl = NULL;
	else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
		if (sgl_pair->B.address_lower == 0 &&
		    sgl_pair->B.address_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
			sgl = &sgl_pair->B;
		}
	} else {
		if (sgl_pair->next_pair_lower == 0 &&
		    sgl_pair->next_pair_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->index++;
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
			sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
			sgl = &sgl_pair->A;
		}
	}

	return sgl;
}
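
/*
 * Traversal order sketch: element A of pair N, then element B of pair N
 * (unless B's address is zero), then element A of pair N+1 reached through
 * the next_pair link, and so on.  A zeroed B element or a zeroed next_pair
 * link ends the walk with NULL, mirroring how sci_request_build_sgl()
 * terminates the chain.
 */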

static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
					u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */

/* transmit a DATA_FIS from (current sgl + offset) for the input parameter
 * length; the current sgl and offset are already stored in the IO request
 */
static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
	struct isci_request *ireq,
	u32 length)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_task_context *task_context = ireq->tc;
	struct scu_sgl_element_pair *sgl_pair;
	struct scu_sgl_element *current_sgl;

	/* Recycle the TC and reconstruct it to send out a DATA FIS containing
	 * the data from current_sgl + offset for the input length
	 */
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
		current_sgl = &sgl_pair->A;
	else
		current_sgl = &sgl_pair->B;

	/* update the TC */
	task_context->command_iu_upper = current_sgl->address_upper;
	task_context->command_iu_lower = current_sgl->address_lower;
	task_context->transfer_length_bytes = length;
	task_context->type.stp.fis_type = FIS_DATA;

	/* send the new TC out. */
	return sci_controller_continue_io(ireq);
}

static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_sgl_element_pair *sgl_pair;
	enum sci_status status = SCI_SUCCESS;
	struct scu_sgl_element *sgl;
	u32 offset;
	u32 len = 0;

	offset = stp_req->sgl.offset;
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
		return SCI_FAILURE;

	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
		sgl = &sgl_pair->A;
		len = sgl_pair->A.length - offset;
	} else {
		sgl = &sgl_pair->B;
		len = sgl_pair->B.length - offset;
	}

	if (stp_req->pio_len == 0)
		return SCI_SUCCESS;

	if (stp_req->pio_len >= len) {
		status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
		if (status != SCI_SUCCESS)
			return status;
		stp_req->pio_len -= len;

		/* update the current sgl, offset and save for future */
		sgl = pio_sgl_next(stp_req);
		offset = 0;
	} else if (stp_req->pio_len < len) {
		status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);

		/* Sgl offset will be adjusted and saved for future */
		offset += stp_req->pio_len;
		sgl->address_lower += stp_req->pio_len;
		stp_req->pio_len = 0;
	}

	stp_req->sgl.offset = offset;

	return status;
}
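
/*
 * Worked example (illustrative): with pio_len == 3K against a 2K sgl
 * element, the first call above sends a 2K DATA FIS, decrements pio_len to
 * 1K, and advances to the next element with offset 0; the next invocation
 * takes the pio_len < len branch, sends the remaining 1K, and advances
 * offset/address_lower so that a later PIO setup resumes mid-element.
 */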

/**
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer for the specified length to the IO request's
 * SGL-specified data region.
 */
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
					     u8 *data_buf, u32 len)
{
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	ireq = to_ireq(stp_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}

/**
 * @stp_req: The PIO DATA IN request that is to receive the data.
 * @data_buffer: The buffer to copy from.
 *
 * Copy the data buffer to the IO request data region.
 */
static enum sci_status sci_stp_request_pio_data_in_copy_data(
	struct isci_stp_request *stp_req,
	u8 *data_buffer)
{
	enum sci_status status;

	/*
	 * If there is less than 1K remaining in the transfer request,
	 * copy just the data for the transfer */
	if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, stp_req->pio_len);

		if (status == SCI_SUCCESS)
			stp_req->pio_len = 0;
	} else {
		/* We are transferring the whole frame, so copy it all */
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);

		if (status == SCI_SUCCESS)
			stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
	}

	return status;
}
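
/*
 * Chunking sketch: each unsolicited frame carries at most
 * SCU_MAX_FRAME_BUFFER_SIZE (1K) of payload, so a 2.5K PIO read arrives as
 * two full 1K copies followed by a final 512-byte copy that zeroes pio_len.
 */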
1355 
1356 static enum sci_status
stp_request_pio_await_h2d_completion_tc_event(struct isci_request * ireq,u32 completion_code)1357 stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
1358 					      u32 completion_code)
1359 {
1360 	enum sci_status status = SCI_SUCCESS;
1361 
1362 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1363 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1364 		ireq->scu_status = SCU_TASK_DONE_GOOD;
1365 		ireq->sci_status = SCI_SUCCESS;
1366 		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1367 		break;
1368 
1369 	default:
1370 		/* All other completion status cause the IO to be
1371 		 * complete.  If a NAK was received, then it is up to
1372 		 * the user to retry the request.
1373 		 */
1374 		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1375 		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1376 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1377 		break;
1378 	}
1379 
1380 	return status;
1381 }
1382 
1383 static enum sci_status
pio_data_out_tx_done_tc_event(struct isci_request * ireq,u32 completion_code)1384 pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1385 			      u32 completion_code)
1386 {
1387 	enum sci_status status = SCI_SUCCESS;
1388 	bool all_frames_transferred = false;
1389 	struct isci_stp_request *stp_req = &ireq->stp.req;
1390 
1391 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1392 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1393 		/* Transmit data */
1394 		if (stp_req->pio_len != 0) {
1395 			status = sci_stp_request_pio_data_out_transmit_data(ireq);
1396 			if (status == SCI_SUCCESS) {
1397 				if (stp_req->pio_len == 0)
1398 					all_frames_transferred = true;
1399 			}
1400 		} else if (stp_req->pio_len == 0) {
1401 			/*
1402 			 * this will happen if the all data is written at the
1403 			 * first time after the pio setup fis is received
1404 			 */
1405 			all_frames_transferred  = true;
1406 		}
1407 
1408 		/* all data transferred. */
1409 		if (all_frames_transferred) {
1410 			/*
1411 			 * Change the state to SCI_REQ_STP_PIO_DATA_IN
1412 			 * and wait for PIO_SETUP fis / or D2H REg fis. */
1413 			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1414 		}
1415 		break;
1416 
1417 	default:
1418 		/*
1419 		 * All other completion status cause the IO to be complete.
1420 		 * If a NAK was received, then it is up to the user to retry
1421 		 * the request.
1422 		 */
1423 		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1424 		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1425 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1426 		break;
1427 	}
1428 
1429 	return status;
1430 }
1431 
sci_stp_request_udma_general_frame_handler(struct isci_request * ireq,u32 frame_index)1432 static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
1433 								       u32 frame_index)
1434 {
1435 	struct isci_host *ihost = ireq->owning_controller;
1436 	struct dev_to_host_fis *frame_header;
1437 	enum sci_status status;
1438 	u32 *frame_buffer;
1439 
1440 	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1441 							       frame_index,
1442 							       (void **)&frame_header);
1443 
1444 	if ((status == SCI_SUCCESS) &&
1445 	    (frame_header->fis_type == FIS_REGD2H)) {
1446 		sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1447 							      frame_index,
1448 							      (void **)&frame_buffer);
1449 
1450 		sci_controller_copy_sata_response(&ireq->stp.rsp,
1451 						       frame_header,
1452 						       frame_buffer);
1453 	}
1454 
1455 	sci_controller_release_frame(ihost, frame_index);
1456 
1457 	return status;
1458 }
1459 
process_unsolicited_fis(struct isci_request * ireq,u32 frame_index)1460 static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
1461 					       u32 frame_index)
1462 {
1463 	struct isci_host *ihost = ireq->owning_controller;
1464 	enum sci_status status;
1465 	struct dev_to_host_fis *frame_header;
1466 	u32 *frame_buffer;
1467 
1468 	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1469 							  frame_index,
1470 							  (void **)&frame_header);
1471 
1472 	if (status != SCI_SUCCESS)
1473 		return status;
1474 
1475 	if (frame_header->fis_type != FIS_REGD2H) {
1476 		dev_err(&ireq->isci_host->pdev->dev,
1477 			"%s ERROR: invalid fis type 0x%X\n",
1478 			__func__, frame_header->fis_type);
1479 		return SCI_FAILURE;
1480 	}
1481 
1482 	sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1483 						 frame_index,
1484 						 (void **)&frame_buffer);
1485 
1486 	sci_controller_copy_sata_response(&ireq->stp.rsp,
1487 					  (u32 *)frame_header,
1488 					  frame_buffer);
1489 
1490 	/* Frame has been decoded return it to the controller */
1491 	sci_controller_release_frame(ihost, frame_index);
1492 
1493 	return status;
1494 }
1495 
atapi_d2h_reg_frame_handler(struct isci_request * ireq,u32 frame_index)1496 static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
1497 						   u32 frame_index)
1498 {
1499 	struct sas_task *task = isci_request_access_task(ireq);
1500 	enum sci_status status;
1501 
1502 	status = process_unsolicited_fis(ireq, frame_index);
1503 
1504 	if (status == SCI_SUCCESS) {
1505 		if (ireq->stp.rsp.status & ATA_ERR)
1506 			status = SCI_IO_FAILURE_RESPONSE_VALID;
1507 	} else {
1508 		status = SCI_IO_FAILURE_RESPONSE_VALID;
1509 	}
1510 
1511 	if (status != SCI_SUCCESS) {
1512 		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1513 		ireq->sci_status = status;
1514 	} else {
1515 		ireq->scu_status = SCU_TASK_DONE_GOOD;
1516 		ireq->sci_status = SCI_SUCCESS;
1517 	}
1518 
1519 	/* the d2h ufi is the end of non-data commands */
1520 	if (task->data_dir == DMA_NONE)
1521 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1522 
1523 	return status;
1524 }
1525 
scu_atapi_reconstruct_raw_frame_task_context(struct isci_request * ireq)1526 static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
1527 {
1528 	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
1529 	void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
1530 	struct scu_task_context *task_context = ireq->tc;
1531 
1532 	/* fill in the SCU Task Context for a DATA fis containing the CDB in
1533 	 * Raw Frame type.  The TC for the previous Packet fis was already
1534 	 * there; we only need to change the H2D fis content.
1535 	 */
1536 	memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
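	/* The CDB is copied past the first dword of the H2D FIS area; dword 0
	 * carries the FIS header, so the raw DATA frame payload starts at
	 * byte 4 (a reading of the sizeof(u32) offset used below).
	 */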
1537 	memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
1538 	memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
1539 	task_context->type.stp.fis_type = FIS_DATA;
1540 	task_context->transfer_length_bytes = dev->cdb_len;
1541 }
1542 
1543 static void scu_atapi_construct_task_context(struct isci_request *ireq)
1544 {
1545 	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
1546 	struct sas_task *task = isci_request_access_task(ireq);
1547 	struct scu_task_context *task_context = ireq->tc;
1548 	int cdb_len = dev->cdb_len;
1549 
1550 	/* reference: SSTL 1.13.4.2
1551 	 * task_type, sata_direction
1552 	 */
1553 	if (task->data_dir == DMA_TO_DEVICE) {
1554 		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
1555 		task_context->sata_direction = 0;
1556 	} else {
1557 		/* todo: for NO_DATA command, we need to send out raw frame. */
1558 		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
1559 		task_context->sata_direction = 1;
1560 	}
1561 
1562 	memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
1563 	task_context->type.stp.fis_type = FIS_DATA;
1564 
1565 	memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
1566 	memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
1567 	task_context->ssp_command_iu_length = cdb_len / sizeof(u32);
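	/* Note: the SSP command IU length field appears to be reused here to
	 * tell the SCU the ATAPI CDB size in dwords.
	 */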
1568 
1569 	/* task phase is set to TX_CMD */
1570 	task_context->task_phase = 0x1;
1571 
1572 	/* retry counter */
1573 	task_context->stp_retry_count = 0;
1574 
1575 	/* data transfer size. */
1576 	task_context->transfer_length_bytes = task->total_xfer_len;
1577 
1578 	/* setup sgl */
1579 	sci_request_build_sgl(ireq);
1580 }
1581 
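/* sci_io_request_frame_handler() - dispatch an unsolicited frame to the
 * handler for the request's current state.  Most arms release the frame
 * back to the controller before returning; the PIO data-in path may
 * instead hold the frame by saving its index for a later copy.
 */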
1582 enum sci_status
1583 sci_io_request_frame_handler(struct isci_request *ireq,
1584 				  u32 frame_index)
1585 {
1586 	struct isci_host *ihost = ireq->owning_controller;
1587 	struct isci_stp_request *stp_req = &ireq->stp.req;
1588 	enum sci_base_request_states state;
1589 	enum sci_status status;
1590 	ssize_t word_cnt;
1591 
1592 	state = ireq->sm.current_state_id;
1593 	switch (state)  {
1594 	case SCI_REQ_STARTED: {
1595 		struct ssp_frame_hdr ssp_hdr;
1596 		void *frame_header;
1597 
1598 		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1599 							      frame_index,
1600 							      &frame_header);
1601 
1602 		word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1603 		sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1604 
1605 		if (ssp_hdr.frame_type == SSP_RESPONSE) {
1606 			struct ssp_response_iu *resp_iu;
1607 			ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1608 
1609 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1610 								      frame_index,
1611 								      (void **)&resp_iu);
1612 
1613 			sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
1614 
1615 			resp_iu = &ireq->ssp.rsp;
1616 
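			/* datapres values per the SSP response IU definition:
			 * 0x01 = RESPONSE_DATA, 0x02 = SENSE_DATA; either one
			 * means the target returned more than bare status.
			 */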
1617 			if (resp_iu->datapres == 0x01 ||
1618 			    resp_iu->datapres == 0x02) {
1619 				ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1620 				ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1621 			} else {
1622 				ireq->scu_status = SCU_TASK_DONE_GOOD;
1623 				ireq->sci_status = SCI_SUCCESS;
1624 			}
1625 		} else {
1626 			/* not a response frame, why did it get forwarded? */
1627 			dev_err(&ihost->pdev->dev,
1628 				"%s: SCIC IO Request 0x%p received unexpected "
1629 				"frame %d type 0x%02x\n", __func__, ireq,
1630 				frame_index, ssp_hdr.frame_type);
1631 		}
1632 
1633 		/*
1634 		 * In any case we are done with this frame buffer; return it to
1635 		 * the controller.
1636 		 */
1637 		sci_controller_release_frame(ihost, frame_index);
1638 
1639 		return SCI_SUCCESS;
1640 	}
1641 
1642 	case SCI_REQ_TASK_WAIT_TC_RESP:
1643 		sci_io_request_copy_response(ireq);
1644 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1645 		sci_controller_release_frame(ihost, frame_index);
1646 		return SCI_SUCCESS;
1647 
1648 	case SCI_REQ_SMP_WAIT_RESP: {
1649 		struct sas_task *task = isci_request_access_task(ireq);
1650 		struct scatterlist *sg = &task->smp_task.smp_resp;
1651 		void *frame_header, *kaddr;
1652 		u8 *rsp;
1653 
1654 		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1655 							 frame_index,
1656 							 &frame_header);
1657 		kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
1658 		rsp = kaddr + sg->offset;
1659 		sci_swab32_cpy(rsp, frame_header, 1);
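		/* The SCU splits an unsolicited frame: dword 0 arrives in the
		 * frame header and the remainder in the frame buffer, which is
		 * why only one dword is copied here and the buffer copy below
		 * starts at rsp + 4 with one dword fewer.
		 */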
1660 
1661 		if (rsp[0] == SMP_RESPONSE) {
1662 			void *smp_resp;
1663 
1664 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1665 								 frame_index,
1666 								 &smp_resp);
1667 
1668 			word_cnt = (sg->length/4)-1;
1669 			if (word_cnt > 0)
1670 				word_cnt = min_t(unsigned int, word_cnt,
1671 						 SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
1672 			sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
1673 
1674 			ireq->scu_status = SCU_TASK_DONE_GOOD;
1675 			ireq->sci_status = SCI_SUCCESS;
1676 			sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1677 		} else {
1678 			/*
1679 			 * This was not a response frame why did it get
1680 			 * forwarded?
1681 			 */
1682 			dev_err(&ihost->pdev->dev,
1683 				"%s: SCIC SMP Request 0x%p received unexpected "
1684 				"frame %d type 0x%02x\n",
1685 				__func__,
1686 				ireq,
1687 				frame_index,
1688 				rsp[0]);
1689 
1690 			ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
1691 			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1692 			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1693 		}
1694 		kunmap_atomic(kaddr, KM_IRQ0);
1695 
1696 		sci_controller_release_frame(ihost, frame_index);
1697 
1698 		return SCI_SUCCESS;
1699 	}
1700 
1701 	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1702 		return sci_stp_request_udma_general_frame_handler(ireq,
1703 								       frame_index);
1704 
1705 	case SCI_REQ_STP_UDMA_WAIT_D2H:
1706 		/* Use the general frame handler to copy the response data */
1707 		status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
1708 
1709 		if (status != SCI_SUCCESS)
1710 			return status;
1711 
1712 		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1713 		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1714 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1715 		return SCI_SUCCESS;
1716 
1717 	case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1718 		struct dev_to_host_fis *frame_header;
1719 		u32 *frame_buffer;
1720 
1721 		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1722 								       frame_index,
1723 								       (void **)&frame_header);
1724 
1725 		if (status != SCI_SUCCESS) {
1726 			dev_err(&ihost->pdev->dev,
1727 				"%s: SCIC IO Request 0x%p could not get frame "
1728 				"header for frame index %d, status %x\n",
1729 				__func__,
1730 				stp_req,
1731 				frame_index,
1732 				status);
1733 
1734 			return status;
1735 		}
1736 
1737 		switch (frame_header->fis_type) {
1738 		case FIS_REGD2H:
1739 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1740 								      frame_index,
1741 								      (void **)&frame_buffer);
1742 
1743 			sci_controller_copy_sata_response(&ireq->stp.rsp,
1744 							       frame_header,
1745 							       frame_buffer);
1746 
1747 			/* The command has completed with error */
1748 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1749 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1750 			break;
1751 
1752 		default:
1753 			dev_warn(&ihost->pdev->dev,
1754 				 "%s: IO Request:0x%p Frame Id:%d protocol "
1755 				  "violation occurred\n", __func__, stp_req,
1756 				  frame_index);
1757 
1758 			ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1759 			ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1760 			break;
1761 		}
1762 
1763 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1764 
1765 		/* Frame has been decoded; return it to the controller */
1766 		sci_controller_release_frame(ihost, frame_index);
1767 
1768 		return status;
1769 	}
1770 
1771 	case SCI_REQ_STP_PIO_WAIT_FRAME: {
1772 		struct sas_task *task = isci_request_access_task(ireq);
1773 		struct dev_to_host_fis *frame_header;
1774 		u32 *frame_buffer;
1775 
1776 		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1777 								       frame_index,
1778 								       (void **)&frame_header);
1779 
1780 		if (status != SCI_SUCCESS) {
1781 			dev_err(&ihost->pdev->dev,
1782 				"%s: SCIC IO Request 0x%p could not get frame "
1783 				"header for frame index %d, status %x\n",
1784 				__func__, stp_req, frame_index, status);
1785 			return status;
1786 		}
1787 
1788 		switch (frame_header->fis_type) {
1789 		case FIS_PIO_SETUP:
1790 			/* Get from the frame buffer the PIO Setup Data */
1791 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1792 								      frame_index,
1793 								      (void **)&frame_buffer);
1794 
1795 			/* Get the data from the PIO Setup.  The SCU hardware
1796 			 * returns the first word in the frame_header and the
1797 			 * rest of the data is in the frame buffer, so we
1798 			 * need to back up one dword
1799 			 */
1800 
1801 			/* transfer_count: first 16 bits in the 4th dword */
1802 			stp_req->pio_len = frame_buffer[3] & 0xffff;
1803 
1804 			/* status: 4th byte in the 3rd dword */
1805 			stp_req->status = (frame_buffer[2] >> 24) & 0xff;
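			/* The buffer indices are offset by one dword from the
			 * FIS layout for the same reason: dword 0 of the PIO
			 * Setup FIS was delivered in frame_header.
			 */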
1806 
1807 			sci_controller_copy_sata_response(&ireq->stp.rsp,
1808 							       frame_header,
1809 							       frame_buffer);
1810 
1811 			ireq->stp.rsp.status = stp_req->status;
1812 
1813 			/* The next state depends on whether the
1814 			 * request was PIO Data-in or Data-out
1815 			 */
1816 			if (task->data_dir == DMA_FROM_DEVICE) {
1817 				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1818 			} else if (task->data_dir == DMA_TO_DEVICE) {
1819 				/* Transmit data */
1820 				status = sci_stp_request_pio_data_out_transmit_data(ireq);
1821 				if (status != SCI_SUCCESS)
1822 					break;
1823 				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
1824 			}
1825 			break;
1826 
1827 		case FIS_SETDEVBITS:
1828 			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1829 			break;
1830 
1831 		case FIS_REGD2H:
1832 			if (frame_header->status & ATA_BUSY) {
1833 				/*
1834 				 * Now why is the drive sending a D2H Register
1835 				 * FIS when it is still busy?  Do nothing since
1836 				 * we are still in the right state.
1837 				 */
1838 				dev_dbg(&ihost->pdev->dev,
1839 					"%s: SCIC PIO Request 0x%p received "
1840 					"D2H Register FIS with BSY status "
1841 					"0x%x\n",
1842 					__func__,
1843 					stp_req,
1844 					frame_header->status);
1845 				break;
1846 			}
1847 
1848 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1849 								      frame_index,
1850 								      (void **)&frame_buffer);
1851 
1852 			sci_controller_copy_sata_response(&ireq->stp.rsp,
1853 							       frame_header,
1854 							       frame_buffer);
1855 
1856 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1857 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1858 			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1859 			break;
1860 
1861 		default:
1862 			/* FIXME: what do we do here? */
1863 			break;
1864 		}
1865 
1866 		/* Frame is decoded; return it to the controller */
1867 		sci_controller_release_frame(ihost, frame_index);
1868 
1869 		return status;
1870 	}
1871 
1872 	case SCI_REQ_STP_PIO_DATA_IN: {
1873 		struct dev_to_host_fis *frame_header;
1874 		struct sata_fis_data *frame_buffer;
1875 
1876 		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1877 								       frame_index,
1878 								       (void **)&frame_header);
1879 
1880 		if (status != SCI_SUCCESS) {
1881 			dev_err(&ihost->pdev->dev,
1882 				"%s: SCIC IO Request 0x%p could not get frame "
1883 				"header for frame index %d, status %x\n",
1884 				__func__,
1885 				stp_req,
1886 				frame_index,
1887 				status);
1888 			return status;
1889 		}
1890 
1891 		if (frame_header->fis_type != FIS_DATA) {
1892 			dev_err(&ihost->pdev->dev,
1893 				"%s: SCIC PIO Request 0x%p received frame %d "
1894 				"with fis type 0x%02x when expecting a data "
1895 				"fis.\n",
1896 				__func__,
1897 				stp_req,
1898 				frame_index,
1899 				frame_header->fis_type);
1900 
1901 			ireq->scu_status = SCU_TASK_DONE_GOOD;
1902 			ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
1903 			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1904 
1905 			/* Frame is decoded; return it to the controller */
1906 			sci_controller_release_frame(ihost, frame_index);
1907 			return status;
1908 		}
1909 
1910 		if (stp_req->sgl.index < 0) {
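			/* No SGL to copy into: hold on to the frame (it is
			 * deliberately not released here) and save its index
			 * so the payload can be retrieved when the request is
			 * finally completed.
			 */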
1911 			ireq->saved_rx_frame_index = frame_index;
1912 			stp_req->pio_len = 0;
1913 		} else {
1914 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1915 								      frame_index,
1916 								      (void **)&frame_buffer);
1917 
1918 			status = sci_stp_request_pio_data_in_copy_data(stp_req,
1919 									    (u8 *)frame_buffer);
1920 
1921 			/* Frame is decoded; return it to the controller */
1922 			sci_controller_release_frame(ihost, frame_index);
1923 		}
1924 
1925 		/* Check for the end of the transfer: are there more
1926 		 * bytes remaining for this data transfer?
1927 		 */
1928 		if (status != SCI_SUCCESS || stp_req->pio_len != 0)
1929 			return status;
1930 
1931 		if ((stp_req->status & ATA_BUSY) == 0) {
1932 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1933 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1934 			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1935 		} else {
1936 			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1937 		}
1938 		return status;
1939 	}
1940 
1941 	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1942 		struct dev_to_host_fis *frame_header;
1943 		u32 *frame_buffer;
1944 
1945 		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1946 								       frame_index,
1947 								       (void **)&frame_header);
1948 		if (status != SCI_SUCCESS) {
1949 			dev_err(&ihost->pdev->dev,
1950 				"%s: SCIC IO Request 0x%p could not get frame "
1951 				"header for frame index %d, status %x\n",
1952 				__func__,
1953 				stp_req,
1954 				frame_index,
1955 				status);
1956 			return status;
1957 		}
1958 
1959 		switch (frame_header->fis_type) {
1960 		case FIS_REGD2H:
1961 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1962 								      frame_index,
1963 								      (void **)&frame_buffer);
1964 
1965 			sci_controller_copy_sata_response(&ireq->stp.rsp,
1966 							       frame_header,
1967 							       frame_buffer);
1968 
1969 			/* The command has completed with error */
1970 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1971 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1972 			break;
1973 
1974 		default:
1975 			dev_warn(&ihost->pdev->dev,
1976 				 "%s: IO Request:0x%p Frame Id:%d protocol "
1977 				 "violation occurred\n",
1978 				 __func__,
1979 				 stp_req,
1980 				 frame_index);
1981 
1982 			ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1983 			ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1984 			break;
1985 		}
1986 
1987 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1988 
1989 		/* Frame has been decoded; return it to the controller */
1990 		sci_controller_release_frame(ihost, frame_index);
1991 
1992 		return status;
1993 	}
1994 	case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
1995 		struct sas_task *task = isci_request_access_task(ireq);
1996 
1997 		sci_controller_release_frame(ihost, frame_index);
1998 		ireq->target_device->working_request = ireq;
1999 		if (task->data_dir == DMA_NONE) {
2000 			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
2001 			scu_atapi_reconstruct_raw_frame_task_context(ireq);
2002 		} else {
2003 			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2004 			scu_atapi_construct_task_context(ireq);
2005 		}
2006 
2007 		sci_controller_continue_io(ireq);
2008 		return SCI_SUCCESS;
2009 	}
2010 	case SCI_REQ_ATAPI_WAIT_D2H:
2011 		return atapi_d2h_reg_frame_handler(ireq, frame_index);
2012 	case SCI_REQ_ABORTING:
2013 		/*
2014 		 * TODO: Is it even possible to get an unsolicited frame in the
2015 		 * aborting state?
2016 		 */
2017 		sci_controller_release_frame(ihost, frame_index);
2018 		return SCI_SUCCESS;
2019 
2020 	default:
2021 		dev_warn(&ihost->pdev->dev,
2022 			 "%s: SCIC IO Request given unexpected frame %x while "
2023 			 "in state %d\n",
2024 			 __func__,
2025 			 frame_index,
2026 			 state);
2027 
2028 		sci_controller_release_frame(ihost, frame_index);
2029 		return SCI_FAILURE_INVALID_STATE;
2030 	}
2031 }
2032 
2033 static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
2034 						       u32 completion_code)
2035 {
2036 	enum sci_status status = SCI_SUCCESS;
2037 
2038 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2039 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2040 		ireq->scu_status = SCU_TASK_DONE_GOOD;
2041 		ireq->sci_status = SCI_SUCCESS;
2042 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2043 		break;
2044 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2045 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
2046 		/* We must check the response buffer to see if the D2H
2047 		 * Register FIS was received before we got the TC
2048 		 * completion.
2049 		 */
2050 		if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
2051 			sci_remote_device_suspend(ireq->target_device,
2052 				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2053 
2054 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2055 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2056 			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2057 		} else {
2058 			/* If we have an error completion status for the
2059 			 * TC, then we can expect a D2H register FIS from
2060 			 * the device, so we must change state to wait
2061 			 * for it.
2062 			 */
2063 			sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
2064 		}
2065 		break;
2066 
2067 	/* TODO Check to see if any of these completion status need to
2068 	 * wait for the device to host register fis.
2069 	 */
2070 	/* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
2071 	 * - this comes only for B0
2072 	 */
2073 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
2074 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
2075 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
2076 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
2077 		sci_remote_device_suspend(ireq->target_device,
2078 			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2079 		/* Fall through to the default case */
2080 	default:
2081 		/* All other completion status cause the IO to be complete. */
2082 		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2083 		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2084 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2085 		break;
2086 	}
2087 
2088 	return status;
2089 }
2090 
2091 static enum sci_status
2092 stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
2093 						   u32 completion_code)
2094 {
2095 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2096 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2097 		ireq->scu_status = SCU_TASK_DONE_GOOD;
2098 		ireq->sci_status = SCI_SUCCESS;
2099 		sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
2100 		break;
2101 
2102 	default:
2103 		/*
2104 		 * All other completion status cause the IO to be complete.
2105 		 * If a NAK was received, then it is up to the user to retry
2106 		 * the request.
2107 		 */
2108 		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2109 		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2110 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2111 		break;
2112 	}
2113 
2114 	return SCI_SUCCESS;
2115 }
2116 
2117 static enum sci_status
2118 stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
2119 						     u32 completion_code)
2120 {
2121 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2122 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2123 		ireq->scu_status = SCU_TASK_DONE_GOOD;
2124 		ireq->sci_status = SCI_SUCCESS;
2125 		sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2126 		break;
2127 
2128 	default:
2129 		/* All other completion status cause the IO to be complete.  If
2130 		 * a NAK was received, then it is up to the user to retry the
2131 		 * request.
2132 		 */
2133 		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2134 		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2135 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2136 		break;
2137 	}
2138 
2139 	return SCI_SUCCESS;
2140 }
2141 
2142 static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
2143 						  enum sci_base_request_states next)
2144 {
2145 	enum sci_status status = SCI_SUCCESS;
2146 
2147 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2148 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2149 		ireq->scu_status = SCU_TASK_DONE_GOOD;
2150 		ireq->sci_status = SCI_SUCCESS;
2151 		sci_change_state(&ireq->sm, next);
2152 		break;
2153 	default:
2154 		/* All other completion status cause the IO to be complete.
2155 		 * If a NAK was received, then it is up to the user to retry
2156 		 * the request.
2157 		 */
2158 		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2159 		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2160 
2161 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2162 		break;
2163 	}
2164 
2165 	return status;
2166 }
2167 
2168 static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
2169 							u32 completion_code)
2170 {
2171 	struct isci_remote_device *idev = ireq->target_device;
2172 	struct dev_to_host_fis *d2h = &ireq->stp.rsp;
2173 	enum sci_status status = SCI_SUCCESS;
2174 
2175 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2176 	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
2177 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2178 		break;
2179 
2180 	case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
2181 		u16 len = sci_req_tx_bytes(ireq);
2182 
2183 		/* likely a non-error data underrun; work around the
2184 		 * missing d2h frame from the controller
2185 		 */
2186 		if (d2h->fis_type != FIS_REGD2H) {
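			/* Synthesize a benign D2H register FIS.  The values
			 * mirror a normal ATAPI completion: status 0x50 is
			 * DRDY | DSC, flags bit 6 is the interrupt bit, and
			 * sector_count 0x3 sets the I/O and C/D interrupt
			 * reason bits (command complete), with the observed
			 * transfer length reported in the byte count fields.
			 */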
2187 			d2h->fis_type = FIS_REGD2H;
2188 			d2h->flags = (1 << 6);
2189 			d2h->status = 0x50;
2190 			d2h->error = 0;
2191 			d2h->lbal = 0;
2192 			d2h->byte_count_low = len & 0xff;
2193 			d2h->byte_count_high = len >> 8;
2194 			d2h->device = 0xa0;
2195 			d2h->lbal_exp = 0;
2196 			d2h->lbam_exp = 0;
2197 			d2h->lbah_exp = 0;
2198 			d2h->_r_a = 0;
2199 			d2h->sector_count = 0x3;
2200 			d2h->sector_count_exp = 0;
2201 			d2h->_r_b = 0;
2202 			d2h->_r_c = 0;
2203 			d2h->_r_d = 0;
2204 		}
2205 
2206 		ireq->scu_status = SCU_TASK_DONE_GOOD;
2207 		ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
2208 		status = ireq->sci_status;
2209 
2210 		/* the hw will have suspended the rnc, so complete the
2211 		 * request upon pending resume
2212 		 */
2213 		sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2214 		break;
2215 	}
2216 	case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
2217 		/* In this case, there is no UF coming after.
2218 		 * Complete the IO now.
2219 		 */
2220 		ireq->scu_status = SCU_TASK_DONE_GOOD;
2221 		ireq->sci_status = SCI_SUCCESS;
2222 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2223 		break;
2224 
2225 	default:
2226 		if (d2h->fis_type == FIS_REGD2H) {
2227 			/* UF received change the device state to ATAPI_ERROR */
2228 			status = ireq->sci_status;
2229 			sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2230 		} else {
2231 			/* If we receive any non-success TC status and no UF
2232 			 * has been received yet, then a UF for the status fis
2233 			 * is coming after (XXX: suspect this is
2234 			 * actually a protocol error or a bug like the
2235 			 * DONE_UNEXP_FIS case)
2236 			 */
2237 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2238 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2239 
2240 			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2241 		}
2242 		break;
2243 	}
2244 
2245 	return status;
2246 }
2247 
2248 enum sci_status
2249 sci_io_request_tc_completion(struct isci_request *ireq,
2250 				  u32 completion_code)
2251 {
2252 	enum sci_base_request_states state;
2253 	struct isci_host *ihost = ireq->owning_controller;
2254 
2255 	state = ireq->sm.current_state_id;
2256 
2257 	switch (state) {
2258 	case SCI_REQ_STARTED:
2259 		return request_started_state_tc_event(ireq, completion_code);
2260 
2261 	case SCI_REQ_TASK_WAIT_TC_COMP:
2262 		return ssp_task_request_await_tc_event(ireq,
2263 						       completion_code);
2264 
2265 	case SCI_REQ_SMP_WAIT_RESP:
2266 		return smp_request_await_response_tc_event(ireq,
2267 							   completion_code);
2268 
2269 	case SCI_REQ_SMP_WAIT_TC_COMP:
2270 		return smp_request_await_tc_event(ireq, completion_code);
2271 
2272 	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2273 		return stp_request_udma_await_tc_event(ireq,
2274 						       completion_code);
2275 
2276 	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2277 		return stp_request_non_data_await_h2d_tc_event(ireq,
2278 							       completion_code);
2279 
2280 	case SCI_REQ_STP_PIO_WAIT_H2D:
2281 		return stp_request_pio_await_h2d_completion_tc_event(ireq,
2282 								     completion_code);
2283 
2284 	case SCI_REQ_STP_PIO_DATA_OUT:
2285 		return pio_data_out_tx_done_tc_event(ireq, completion_code);
2286 
2287 	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
2288 		return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
2289 									  completion_code);
2290 
2291 	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2292 		return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
2293 									    completion_code);
2294 
2295 	case SCI_REQ_ABORTING:
2296 		return request_aborting_state_tc_event(ireq,
2297 						       completion_code);
2298 
2299 	case SCI_REQ_ATAPI_WAIT_H2D:
2300 		return atapi_raw_completion(ireq, completion_code,
2301 					    SCI_REQ_ATAPI_WAIT_PIO_SETUP);
2302 
2303 	case SCI_REQ_ATAPI_WAIT_TC_COMP:
2304 		return atapi_raw_completion(ireq, completion_code,
2305 					    SCI_REQ_ATAPI_WAIT_D2H);
2306 
2307 	case SCI_REQ_ATAPI_WAIT_D2H:
2308 		return atapi_data_tc_completion_handler(ireq, completion_code);
2309 
2310 	default:
2311 		dev_warn(&ihost->pdev->dev,
2312 			 "%s: SCIC IO Request given task completion "
2313 			 "notification %x while in wrong state %d\n",
2314 			 __func__,
2315 			 completion_code,
2316 			 state);
2317 		return SCI_FAILURE_INVALID_STATE;
2318 	}
2319 }
2320 
2321 /**
2322  * isci_request_process_response_iu() - This function sets the status and
2323  *    response iu, in the task struct, from the request object for the upper
2324  *    layer driver.
2325  * @task: This parameter is the task struct from the upper layer driver.
2326  * @resp_iu: This parameter points to the response iu of the completed request.
2327  * @dev: This parameter specifies the linux device struct.
2328  *
2329  * none.
2330  */
2331 static void isci_request_process_response_iu(
2332 	struct sas_task *task,
2333 	struct ssp_response_iu *resp_iu,
2334 	struct device *dev)
2335 {
2336 	dev_dbg(dev,
2337 		"%s: resp_iu = %p "
2338 		"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2339 		"resp_iu->response_data_len = %x, "
2340 		"resp_iu->sense_data_len = %x\nresponse data: ",
2341 		__func__,
2342 		resp_iu,
2343 		resp_iu->status,
2344 		resp_iu->datapres,
2345 		resp_iu->response_data_len,
2346 		resp_iu->sense_data_len);
2347 
2348 	task->task_status.stat = resp_iu->status;
2349 
2350 	/* libsas updates the task status fields based on the response iu. */
2351 	sas_ssp_task_response(dev, task, resp_iu);
2352 }
2353 
2354 /**
2355  * isci_request_set_open_reject_status() - This function prepares the I/O
2356  *    completion for OPEN_REJECT conditions.
2357  * @request: This parameter is the completed isci_request object.
2358  * @response_ptr: This parameter specifies the service response for the I/O.
2359  * @status_ptr: This parameter specifies the exec status for the I/O.
2360  * @complete_to_host_ptr: This parameter specifies the action to be taken by
2361  *    the LLDD with respect to completing this request or forcing an abort
2362  *    condition on the I/O.
2363  * @open_rej_reason: This parameter specifies the encoded reason for the
2364  *    abandon-class reject.
2365  *
2366  * none.
2367  */
2368 static void isci_request_set_open_reject_status(
2369 	struct isci_request *request,
2370 	struct sas_task *task,
2371 	enum service_response *response_ptr,
2372 	enum exec_status *status_ptr,
2373 	enum isci_completion_selection *complete_to_host_ptr,
2374 	enum sas_open_rej_reason open_rej_reason)
2375 {
2376 	/* Task in the target is done. */
2377 	set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2378 	*response_ptr                     = SAS_TASK_UNDELIVERED;
2379 	*status_ptr                       = SAS_OPEN_REJECT;
2380 	*complete_to_host_ptr             = isci_perform_normal_io_completion;
2381 	task->task_status.open_rej_reason = open_rej_reason;
2382 }
2383 
2384 /**
2385  * isci_request_handle_controller_specific_errors() - This function decodes
2386  *    controller-specific I/O completion error conditions.
2387  * @request: This parameter is the completed isci_request object.
2388  * @response_ptr: This parameter specifies the service response for the I/O.
2389  * @status_ptr: This parameter specifies the exec status for the I/O.
2390  * @complete_to_host_ptr: This parameter specifies the action to be taken by
2391  *    the LLDD with respect to completing this request or forcing an abort
2392  *    condition on the I/O.
2393  *
2394  * none.
2395  */
2396 static void isci_request_handle_controller_specific_errors(
2397 	struct isci_remote_device *idev,
2398 	struct isci_request *request,
2399 	struct sas_task *task,
2400 	enum service_response *response_ptr,
2401 	enum exec_status *status_ptr,
2402 	enum isci_completion_selection *complete_to_host_ptr)
2403 {
2404 	unsigned int cstatus;
2405 
2406 	cstatus = request->scu_status;
2407 
2408 	dev_dbg(&request->isci_host->pdev->dev,
2409 		"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2410 		"- controller status = 0x%x\n",
2411 		__func__, request, cstatus);
2412 
2413 	/* Decode the controller-specific errors; most
2414 	 * important is to recognize those conditions in which
2415 	 * the target may still have a task outstanding that
2416 	 * must be aborted.
2417 	 *
2418 	 * Note that there are SCU completion codes being
2419 	 * named in the decode below for which SCIC has already
2420 	 * done work to handle them in a way other than as
2421 	 * a controller-specific completion code; these are left
2422 	 * in the decode below for completeness sake.
2423 	 */
2424 	switch (cstatus) {
2425 	case SCU_TASK_DONE_DMASETUP_DIRERR:
2426 	/* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2427 	case SCU_TASK_DONE_XFERCNT_ERR:
2428 		/* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2429 		if (task->task_proto == SAS_PROTOCOL_SMP) {
2430 			/* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2431 			*response_ptr = SAS_TASK_COMPLETE;
2432 
2433 			/* See if the device has been/is being stopped. Note
2434 			 * that we ignore the quiesce state, since we are
2435 			 * concerned about the actual device state.
2436 			 */
2437 			if (!idev)
2438 				*status_ptr = SAS_DEVICE_UNKNOWN;
2439 			else
2440 				*status_ptr = SAS_ABORTED_TASK;
2441 
2442 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2443 
2444 			*complete_to_host_ptr =
2445 				isci_perform_normal_io_completion;
2446 		} else {
2447 			/* Task in the target is not done. */
2448 			*response_ptr = SAS_TASK_UNDELIVERED;
2449 
2450 			if (!idev)
2451 				*status_ptr = SAS_DEVICE_UNKNOWN;
2452 			else
2453 				*status_ptr = SAM_STAT_TASK_ABORTED;
2454 
2455 			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2456 
2457 			*complete_to_host_ptr =
2458 				isci_perform_error_io_completion;
2459 		}
2460 
2461 		break;
2462 
2463 	case SCU_TASK_DONE_CRC_ERR:
2464 	case SCU_TASK_DONE_NAK_CMD_ERR:
2465 	case SCU_TASK_DONE_EXCESS_DATA:
2466 	case SCU_TASK_DONE_UNEXP_FIS:
2467 	/* Also SCU_TASK_DONE_UNEXP_RESP: */
2468 	case SCU_TASK_DONE_VIIT_ENTRY_NV:       /* TODO - conditions? */
2469 	case SCU_TASK_DONE_IIT_ENTRY_NV:        /* TODO - conditions? */
2470 	case SCU_TASK_DONE_RNCNV_OUTBOUND:      /* TODO - conditions? */
2471 		/* These are conditions in which the target
2472 		 * has completed the task, so that no cleanup
2473 		 * is necessary.
2474 		 */
2475 		*response_ptr = SAS_TASK_COMPLETE;
2476 
2477 		/* See if the device has been/is being stopped. Note
2478 		 * that we ignore the quiesce state, since we are
2479 		 * concerned about the actual device state.
2480 		 */
2481 		if (!idev)
2482 			*status_ptr = SAS_DEVICE_UNKNOWN;
2483 		else
2484 			*status_ptr = SAS_ABORTED_TASK;
2485 
2486 		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2487 
2488 		*complete_to_host_ptr = isci_perform_normal_io_completion;
2489 		break;
2490 
2491 
2492 	/* Note that the only open reject completion codes seen here will be
2493 	 * abandon-class codes; all others are automatically retried in the SCU.
2494 	 */
2495 	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2496 
2497 		isci_request_set_open_reject_status(
2498 			request, task, response_ptr, status_ptr,
2499 			complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2500 		break;
2501 
2502 	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2503 
2504 		/* Note - the return of AB0 will change when
2505 		 * libsas implements detection of zone violations.
2506 		 */
2507 		isci_request_set_open_reject_status(
2508 			request, task, response_ptr, status_ptr,
2509 			complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2510 		break;
2511 
2512 	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2513 
2514 		isci_request_set_open_reject_status(
2515 			request, task, response_ptr, status_ptr,
2516 			complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2517 		break;
2518 
2519 	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2520 
2521 		isci_request_set_open_reject_status(
2522 			request, task, response_ptr, status_ptr,
2523 			complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2524 		break;
2525 
2526 	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2527 
2528 		isci_request_set_open_reject_status(
2529 			request, task, response_ptr, status_ptr,
2530 			complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2531 		break;
2532 
2533 	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2534 
2535 		isci_request_set_open_reject_status(
2536 			request, task, response_ptr, status_ptr,
2537 			complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2538 		break;
2539 
2540 	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2541 
2542 		isci_request_set_open_reject_status(
2543 			request, task, response_ptr, status_ptr,
2544 			complete_to_host_ptr, SAS_OREJ_STP_NORES);
2545 		break;
2546 
2547 	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2548 
2549 		isci_request_set_open_reject_status(
2550 			request, task, response_ptr, status_ptr,
2551 			complete_to_host_ptr, SAS_OREJ_EPROTO);
2552 		break;
2553 
2554 	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2555 
2556 		isci_request_set_open_reject_status(
2557 			request, task, response_ptr, status_ptr,
2558 			complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2559 		break;
2560 
2561 	case SCU_TASK_DONE_LL_R_ERR:
2562 	/* Also SCU_TASK_DONE_ACK_NAK_TO: */
2563 	case SCU_TASK_DONE_LL_PERR:
2564 	case SCU_TASK_DONE_LL_SY_TERM:
2565 	/* Also SCU_TASK_DONE_NAK_ERR:*/
2566 	case SCU_TASK_DONE_LL_LF_TERM:
2567 	/* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2568 	case SCU_TASK_DONE_LL_ABORT_ERR:
2569 	case SCU_TASK_DONE_SEQ_INV_TYPE:
2570 	/* Also SCU_TASK_DONE_UNEXP_XR: */
2571 	case SCU_TASK_DONE_XR_IU_LEN_ERR:
2572 	case SCU_TASK_DONE_INV_FIS_LEN:
2573 	/* Also SCU_TASK_DONE_XR_WD_LEN: */
2574 	case SCU_TASK_DONE_SDMA_ERR:
2575 	case SCU_TASK_DONE_OFFSET_ERR:
2576 	case SCU_TASK_DONE_MAX_PLD_ERR:
2577 	case SCU_TASK_DONE_LF_ERR:
2578 	case SCU_TASK_DONE_SMP_RESP_TO_ERR:  /* Escalate to dev reset? */
2579 	case SCU_TASK_DONE_SMP_LL_RX_ERR:
2580 	case SCU_TASK_DONE_UNEXP_DATA:
2581 	case SCU_TASK_DONE_UNEXP_SDBFIS:
2582 	case SCU_TASK_DONE_REG_ERR:
2583 	case SCU_TASK_DONE_SDB_ERR:
2584 	case SCU_TASK_DONE_TASK_ABORT:
2585 	default:
2586 		/* Task in the target is not done. */
2587 		*response_ptr = SAS_TASK_UNDELIVERED;
2588 		*status_ptr = SAM_STAT_TASK_ABORTED;
2589 
2590 		if (task->task_proto == SAS_PROTOCOL_SMP) {
2591 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2592 
2593 			*complete_to_host_ptr = isci_perform_normal_io_completion;
2594 		} else {
2595 			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2596 
2597 			*complete_to_host_ptr = isci_perform_error_io_completion;
2598 		}
2599 		break;
2600 	}
2601 }
2602 
2603 /**
2604  * isci_task_save_for_upper_layer_completion() - This function saves the
2605  *    request for later completion to the upper layer driver.
2606  * @host: This parameter is a pointer to the host on which the request
2607  *    should be queued (either as an error or success).
2608  * @request: This parameter is the completed request.
2609  * @response: This parameter is the response code for the completed task.
2610  * @status: This parameter is the status code for the completed task.
2611  *
2612  * none.
2613  */
2614 static void isci_task_save_for_upper_layer_completion(
2615 	struct isci_host *host,
2616 	struct isci_request *request,
2617 	enum service_response response,
2618 	enum exec_status status,
2619 	enum isci_completion_selection task_notification_selection)
2620 {
2621 	struct sas_task *task = isci_request_access_task(request);
2622 
2623 	task_notification_selection
2624 		= isci_task_set_completion_status(task, response, status,
2625 						  task_notification_selection);
2626 
2627 	/* Tasks aborted specifically by a call to the lldd_abort_task
2628 	 * function should not be completed to the host in the regular path.
2629 	 */
2630 	switch (task_notification_selection) {
2631 
2632 	case isci_perform_normal_io_completion:
2633 		/* Normal notification (task_done) */
2634 
2635 		/* Add to the completed list. */
2636 		list_add(&request->completed_node,
2637 			 &host->requests_to_complete);
2638 
2639 		/* Take the request off the device's pending request list. */
2640 		list_del_init(&request->dev_node);
2641 		break;
2642 
2643 	case isci_perform_aborted_io_completion:
2644 		/* No notification to libsas because this request is
2645 		 * already in the abort path.
2646 		 */
2647 		/* Wake up whatever process was waiting for this
2648 		 * request to complete.
2649 		 */
2650 		WARN_ON(request->io_request_completion == NULL);
2651 
2652 		if (request->io_request_completion != NULL) {
2653 
2654 			/* Signal whoever is waiting that this
2655 			 * request is complete.
2656 			 */
2657 			complete(request->io_request_completion);
2658 		}
2659 		break;
2660 
2661 	case isci_perform_error_io_completion:
2662 		/* Use sas_task_abort */
2663 		/* Add to the aborted list. */
2664 		list_add(&request->completed_node,
2665 			 &host->requests_to_errorback);
2666 		break;
2667 
2668 	default:
2669 		/* Add to the error to libsas list. */
2670 		list_add(&request->completed_node,
2671 			 &host->requests_to_errorback);
2672 		break;
2673 	}
2674 	dev_dbg(&host->pdev->dev,
2675 		"%s: %d - task = %p, response=%d (%d), status=%d (%d)\n",
2676 		__func__, task_notification_selection, task,
2677 		(task) ? task->task_status.resp : 0, response,
2678 		(task) ? task->task_status.stat : 0, status);
2679 }
2680 
2681 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
2682 {
2683 	struct task_status_struct *ts = &task->task_status;
2684 	struct ata_task_resp *resp = (void *)&ts->buf[0];
2685 
2686 	resp->frame_len = sizeof(*fis);
2687 	memcpy(resp->ending_fis, fis, sizeof(*fis));
2688 	ts->buf_valid_size = sizeof(*resp);
2689 
2690 	/* If the device-fault bit is set in the status register, report a
2691 	 * protocol response so that libsas will decode the ending FIS.
2692 	 */
2693 	if (fis->status & ATA_DF)
2694 		ts->stat = SAS_PROTO_RESPONSE;
2695 	else if (fis->status & ATA_ERR)
2696 		ts->stat = SAM_STAT_CHECK_CONDITION;
2697 	else
2698 		ts->stat = SAM_STAT_GOOD;
2699 
2700 	ts->resp = SAS_TASK_COMPLETE;
2701 }
2702 
2703 static void isci_request_io_request_complete(struct isci_host *ihost,
2704 					     struct isci_request *request,
2705 					     enum sci_io_status completion_status)
2706 {
2707 	struct sas_task *task = isci_request_access_task(request);
2708 	struct ssp_response_iu *resp_iu;
2709 	unsigned long task_flags;
2710 	struct isci_remote_device *idev = request->target_device;
2711 	enum service_response response = SAS_TASK_UNDELIVERED;
2712 	enum exec_status status = SAS_ABORTED_TASK;
2713 	enum isci_request_status request_status;
2714 	enum isci_completion_selection complete_to_host
2715 		= isci_perform_normal_io_completion;
2716 
2717 	dev_dbg(&ihost->pdev->dev,
2718 		"%s: request = %p, task = %p,\n"
2719 		"task->data_dir = %d completion_status = 0x%x\n",
2720 		__func__,
2721 		request,
2722 		task,
2723 		task->data_dir,
2724 		completion_status);
2725 
2726 	spin_lock(&request->state_lock);
2727 	request_status = request->status;
2728 
2729 	/* Decode the request status.  Note that if the request has been
2730 	 * aborted by a task management function, we don't care
2731 	 * what the status is.
2732 	 */
2733 	switch (request_status) {
2734 
2735 	case aborted:
2736 		/* "aborted" indicates that the request was aborted by a task
2737 		 * management function, since once a task management request is
2738 		 * performed by the device, the request only completes because
2739 		 * of the subsequent driver terminate.
2740 		 *
2741 		 * Aborted also means an external thread is explicitly managing
2742 		 * this request, so that we do not complete it up the stack.
2743 		 *
2744 		 * The target is still there (since the TMF was successful).
2745 		 */
2746 		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2747 		response = SAS_TASK_COMPLETE;
2748 
2749 		/* See if the device has been/is being stopped. Note
2750 		 * that we ignore the quiesce state, since we are
2751 		 * concerned about the actual device state.
2752 		 */
2753 		if (!idev)
2754 			status = SAS_DEVICE_UNKNOWN;
2755 		else
2756 			status = SAS_ABORTED_TASK;
2757 
2758 		complete_to_host = isci_perform_aborted_io_completion;
2759 		/* This was an aborted request. */
2760 
2761 		spin_unlock(&request->state_lock);
2762 		break;
2763 
2764 	case aborting:
2765 		/* aborting means that the task management function tried and
2766 		 * failed to abort the request. We need to note the request
2767 		 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2768 		 * target as down.
2769 		 *
2770 		 * Aborting also means an external thread is explicitly managing
2771 		 * this request, so that we do not complete it up the stack.
2772 		 */
2773 		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2774 		response = SAS_TASK_UNDELIVERED;
2775 
2776 		if (!idev)
2777 			/* The device has been /is being stopped. Note that
2778 			 * we ignore the quiesce state, since we are
2779 			 * concerned about the actual device state.
2780 			 */
2781 			status = SAS_DEVICE_UNKNOWN;
2782 		else
2783 			status = SAS_PHY_DOWN;
2784 
2785 		complete_to_host = isci_perform_aborted_io_completion;
2786 
2787 		/* This was an aborted request. */
2788 
2789 		spin_unlock(&request->state_lock);
2790 		break;
2791 
2792 	case terminating:
2793 
2794 		/* This was a terminated request.  This happens when
2795 		 * the I/O is being terminated because of an action on
2796 		 * the device (reset, tear down, etc.), and the I/O needs
2797 		 * to be completed up the stack.
2798 		 */
2799 		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2800 		response = SAS_TASK_UNDELIVERED;
2801 
2802 		/* See if the device has been/is being stopped. Note
2803 		 * that we ignore the quiesce state, since we are
2804 		 * concerned about the actual device state.
2805 		 */
2806 		if (!idev)
2807 			status = SAS_DEVICE_UNKNOWN;
2808 		else
2809 			status = SAS_ABORTED_TASK;
2810 
2811 		complete_to_host = isci_perform_aborted_io_completion;
2812 
2813 		/* This was a terminated request. */
2814 
2815 		spin_unlock(&request->state_lock);
2816 		break;
2817 
2818 	case dead:
2819 		/* This was a terminated request that timed-out during the
2820 		 * termination process.  There is no task to complete to
2821 		 * libsas.
2822 		 */
2823 		complete_to_host = isci_perform_normal_io_completion;
2824 		spin_unlock(&request->state_lock);
2825 		break;
2826 
2827 	default:
2828 
2829 		/* The request is done from an SCU HW perspective. */
2830 		request->status = completed;
2831 
2832 		spin_unlock(&request->state_lock);
2833 
2834 		/* This is an active request being completed from the core. */
2835 		switch (completion_status) {
2836 
2837 		case SCI_IO_FAILURE_RESPONSE_VALID:
2838 			dev_dbg(&ihost->pdev->dev,
2839 				"%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2840 				__func__,
2841 				request,
2842 				task);
2843 
2844 			if (sas_protocol_ata(task->task_proto)) {
2845 				isci_process_stp_response(task, &request->stp.rsp);
2846 			} else if (SAS_PROTOCOL_SSP == task->task_proto) {
2847 
2848 				/* crack the iu response buffer. */
2849 				resp_iu = &request->ssp.rsp;
2850 				isci_request_process_response_iu(task, resp_iu,
2851 								 &ihost->pdev->dev);
2852 
2853 			} else if (SAS_PROTOCOL_SMP == task->task_proto) {
2854 
2855 				dev_err(&ihost->pdev->dev,
2856 					"%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2857 					"SAS_PROTOCOL_SMP protocol\n",
2858 					__func__);
2859 
2860 			} else
2861 				dev_err(&ihost->pdev->dev,
2862 					"%s: unknown protocol\n", __func__);
2863 
2864 			/* use the task status set in the task struct by the
2865 			 * isci_request_process_response_iu call.
2866 			 */
2867 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2868 			response = task->task_status.resp;
2869 			status = task->task_status.stat;
2870 			break;
2871 
2872 		case SCI_IO_SUCCESS:
2873 		case SCI_IO_SUCCESS_IO_DONE_EARLY:
2874 
2875 			response = SAS_TASK_COMPLETE;
2876 			status   = SAM_STAT_GOOD;
2877 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2878 
2879 			if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2880 
2881 				/* This was an SSP / STP / SATA transfer.
2882 				 * There is a possibility that less data than
2883 				 * the maximum was transferred.
2884 				 */
2885 				u32 transferred_length = sci_req_tx_bytes(request);
2886 
2887 				task->task_status.residual
2888 					= task->total_xfer_len - transferred_length;
2889 
2890 				/* If there were residual bytes, call this an
2891 				 * underrun.
2892 				 */
2893 				if (task->task_status.residual != 0)
2894 					status = SAS_DATA_UNDERRUN;
2895 
2896 				dev_dbg(&ihost->pdev->dev,
2897 					"%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2898 					__func__,
2899 					status);
2900 
2901 			} else
2902 				dev_dbg(&ihost->pdev->dev,
2903 					"%s: SCI_IO_SUCCESS\n",
2904 					__func__);
2905 
2906 			break;
2907 
2908 		case SCI_IO_FAILURE_TERMINATED:
2909 			dev_dbg(&ihost->pdev->dev,
2910 				"%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2911 				__func__,
2912 				request,
2913 				task);
2914 
2915 			/* The request was terminated explicitly.  No handling
2916 			 * is needed in the SCSI error handler path.
2917 			 */
2918 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2919 			response = SAS_TASK_UNDELIVERED;
2920 
2921 			/* See if the device has been/is being stopped. Note
2922 			 * that we ignore the quiesce state, since we are
2923 			 * concerned about the actual device state.
2924 			 */
2925 			if (!idev)
2926 				status = SAS_DEVICE_UNKNOWN;
2927 			else
2928 				status = SAS_ABORTED_TASK;
2929 
2930 			complete_to_host = isci_perform_normal_io_completion;
2931 			break;
2932 
2933 		case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2934 
2935 			isci_request_handle_controller_specific_errors(
2936 				idev, request, task, &response, &status,
2937 				&complete_to_host);
2938 
2939 			break;
2940 
2941 		case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2942 			/* This is a special case, in that the I/O completion
2943 			 * is telling us that the device needs a reset.
2944 			 * In order for the device reset condition to be
2945 			 * noticed, the I/O has to be handled in the error
2946 			 * handler.  Set the reset flag and cause the
2947 			 * SCSI error thread to be scheduled.
2948 			 */
2949 			spin_lock_irqsave(&task->task_state_lock, task_flags);
2950 			task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2951 			spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2952 
2953 			/* Fail the I/O. */
2954 			response = SAS_TASK_UNDELIVERED;
2955 			status = SAM_STAT_TASK_ABORTED;
2956 
2957 			complete_to_host = isci_perform_error_io_completion;
2958 			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2959 			break;
2960 
2961 		case SCI_FAILURE_RETRY_REQUIRED:
2962 
2963 			/* Fail the I/O so it can be retried. */
2964 			response = SAS_TASK_UNDELIVERED;
2965 			if (!idev)
2966 				status = SAS_DEVICE_UNKNOWN;
2967 			else
2968 				status = SAS_ABORTED_TASK;
2969 
2970 			complete_to_host = isci_perform_normal_io_completion;
2971 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2972 			break;
2973 
2974 
2975 		default:
2976 			/* Catch any otherwise unhandled error codes here. */
2977 			dev_dbg(&ihost->pdev->dev,
2978 				 "%s: invalid completion code: 0x%x - "
2979 				 "isci_request = %p\n",
2980 				 __func__, completion_status, request);
2981 
2982 			response = SAS_TASK_UNDELIVERED;
2983 
2984 			/* See if the device has been/is being stopped. Note
2985 			 * that we ignore the quiesce state, since we are
2986 			 * concerned about the actual device state.
2987 			 */
2988 			if (!idev)
2989 				status = SAS_DEVICE_UNKNOWN;
2990 			else
2991 				status = SAS_ABORTED_TASK;
2992 
2993 			if (SAS_PROTOCOL_SMP == task->task_proto) {
2994 				set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2995 				complete_to_host = isci_perform_normal_io_completion;
2996 			} else {
2997 				clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2998 				complete_to_host = isci_perform_error_io_completion;
2999 			}
3000 			break;
3001 		}
3002 		break;
3003 	}
3004 
3005 	switch (task->task_proto) {
3006 	case SAS_PROTOCOL_SSP:
3007 		if (task->data_dir == DMA_NONE)
3008 			break;
3009 		if (task->num_scatter == 0)
3010 			/* 0 indicates a single dma address */
3011 			dma_unmap_single(&ihost->pdev->dev,
3012 					 request->zero_scatter_daddr,
3013 					 task->total_xfer_len, task->data_dir);
3014 		else  /* unmap the sgl dma addresses */
3015 			dma_unmap_sg(&ihost->pdev->dev, task->scatter,
3016 				     request->num_sg_entries, task->data_dir);
3017 		break;
3018 	case SAS_PROTOCOL_SMP: {
3019 		struct scatterlist *sg = &task->smp_task.smp_req;
3020 		struct smp_req *smp_req;
3021 		void *kaddr;
3022 
3023 		dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
3024 
3025 		/* need to swab it back in case the command buffer is re-used */
3026 		kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
3027 		smp_req = kaddr + sg->offset;
3028 		sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3029 		kunmap_atomic(kaddr, KM_IRQ0);
3030 		break;
3031 	}
3032 	default:
3033 		break;
3034 	}
3035 
3036 	/* Put the completed request on the correct list */
3037 	isci_task_save_for_upper_layer_completion(ihost, request, response,
3038 						  status, complete_to_host);
3040 
3041 	/* complete the io request to the core. */
3042 	sci_controller_complete_io(ihost, request->target_device, request);
3043 
3044 	/* set the terminated flag so the request cannot be completed or
3045 	 * terminated again, and to cause any calls into abort
3046 	 * task to recognize the already completed case.
3047 	 */
3048 	set_bit(IREQ_TERMINATED, &request->flags);
3049 }
3050 
3051 static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
3052 {
3053 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3054 	struct domain_device *dev = ireq->target_device->domain_dev;
3055 	enum sci_base_request_states state;
3056 	struct sas_task *task;
3057 
3058 	/* XXX as hch said always creating an internal sas_task for tmf
3059 	 * requests would simplify the driver
3060 	 */
3061 	task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
3062 
3063 	/* all unaccelerated request types (non ssp or ncq) are handled
3064 	 * with substates
3065 	 */
3066 	if (!task && dev->dev_type == SAS_END_DEV) {
3067 		state = SCI_REQ_TASK_WAIT_TC_COMP;
3068 	} else if (!task &&
3069 		   (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
3070 		    isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
3071 		state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED;
3072 	} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
3073 		state = SCI_REQ_SMP_WAIT_RESP;
3074 	} else if (task && sas_protocol_ata(task->task_proto) &&
3075 		   !task->ata_task.use_ncq) {
3076 		if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
3077 			task->ata_task.fis.command == ATA_CMD_PACKET) {
3078 			state = SCI_REQ_ATAPI_WAIT_H2D;
3079 		} else if (task->data_dir == DMA_NONE) {
3080 			state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
3081 		} else if (task->ata_task.dma_xfer) {
3082 			state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
3083 		} else /* PIO */ {
3084 			state = SCI_REQ_STP_PIO_WAIT_H2D;
3085 		}
3086 	} else {
3087 		/* SSP or NCQ are fully accelerated, no substates */
3088 		return;
3089 	}
3090 	sci_change_state(sm, state);
3091 }
3092 
3093 static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
3094 {
3095 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3096 	struct isci_host *ihost = ireq->owning_controller;
3097 
3098 	/* Tell the SCI_USER that the IO request is complete */
3099 	if (!test_bit(IREQ_TMF, &ireq->flags))
3100 		isci_request_io_request_complete(ihost, ireq,
3101 						 ireq->sci_status);
3102 	else
3103 		isci_task_request_complete(ihost, ireq, ireq->sci_status);
3104 }
3105 
3106 static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
3107 {
3108 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3109 
3110 	/* Setting the abort bit in the Task Context is required by the silicon. */
3111 	ireq->tc->abort = 1;
3112 }
3113 
sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine * sm)3114 static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3115 {
3116 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3117 
3118 	ireq->target_device->working_request = ireq;
3119 }
3120 
sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine * sm)3121 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3122 {
3123 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3124 
3125 	ireq->target_device->working_request = ireq;
3126 }
3127 
sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine * sm)3128 static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
3129 {
3130 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3131 
3132 	ireq->target_device->working_request = ireq;
3133 }
3134 
sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine * sm)3135 static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
3136 {
3137 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3138 	struct scu_task_context *tc = ireq->tc;
3139 	struct host_to_dev_fis *h2d_fis;
3140 	enum sci_status status;
3141 
3142 	/* Clear the SRST bit */
3143 	h2d_fis = &ireq->stp.cmd;
3144 	h2d_fis->control = 0;
3145 
3146 	/* Clear the TC control bit */
3147 	tc->control_frame = 0;
3148 
3149 	status = sci_controller_continue_io(ireq);
3150 	WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
3151 }
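
/*
 * Illustrative note (hedged; drawn from the ATA/SATA soft-reset
 * protocol rather than driver documentation): a SATA soft reset is two
 * back-to-back register H2D FISes, the first asserting SRST in the
 * Device Control field and the second de-asserting it.  The diagnostic
 * enter routine above prepares that second frame by clearing SRST
 * (h2d_fis->control = 0) and clearing the TC control_frame flag before
 * resuming the request with sci_controller_continue_io(ireq).
 */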

static const struct sci_base_state sci_request_state_table[] = {
	[SCI_REQ_INIT] = { },
	[SCI_REQ_CONSTRUCTED] = { },
	[SCI_REQ_STARTED] = {
		.enter_state = sci_request_started_state_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
		.enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
	[SCI_REQ_STP_PIO_WAIT_H2D] = {
		.enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_PIO_WAIT_FRAME] = { },
	[SCI_REQ_STP_PIO_DATA_IN] = { },
	[SCI_REQ_STP_PIO_DATA_OUT] = { },
	[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
	[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
		.enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
		.enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
	[SCI_REQ_TASK_WAIT_TC_COMP] = { },
	[SCI_REQ_TASK_WAIT_TC_RESP] = { },
	[SCI_REQ_SMP_WAIT_RESP] = { },
	[SCI_REQ_SMP_WAIT_TC_COMP] = { },
	[SCI_REQ_ATAPI_WAIT_H2D] = { },
	[SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
	[SCI_REQ_ATAPI_WAIT_D2H] = { },
	[SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
	[SCI_REQ_COMPLETED] = {
		.enter_state = sci_request_completed_state_enter,
	},
	[SCI_REQ_ABORTING] = {
		.enter_state = sci_request_aborting_state_enter,
	},
	[SCI_REQ_FINAL] = { },
};
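
/*
 * Illustrative note (not from the original sources): the entries above
 * are consumed by the sci_base_state_machine core, so a transition such
 * as
 *
 *	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 *
 * updates ireq->sm and then invokes the registered .enter_state hook
 * (here sci_request_completed_state_enter), which is how completion is
 * fanned out to the I/O and TMF paths.
 */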

static void
sci_general_request_construct(struct isci_host *ihost,
			      struct isci_remote_device *idev,
			      struct isci_request *ireq)
{
	sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);

	ireq->target_device = idev;
	ireq->protocol = SCIC_NO_PROTOCOL;
	ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;

	ireq->sci_status   = SCI_SUCCESS;
	ireq->scu_status   = 0;
	ireq->post_context = 0xFFFFFFFF;
}

static enum sci_status
sci_io_request_construct(struct isci_host *ihost,
			 struct isci_remote_device *idev,
			 struct isci_request *ireq)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	sci_general_request_construct(ihost, idev, ireq);

	if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
		return SCI_FAILURE_INVALID_REMOTE_DEVICE;

	if (dev->dev_type == SAS_END_DEV)
		/* pass */;
	else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
		memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
	else if (dev_is_expander(dev))
		/* pass */;
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));

	return status;
}
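
/*
 * Illustrative note (hedged interpretation, not upstream documentation):
 * the memset in sci_io_request_construct() deliberately stops at
 * offsetof(struct scu_task_context, sgl_pair_ab), zeroing only the fixed
 * task-context header.  The embedded SGL pair area that follows is fully
 * rewritten by the protocol-specific construct paths (see
 * to_sgl_element_pair()), so clearing it here would be redundant work on
 * every I/O.
 */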

enum sci_status sci_task_request_construct(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   u16 io_tag, struct isci_request *ireq)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	sci_general_request_construct(ihost, idev, ireq);

	if (dev->dev_type == SAS_END_DEV ||
	    dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
		set_bit(IREQ_TMF, &ireq->flags);
		memset(ireq->tc, 0, sizeof(struct scu_task_context));
	} else
		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	return status;
}

static enum sci_status isci_request_ssp_request_construct(
	struct isci_request *request)
{
	enum sci_status status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p\n",
		__func__,
		request);
	status = sci_io_request_construct_basic_ssp(request);
	return status;
}

static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct host_to_dev_fis *fis = &ireq->stp.cmd;
	struct ata_queued_cmd *qc = task->uldd_task;
	enum sci_status status;

	dev_dbg(&ireq->isci_host->pdev->dev,
		"%s: ireq = %p\n",
		__func__,
		ireq);

	memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (!task->ata_task.device_control_reg_update)
		fis->flags |= 0x80; /* C bit: this FIS carries a command */
	fis->flags &= 0xF0; /* clear the PM port nibble */

	status = sci_io_request_construct_basic_sata(ireq);

	if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		   qc->tf.command == ATA_CMD_FPDMA_READ)) {
		fis->sector_count = qc->tag << 3;
		ireq->tc->type.stp.ncq_tag = qc->tag;
	}

	return status;
}
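
/*
 * Illustrative note on the NCQ tagging above (hedged; based on the SATA
 * NCQ protocol rather than driver documentation): READ/WRITE FPDMA
 * QUEUED commands carry the queue tag in bits 7:3 of the Sector Count
 * field of the H2D FIS, hence the shift:
 *
 *	fis->sector_count = qc->tag << 3;	// tag into bits 7:3
 *	ireq->tc->type.stp.ncq_tag = qc->tag;	// raw tag for the SCU TC
 */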

static enum sci_status
sci_io_request_construct_smp(struct device *dev,
			     struct isci_request *ireq,
			     struct sas_task *task)
{
	struct scatterlist *sg = &task->smp_task.smp_req;
	struct isci_remote_device *idev;
	struct scu_task_context *task_context;
	struct isci_port *iport;
	struct smp_req *smp_req;
	void *kaddr;
	u8 req_len;
	u32 cmd;

	kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
	smp_req = kaddr + sg->offset;
	/*
	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
	 * functions under SAS 2.0, a zero request length really indicates
	 * a non-zero default length.
	 */
	if (smp_req->req_len == 0) {
		switch (smp_req->func) {
		case SMP_DISCOVER:
		case SMP_REPORT_PHY_ERR_LOG:
		case SMP_REPORT_PHY_SATA:
		case SMP_REPORT_ROUTE_INFO:
			smp_req->req_len = 2;
			break;
		case SMP_CONF_ROUTE_INFO:
		case SMP_PHY_CONTROL:
		case SMP_PHY_TEST_FUNCTION:
			smp_req->req_len = 9;
			break;
			/* Default - zero is a valid default for 2.0. */
		}
	}
	req_len = smp_req->req_len;
	/* byte swap the smp request */
	sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
	cmd = *(u32 *) smp_req;
	kunmap_atomic(kaddr, KM_IRQ0);

	if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
		return SCI_FAILURE;

	ireq->protocol = SCIC_SMP_PROTOCOL;

	task_context = ireq->tc;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/*
	 * Fill in the TC with its required data
	 * 00h
	 */
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
	task_context->abort = 0;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	/* 04h */
	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;
	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;

	/* 08h */
	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 1;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	/* 0ch */
	task_context->address_modifier = 0;

	/* 10h */
	task_context->ssp_command_iu_length = req_len;

	/* 14h */
	task_context->transfer_length_bytes = 0;

	/*
	 * 18h ~ 30h, protocol specific
	 * since the command IU has already been built by the framework at
	 * this point, we just copy the first dword from the command IU to
	 * this location.
	 */
	memcpy(&task_context->type.smp, &cmd, sizeof(u32));

	/*
	 * 40h
	 * "For SMP you could program it to zero. We would prefer that way
	 * so that done code will be consistent." - Venki
	 */
	task_context->task_phase = 0;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address of the command buffer to the SCU Task
	 * Context; the command buffer should not contain the command header.
	 */
	task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
	task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));

	/* SMP response comes as UF, so no need to set response IU address. */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}
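
/*
 * Illustrative note (not from the original sources): the SMP request
 * payload makes a full byte-swap round trip.  Construction above swaps
 * the buffer dword-by-dword and then DMA-maps it for the hardware:
 *
 *	sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
 *	dma_map_sg(dev, sg, 1, DMA_TO_DEVICE);
 *
 * while the SAS_PROTOCOL_SMP completion case (earlier in this file)
 * unmaps the same scatterlist and swaps the buffer back, so the command
 * buffer can safely be re-used by the upper layer.
 */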

/**
 * isci_smp_request_build() - This function builds the smp request.
 * @ireq: This parameter points to the isci_request allocated in the
 *    request construct function.
 *
 * Returns SCI_SUCCESS on successful completion, or a specific failure code.
 */
static enum sci_status isci_smp_request_build(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct device *dev = &ireq->isci_host->pdev->dev;
	enum sci_status status;

	status = sci_io_request_construct_smp(dev, ireq, task);
	if (status != SCI_SUCCESS)
		dev_dbg(&ireq->isci_host->pdev->dev,
			"%s: failed with status = %d\n",
			__func__,
			status);

	return status;
}

/**
 * isci_io_request_build() - This function builds the io request object.
 * @ihost: This parameter specifies the ISCI host object
 * @request: This parameter points to the isci_request object allocated in the
 *    request construct function.
 * @idev: This parameter is the handle for the sci core's remote device
 *    object that is the destination for this request.
 *
 * Returns SCI_SUCCESS on successful completion, or a specific failure code.
 */
static enum sci_status isci_io_request_build(struct isci_host *ihost,
					     struct isci_request *request,
					     struct isci_remote_device *idev)
{
	enum sci_status status = SCI_SUCCESS;
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = 0x%p; request = %p, "
		"num_scatter = %d\n",
		__func__,
		idev,
		request,
		task->num_scatter);

	/* map the sgl addresses, if present.
	 * libata does the mapping for sata devices
	 * before we get the request.
	 */
	if (task->num_scatter &&
	    !sas_protocol_ata(task->task_proto) &&
	    !(SAS_PROTOCOL_SMP & task->task_proto)) {

		request->num_sg_entries = dma_map_sg(
			&ihost->pdev->dev,
			task->scatter,
			task->num_scatter,
			task->data_dir
			);

		if (request->num_sg_entries == 0)
			return SCI_FAILURE_INSUFFICIENT_RESOURCES;
	}

	status = sci_io_request_construct(ihost, idev, request);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: failed request construct\n",
			__func__);
		return SCI_FAILURE;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		status = isci_smp_request_build(request);
		break;
	case SAS_PROTOCOL_SSP:
		status = isci_request_ssp_request_construct(request);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		status = isci_request_stp_request_construct(request);
		break;
	default:
		dev_dbg(&ihost->pdev->dev,
			"%s: unknown protocol\n", __func__);
		return SCI_FAILURE;
	}

	return SCI_SUCCESS;
}

static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
{
	struct isci_request *ireq;

	ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
	ireq->io_tag = tag;
	ireq->io_request_completion = NULL;
	ireq->flags = 0;
	ireq->num_sg_entries = 0;
	INIT_LIST_HEAD(&ireq->completed_node);
	INIT_LIST_HEAD(&ireq->dev_node);
	isci_request_change_state(ireq, allocated);

	return ireq;
}

static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
						     struct sas_task *task,
						     u16 tag)
{
	struct isci_request *ireq;

	ireq = isci_request_from_tag(ihost, tag);
	ireq->ttype_ptr.io_task_ptr = task;
	clear_bit(IREQ_TMF, &ireq->flags);
	task->lldd_task = ireq;

	return ireq;
}

struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
					       struct isci_tmf *isci_tmf,
					       u16 tag)
{
	struct isci_request *ireq;

	ireq = isci_request_from_tag(ihost, tag);
	ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
	set_bit(IREQ_TMF, &ireq->flags);

	return ireq;
}
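
/*
 * Illustrative sketch (hedged; the tag macros live in the driver
 * headers, not in this file): an I/O tag packs a rotating sequence
 * number together with a task context index (TCI), and ISCI_TAG_TCI()
 * extracts the index used above to look up the pre-allocated request
 * in ihost->reqs[].  Assuming the ISCI_TAG()/ISCI_TAG_TCI() macros:
 *
 *	u16 tag = ISCI_TAG(seq, tci);		// seq high bits, tci low
 *	ireq = ihost->reqs[ISCI_TAG_TCI(tag)];	// back to the request
 */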

int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
			 struct sas_task *task, u16 tag)
{
	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
	struct isci_request *ireq;
	unsigned long flags;
	int ret = 0;

	/* do common allocation and init of request object. */
	ireq = isci_io_request_from_tag(ihost, task, tag);

	status = isci_io_request_build(ihost, ireq, idev);
	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: request_construct failed - status = 0x%x\n",
			__func__,
			status);
		return status;
	}

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {

		if (isci_task_is_ncq_recovery(task)) {

			/* The device is in an NCQ recovery state.  Issue the
			 * request on the task side.  Note that it will
			 * complete on the I/O request side because the
			 * request was built that way (i.e. the IREQ_TMF flag
			 * is not set).
			 */
			status = sci_controller_start_task(ihost,
							   idev,
							   ireq);
		} else {
			status = SCI_FAILURE;
		}
	} else {
		/* send the request; the I/O tag was already assigned by
		 * the caller.
		 */
		status = sci_controller_start_io(ihost, idev,
						 ireq);
	}

	if (status != SCI_SUCCESS &&
	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		dev_dbg(&ihost->pdev->dev,
			"%s: failed request start (0x%x)\n",
			__func__, status);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		return status;
	}

	/* Either I/O started OK, or the core has signaled that
	 * the device needs a target reset.
	 *
	 * In either case, hold onto the I/O for later.
	 *
	 * Update its status and add it to the list in the
	 * remote device object.
	 */
	list_add(&ireq->dev_node, &idev->reqs_in_process);

	if (status == SCI_SUCCESS) {
		isci_request_change_state(ireq, started);
	} else {
		/* The request did not really start in the
		 * hardware, so clear the request handle
		 * here so no terminations will be done.
		 */
		set_bit(IREQ_TERMINATED, &ireq->flags);
		isci_request_change_state(ireq, completed);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		/* Signal libsas that we need the SCSI error
		 * handler thread to work on this I/O and that
		 * we want a device reset.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* Cause this task to be scheduled in the SCSI error
		 * handler thread.
		 */
		isci_execpath_callback(ihost, task, sas_task_abort);

		/* Change the status, since we are holding
		 * the I/O until it is managed by the SCSI
		 * error handler.
		 */
		status = SCI_SUCCESS;
	}

	return ret;
}
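
/*
 * Illustrative caller-side sketch (hedged; isci_alloc_tag() and
 * SCI_CONTROLLER_INVALID_IO_TAG are assumed from the host code and are
 * not defined in this file): the execute path expects the caller to
 * have reserved a tag before handing the sas_task over:
 *
 *	u16 tag = isci_alloc_tag(ihost);	// assumed helper
 *
 *	if (tag != SCI_CONTROLLER_INVALID_IO_TAG)
 *		ret = isci_request_execute(ihost, idev, task, tag);
 */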