/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/kassert.h>
#include <sys/module.h>

#include <cam/scsi/scsi_all.h>

#include "ufshci_private.h"

static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);

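/*
 * Queue operations for the UTP Task Management Request (UTMR) list in
 * legacy single doorbell (SDB) mode.
 */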
static const struct ufshci_qops sdb_utmr_qops = {
        .construct = ufshci_req_sdb_construct,
        .destroy = ufshci_req_sdb_destroy,
        .get_hw_queue = ufshci_req_sdb_get_hw_queue,
        .enable = ufshci_req_sdb_enable,
        .disable = ufshci_req_sdb_disable,
        .reserve_slot = ufshci_req_sdb_reserve_slot,
        .reserve_admin_slot = ufshci_req_sdb_reserve_slot,
        .ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
        .is_doorbell_cleared = ufshci_req_sdb_utmr_is_doorbell_cleared,
        .clear_cpl_ntf = ufshci_req_sdb_utmr_clear_cpl_ntf,
        .process_cpl = ufshci_req_sdb_process_cpl,
        .get_inflight_io = ufshci_req_sdb_get_inflight_io,
};

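/*
 * Queue operations for the UTP Transfer Request (UTR) list in legacy
 * single doorbell (SDB) mode.
 */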
static const struct ufshci_qops sdb_utr_qops = {
        .construct = ufshci_req_sdb_construct,
        .destroy = ufshci_req_sdb_destroy,
        .get_hw_queue = ufshci_req_sdb_get_hw_queue,
        .enable = ufshci_req_sdb_enable,
        .disable = ufshci_req_sdb_disable,
        .reserve_slot = ufshci_req_sdb_reserve_slot,
        .reserve_admin_slot = ufshci_req_sdb_reserve_slot,
        .ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
        .is_doorbell_cleared = ufshci_req_sdb_utr_is_doorbell_cleared,
        .clear_cpl_ntf = ufshci_req_sdb_utr_clear_cpl_ntf,
        .process_cpl = ufshci_req_sdb_process_cpl,
        .get_inflight_io = ufshci_req_sdb_get_inflight_io,
};

int
ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr)
{
        struct ufshci_req_queue *req_queue;
        int error;

        /*
         * UTP Task Management Requests only support the legacy single
         * doorbell queue.
         */
        req_queue = &ctrlr->task_mgmt_req_queue;
        req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
        req_queue->qops = sdb_utmr_qops;

        error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES,
            /*is_task_mgmt*/ true);

        return (error);
}

void
ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
        ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
            &ctrlr->task_mgmt_req_queue);
}

void
ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr)
{
        ctrlr->task_mgmt_req_queue.qops.disable(ctrlr,
            &ctrlr->task_mgmt_req_queue);
}

int
ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
        return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
            &ctrlr->task_mgmt_req_queue));
}

int
ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr)
{
        struct ufshci_req_queue *req_queue;
        int error;

        /*
         * MCQ mode is not yet supported, so default to SDB mode.
         * TODO: Determine the queue mode by checking the capability
         * registers.
         */
        req_queue = &ctrlr->transfer_req_queue;
        req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
        req_queue->qops = sdb_utr_qops;

        error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES,
            /*is_task_mgmt*/ false);

        return (error);
}

void
ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
        ctrlr->transfer_req_queue.qops.destroy(ctrlr,
            &ctrlr->transfer_req_queue);
}

void
ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr)
{
        ctrlr->transfer_req_queue.qops.disable(ctrlr,
            &ctrlr->transfer_req_queue);
}

int
ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
        return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
            &ctrlr->transfer_req_queue));
}

static bool
ufshci_req_queue_response_is_error(struct ufshci_req_queue *req_queue,
    uint8_t ocs, union ufshci_reponse_upiu *response)
{
        bool is_error = false;

        /* Check the request descriptor's overall command status (OCS). */
        if (ocs != UFSHCI_DESC_SUCCESS) {
                ufshci_printf(req_queue->ctrlr, "Invalid OCS = 0x%x\n", ocs);
                is_error = true;
        }

        /* Check the response UPIU header. */
        if (response->header.response != UFSHCI_RESPONSE_CODE_TARGET_SUCCESS) {
                ufshci_printf(req_queue->ctrlr,
                    "Function(0x%x) Invalid response code = 0x%x\n",
                    response->header.ext_iid_or_function,
                    response->header.response);
                is_error = true;
        }

        return (is_error);
}

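/*
 * Complete a tracker without hardware involvement: write the given
 * response code and OCS value into the response UPIU and the request
 * descriptor, then run the normal completion path.
 */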
static void
ufshci_req_queue_manual_complete_tracker(struct ufshci_tracker *tr, uint8_t ocs,
    uint8_t rc)
{
        struct ufshci_utp_xfer_req_desc *desc;
        struct ufshci_upiu_header *resp_header;

        mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);

        resp_header = (struct ufshci_upiu_header *)tr->ucd->response_upiu;
        resp_header->response = rc;

        desc = &tr->hwq->utrd[tr->slot_num];
        desc->overall_command_status = ocs;

        ufshci_req_queue_complete_tracker(tr);
}

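/*
 * Complete a request that never reached the hardware by synthesizing a
 * completion with the given OCS and response code and invoking the
 * request's callback directly.
 */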
static void
ufshci_req_queue_manual_complete_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req, uint8_t ocs, uint8_t rc)
{
        struct ufshci_completion cpl;
        bool error;

        memset(&cpl, 0, sizeof(cpl));
        cpl.response_upiu.header.response = rc;
        error = ufshci_req_queue_response_is_error(req_queue, ocs,
            &cpl.response_upiu);

        if (error) {
                ufshci_printf(req_queue->ctrlr,
                    "Manual complete request error: 0x%x\n", error);
        }

        if (req->cb_fn)
                req->cb_fn(req->cb_arg, &cpl, error);

        ufshci_free_request(req);
}

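/*
 * Fail all outstanding requests on the queue, e.g. when the controller
 * has failed. Reserved slots are completed as requests; scheduled slots
 * are completed as trackers.
 */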
void
ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue)
{
        struct ufshci_hw_queue *hwq = req_queue->qops.get_hw_queue(req_queue);
        struct ufshci_tracker *tr;
        int i;

        if (!mtx_initialized(&hwq->qlock))
                return;

        mtx_lock(&hwq->qlock);

        for (i = 0; i < req_queue->num_trackers; i++) {
                tr = hwq->act_tr[i];

                if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED) {
                        mtx_unlock(&hwq->qlock);
                        ufshci_req_queue_manual_complete_request(req_queue,
                            tr->req, UFSHCI_DESC_ABORTED,
                            UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
                        mtx_lock(&hwq->qlock);
                } else if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED) {
                        /*
                         * Do not remove the tracker. The abort_tracker path
                         * will do that for us.
                         */
                        mtx_unlock(&hwq->qlock);
                        ufshci_req_queue_manual_complete_tracker(tr,
                            UFSHCI_DESC_ABORTED,
                            UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
                        mtx_lock(&hwq->qlock);
                }
        }

        mtx_unlock(&hwq->qlock);
}

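/*
 * Handle a completed tracker: copy out the response, decide whether to
 * retry (admin commands only), invoke the callback, and release the
 * slot back to the free list.
 */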
void
ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
{
        struct ufshci_req_queue *req_queue = tr->req_queue;
        struct ufshci_hw_queue *hwq = tr->hwq;
        struct ufshci_request *req = tr->req;
        struct ufshci_completion cpl;
        uint8_t ocs;
        bool retry, error, retriable;

        mtx_assert(&hwq->qlock, MA_NOTOWNED);

        /*
         * Copy the response from the Request Descriptor or UTP Command
         * Descriptor.
         */
        cpl.size = tr->response_size;
        if (req_queue->is_task_mgmt) {
                memcpy(&cpl.response_upiu,
                    (void *)hwq->utmrd[tr->slot_num].response_upiu, cpl.size);

                ocs = hwq->utmrd[tr->slot_num].overall_command_status;
        } else {
                bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

                memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
                    cpl.size);

                ocs = hwq->utrd[tr->slot_num].overall_command_status;
        }

        error = ufshci_req_queue_response_is_error(req_queue, ocs,
            &cpl.response_upiu);

        /* Only admin commands are retried. */
        retriable = req->is_admin;
        retry = error && retriable &&
            req->retries < req_queue->ctrlr->retry_count;
        if (retry)
                hwq->num_retries++;
        if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
                hwq->num_failures++;

        KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
        KASSERT(cpl.response_upiu.header.task_tag ==
            req->request_upiu.header.task_tag,
            ("response task_tag does not match request task_tag\n"));

        if (!retry) {
                if (req->payload_valid) {
                        bus_dmamap_sync(req_queue->dma_tag_payload,
                            tr->payload_dma_map,
                            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                }
                /* Pass the completion copied from the command descriptor. */
                if (req->cb_fn)
                        req->cb_fn(req->cb_arg, &cpl, error);
        }

        mtx_lock(&hwq->qlock);

        /* Clear the UTRL completion notification register. */
        req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);

        if (retry) {
                req->retries++;
                ufshci_req_queue_submit_tracker(req_queue, tr,
                    req->data_direction);
        } else {
                if (req->payload_valid) {
                        bus_dmamap_unload(req_queue->dma_tag_payload,
                            tr->payload_dma_map);
                }

                /* Release the tracker back to the free list. */
                ufshci_free_request(req);
                tr->req = NULL;
                tr->slot_state = UFSHCI_SLOT_STATE_FREE;

                TAILQ_REMOVE(&hwq->outstanding_tr, tr, tailq);
                TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
        }

        mtx_unlock(&hwq->qlock);
}

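/*
 * Poll the hardware queue for completions, serialized against timeout
 * recovery by the recovery lock.
 */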
bool
ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
{
        struct ufshci_hw_queue *hwq;
        bool done;

        hwq = req_queue->qops.get_hw_queue(req_queue);

        mtx_lock(&hwq->recovery_lock);
        done = req_queue->qops.process_cpl(req_queue);
        mtx_unlock(&hwq->recovery_lock);

        return (done);
}

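/*
 * bus_dma callback that fills the PRDT with the DMA segments of the
 * payload. Each entry holds a 64-bit segment address split into
 * lower/upper halves and a zero-based byte count.
 */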
static void
ufshci_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
        struct ufshci_tracker *tr = arg;
        struct ufshci_prdt_entry *prdt_entry;
        int i;

        /*
         * If the mapping operation failed, return immediately. The caller
         * is responsible for detecting the error status and failing the
         * tracker manually.
         */
        if (error != 0) {
                ufshci_printf(tr->req_queue->ctrlr,
                    "Failed to map payload %d\n", error);
                return;
        }

        prdt_entry = (struct ufshci_prdt_entry *)tr->ucd->prd_table;

        tr->prdt_entry_cnt = nseg;

        for (i = 0; i < nseg; i++) {
                prdt_entry->data_base_address = htole64(seg[i].ds_addr) &
                    0xffffffff;
                prdt_entry->data_base_address_upper = htole64(seg[i].ds_addr) >>
                    32;
                prdt_entry->data_byte_count = htole32(seg[i].ds_len - 1);

                ++prdt_entry;
        }

        bus_dmamap_sync(tr->req_queue->dma_tag_payload, tr->payload_dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

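/*
 * Build the PRDT for a request's payload by loading its DMA map; on
 * failure the tracker is completed manually with an error status.
 */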
static void
ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
{
        struct ufshci_request *req = tr->req;
        struct ufshci_utp_cmd_desc *cmd_desc = tr->ucd;
        int error;

        tr->prdt_off = UFSHCI_UTP_XFER_REQ_SIZE + UFSHCI_UTP_XFER_RESP_SIZE;

        memset(cmd_desc->prd_table, 0, sizeof(cmd_desc->prd_table));

        /* Fill the PRDT entries with the payload. */
        error = bus_dmamap_load_mem(tr->req_queue->dma_tag_payload,
            tr->payload_dma_map, &req->payload, ufshci_payload_map, tr,
            BUS_DMA_NOWAIT);
        if (error != 0) {
                /*
                 * The dmamap operation failed, so we manually fail the
                 * tracker here with UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES.
                 *
                 * ufshci_req_queue_manual_complete_tracker must not be called
                 * with the req_queue lock held.
                 */
                ufshci_printf(tr->req_queue->ctrlr,
                    "bus_dmamap_load_mem returned with error: 0x%x!\n", error);

                mtx_unlock(&tr->hwq->qlock);
                ufshci_req_queue_manual_complete_tracker(tr,
                    UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES,
                    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
                mtx_lock(&tr->hwq->qlock);
        }
}

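/*
 * Initialize a UTP Task Management Request descriptor and copy in the
 * request UPIU.
 */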
static void
ufshci_req_queue_fill_utmr_descriptor(
    struct ufshci_utp_task_mgmt_req_desc *desc, struct ufshci_request *req)
{
        memset(desc, 0, sizeof(struct ufshci_utp_task_mgmt_req_desc));
        desc->interrupt = true;
        /* Set the initial value to Invalid. */
        desc->overall_command_status = UFSHCI_UTMR_OCS_INVALID;

        memcpy(desc->request_upiu, &req->request_upiu, req->request_size);
}

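/*
 * Initialize a UTP Transfer Request descriptor. The offsets and length
 * of the response UPIU and PRDT are expressed in dwords, as the UFSHCI
 * specification requires; the PRDT length is a count of entries.
 */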
static void
ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
    uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
    const uint16_t response_len, const uint16_t prdt_off,
    const uint16_t prdt_entry_cnt)
{
        uint8_t command_type;
        /* Divisor to convert bytes to dwords. */
        const uint16_t dword_size = 4;

        /*
         * Set the command type to UFS storage.
         * 'UFS Storage' is the only command type defined in the UFS 4.1
         * spec.
         */
        command_type = UFSHCI_COMMAND_TYPE_UFS_STORAGE;

        memset(desc, 0, sizeof(struct ufshci_utp_xfer_req_desc));
        desc->command_type = command_type;
        desc->data_direction = data_direction;
        desc->interrupt = true;
        /* Set the initial value to Invalid. */
        desc->overall_command_status = UFSHCI_UTR_OCS_INVALID;
        desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
            0xffffffff);
        desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
            32);

        desc->response_upiu_offset = response_off / dword_size;
        desc->response_upiu_length = response_len / dword_size;
        desc->prdt_offset = prdt_off / dword_size;
        desc->prdt_length = prdt_entry_cnt;
}

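/*
 * Escalate timeout recovery. Steps 2-4 (logical unit reset, target
 * device reset, bus reset) are not implemented yet, so this currently
 * falls through to step 5: resetting the host controller.
 */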
static void
ufshci_req_queue_timeout_recovery(struct ufshci_controller *ctrlr,
    struct ufshci_hw_queue *hwq)
{
        /* TODO: Step 2. Logical unit reset */
        /* TODO: Step 3. Target device reset */
        /* TODO: Step 4. Bus reset */

        /*
         * Step 5. All earlier recovery steps have failed or are not yet
         * implemented, so reset the host controller as a last resort.
         */
        ufshci_printf(ctrlr,
            "Recovery step 5: Resetting controller due to a timeout.\n");
        hwq->recovery_state = RECOVERY_WAITING;

        ufshci_ctrlr_reset(ctrlr);
}

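/*
 * Completion callback for an abort task request issued from the
 * timeout handler. If the original command is still outstanding, it is
 * completed manually and, if the abort itself also failed, recovery is
 * escalated.
 */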
static void
ufshci_abort_complete(void *arg, const struct ufshci_completion *status,
    bool error)
{
        struct ufshci_tracker *tr = arg;

        /*
         * We still need to check the active tracker array to cover the race
         * where the I/O timed out at the same time the controller was
         * completing it. An abort request is always issued on the Task
         * Management Request queue, but it affects either a Task Management
         * Request or an I/O (UTRL) queue, so take the appropriate queue lock
         * for the original command's queue, since we'll need it to avoid
         * races with the completion code and to complete the command
         * manually.
         */
        mtx_lock(&tr->hwq->qlock);
        if (tr->slot_state != UFSHCI_SLOT_STATE_FREE) {
                mtx_unlock(&tr->hwq->qlock);
                /*
                 * An I/O has timed out, and the controller was unable to
                 * abort it for some reason. And we've not processed a
                 * completion for it yet. Construct a fake completion status,
                 * and then complete the I/O's tracker manually.
                 */
                ufshci_printf(tr->hwq->ctrlr,
                    "abort task request failed, aborting task manually\n");
                ufshci_req_queue_manual_complete_tracker(tr,
                    UFSHCI_DESC_ABORTED, UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);

                if ((status->response_upiu.task_mgmt_response_upiu
                        .output_param1 ==
                        UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE) ||
                    (status->response_upiu.task_mgmt_response_upiu
                        .output_param1 ==
                        UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED)) {
                        ufshci_printf(tr->hwq->ctrlr,
                            "Warning: the abort task request completed "
                            "successfully, but the original task is still "
                            "incomplete.\n");
                        return;
                }

                /* The abort task failed. Perform recovery steps 2-5. */
                ufshci_req_queue_timeout_recovery(tr->hwq->ctrlr, tr->hwq);
        } else {
                mtx_unlock(&tr->hwq->qlock);
        }
}

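/*
 * Watchdog callout. Runs every half second while requests are
 * outstanding, polls for missed completions, and walks the timeout
 * recovery steps for any tracker whose deadline has passed.
 */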
static void
ufshci_req_queue_timeout(void *arg)
{
        struct ufshci_hw_queue *hwq = arg;
        struct ufshci_controller *ctrlr = hwq->ctrlr;
        struct ufshci_tracker *tr;
        sbintime_t now;
        bool idle = true;
        bool fast;

        mtx_assert(&hwq->recovery_lock, MA_OWNED);

        /*
         * If the controller has failed, then stop polling. This ensures
         * that any failure processing that races with the hwq timeout will
         * fail safely.
         */
        if (ctrlr->is_failed) {
                ufshci_printf(ctrlr,
                    "Failed controller, stopping watchdog timeout.\n");
                hwq->timer_armed = false;
                return;
        }

        /*
         * Shutdown condition: We set hwq->timer_armed to false in
         * ufshci_req_sdb_destroy before calling callout_drain. When we call
         * that, this routine might get called one last time. Exit w/o
         * setting a timeout. None of the watchdog stuff needs to be done
         * since we're destroying the hwq.
         */
        if (!hwq->timer_armed) {
                ufshci_printf(ctrlr,
                    "Timeout fired during ufshci_utr_req_queue_destroy\n");
                return;
        }

        switch (hwq->recovery_state) {
        case RECOVERY_NONE:
                /*
                 * See if there's any recovery needed. First, do a fast check
                 * to see if anything could have timed out. If not, then skip
                 * everything else.
                 */
                fast = false;
                mtx_lock(&hwq->qlock);
                now = getsbinuptime();
                TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
                        /*
                         * If the first real transaction is not in timeout,
                         * then we're done. Otherwise, we try recovery.
                         */
                        idle = false;
                        if (now <= tr->deadline)
                                fast = true;
                        break;
                }
                mtx_unlock(&hwq->qlock);
                if (idle || fast)
                        break;

                /*
                 * There's a stale transaction at the start of the queue
                 * whose deadline has passed. Poll the completions as a
                 * last-ditch effort in case an interrupt has been missed.
                 */
                hwq->req_queue->qops.process_cpl(hwq->req_queue);

                /*
                 * Now that we've run the ISR, re-check to see if there are
                 * any timed out commands, and abort them or reset the card
                 * if so.
                 */
                mtx_lock(&hwq->qlock);
                idle = true;
                TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
                        /*
                         * If we know this tracker hasn't timed out, we also
                         * know all subsequent ones haven't timed out. The tr
                         * queue is in submission order and all normal
                         * commands in a queue have the same timeout (or the
                         * timeout was changed by the user, but we'll
                         * eventually time out then too).
                         */
                        idle = false;
                        if (now <= tr->deadline)
                                break;

                        /*
                         * Timeout recovery is performed in five steps. If
                         * recovery fails at any step, the process continues
                         * to the next one:
                         * Step 1. Abort task
                         * Step 2. Logical unit reset (TODO)
                         * Step 3. Target device reset (TODO)
                         * Step 4. Bus reset (TODO)
                         * Step 5. Host controller reset
                         *
                         * If the timeout occurred in the Task Management
                         * Request queue, skip step 1.
                         */
                        if (ctrlr->enable_aborts &&
                            !hwq->req_queue->is_task_mgmt &&
                            tr->req->cb_fn != ufshci_abort_complete) {
                                /*
                                 * Step 1. The timeout expired, abort the
                                 * task.
                                 *
                                 * This isn't an abort command, so ask for a
                                 * hardware abort. This goes to the Task
                                 * Management Request queue, which will reset
                                 * the task if it times out.
                                 */
                                ufshci_printf(ctrlr,
                                    "Recovery step 1: Timeout occurred, aborting the task (%d).\n",
                                    tr->req->request_upiu.header.task_tag);
                                ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr,
                                    ufshci_abort_complete, tr,
                                    UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK,
                                    tr->req->request_upiu.header.lun,
                                    tr->req->request_upiu.header.task_tag, 0);
                        } else {
                                /* Recovery steps 2-5. */
                                ufshci_req_queue_timeout_recovery(ctrlr, hwq);
                                idle = false;
                                break;
                        }
                }
                mtx_unlock(&hwq->qlock);
                break;

        case RECOVERY_WAITING:
                /*
                 * These messages aren't interesting while we're suspended.
                 * We put the queues into waiting state while suspending.
                 * Suspending takes a while, so we'll see these during that
                 * time and they aren't diagnostic. At other times, they
                 * indicate a problem that's worth complaining about.
                 */
                if (!device_is_suspended(ctrlr->dev))
                        ufshci_printf(ctrlr, "Waiting for reset to complete\n");
                idle = false; /* We want to keep polling */
                break;
        }

        /*
         * Rearm the timeout.
         */
        if (!idle) {
                callout_schedule_sbt(&hwq->timer, SBT_1S / 2, SBT_1S / 2, 0);
        } else {
                hwq->timer_armed = false;
        }
}

/*
 * Submit the tracker to the hardware.
 */
static void
ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker *tr, enum ufshci_data_direction data_direction)
{
        struct ufshci_controller *ctrlr = req_queue->ctrlr;
        struct ufshci_request *req = tr->req;
        struct ufshci_hw_queue *hwq;
        uint64_t ucd_paddr;
        uint16_t request_len, response_off, response_len;
        uint8_t slot_num = tr->slot_num;
        int timeout;

        hwq = req_queue->qops.get_hw_queue(req_queue);

        mtx_assert(&hwq->qlock, MA_OWNED);

        if (req->cb_fn == ufshci_completion_poll_cb)
                timeout = 1;
        else
                timeout = ctrlr->timeout_period;
        tr->deadline = getsbinuptime() + timeout * SBT_1S;
        if (!hwq->timer_armed) {
                hwq->timer_armed = true;
                /*
                 * The watchdog wakes up once every 0.5 seconds to check if
                 * the deadline has passed.
                 */
                callout_reset_sbt_on(&hwq->timer, SBT_1S / 2, SBT_1S / 2,
                    ufshci_req_queue_timeout, hwq, hwq->cpu, 0);
        }

        if (req_queue->is_task_mgmt) {
                /* Prepare the UTP Task Management Request descriptor. */
                ufshci_req_queue_fill_utmr_descriptor(&tr->hwq->utmrd[slot_num],
                    req);
        } else {
                request_len = req->request_size;
                response_off = UFSHCI_UTP_XFER_REQ_SIZE;
                response_len = req->response_size;

                /* Prepare the UTP Command Descriptor. */
                memcpy(tr->ucd, &req->request_upiu, request_len);
                memset((uint8_t *)tr->ucd + response_off, 0, response_len);

                /* Prepare the PRDT. */
                if (req->payload_valid)
                        ufshci_req_queue_prepare_prdt(tr);

                /* Prepare the UTP Transfer Request descriptor. */
                ucd_paddr = tr->ucd_bus_addr;
                ufshci_req_queue_fill_utr_descriptor(&tr->hwq->utrd[slot_num],
                    data_direction, ucd_paddr, response_off, response_len,
                    tr->prdt_off, tr->prdt_entry_cnt);

                bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        }

        bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        tr->slot_state = UFSHCI_SLOT_STATE_SCHEDULED;

        /* Ring the doorbell. */
        req_queue->qops.ring_doorbell(ctrlr, tr);
}

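/*
 * Reserve a slot, attach the request to its tracker, and submit it to
 * the hardware. Called with the queue lock held.
 */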
static int
_ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req)
{
        struct ufshci_tracker *tr = NULL;
        int error;

        mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);

        error = req_queue->qops.reserve_slot(req_queue, &tr);
        if (error != 0) {
                ufshci_printf(req_queue->ctrlr, "Failed to get tracker\n");
                return (error);
        }
        KASSERT(tr, ("There is no tracker allocated."));

        if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED ||
            tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED)
                return (EBUSY);

        /* Set the task_tag value to slot_num for traceability. */
        req->request_upiu.header.task_tag = tr->slot_num;

        tr->slot_state = UFSHCI_SLOT_STATE_RESERVED;
        tr->response_size = req->response_size;
        tr->deadline = SBT_MAX;
        tr->req = req;

        TAILQ_REMOVE(&tr->hwq->free_tr, tr, tailq);
        TAILQ_INSERT_TAIL(&tr->hwq->outstanding_tr, tr, tailq);

        ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);

        return (0);
}

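/*
 * Public entry point for submitting a request: takes the queue lock
 * and hands the request to _ufshci_req_queue_submit_request().
 */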
int
ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req)
{
        struct ufshci_hw_queue *hwq;
        int error;

        /* TODO: MCQs should use a separate admin queue. */

        hwq = req_queue->qops.get_hw_queue(req_queue);
        KASSERT(hwq, ("There is no HW queue allocated."));

        mtx_lock(&hwq->qlock);
        error = _ufshci_req_queue_submit_request(req_queue, req);
        mtx_unlock(&hwq->qlock);

        return (error);
}