xref: /qemu/hw/ufs/ufs.c (revision 6ff5da16000f908140723e164d33a0b51a6c4162)
1 /*
2  * QEMU Universal Flash Storage (UFS) Controller
3  *
4  * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
5  *
6  * Written by Jeuk Kim <jeuk20.kim@samsung.com>
7  *
8  * SPDX-License-Identifier: GPL-2.0-or-later
9  */
10 
11 /**
12  * Reference Specs: https://www.jedec.org/, 4.0
13  *
14  * Usage
15  * -----
16  *
17  * Add options:
18  *      -drive file=<file>,if=none,id=<drive_id>
19  *      -device ufs,serial=<serial>,id=<bus_name>, \
20  *              nutrs=<N[optional]>,nutmrs=<N[optional]>
21  *      -device ufs-lu,drive=<drive_id>,bus=<bus_name>
22  */
23 
24 #include "qemu/osdep.h"
25 #include "qapi/error.h"
26 #include "migration/vmstate.h"
27 #include "scsi/constants.h"
28 #include "trace.h"
29 #include "ufs.h"
30 
31 /* The QEMU-UFS device follows spec version 4.0 */
32 #define UFS_SPEC_VER 0x0400
33 #define UFS_MAX_NUTRS 32
34 #define UFS_MAX_NUTMRS 8
35 #define UFS_MCQ_QCFGPTR 2
36 
37 /* Each value represents the temperature in celsius as (value - 80) */
38 #define UFS_TEMPERATURE 120
39 #define UFS_TOO_HIGH_TEMP_BOUNDARY 160
40 #define UFS_TOO_LOW_TEMP_BOUNDARY 60
41 
42 static void ufs_exec_req(UfsRequest *req);
43 static void ufs_clear_req(UfsRequest *req);
44 
45 static inline uint64_t ufs_mcq_reg_addr(UfsHc *u, int qid)
46 {
47     /* Submission Queue MCQ Registers offset (400h) */
48     return (UFS_MCQ_QCFGPTR * 0x200) + qid * 0x40;
49 }
50 
51 static inline uint64_t ufs_mcq_op_reg_addr(UfsHc *u, int qid)
52 {
53     /* MCQ Operation & Runtime Registers offset (1000h) */
54     return UFS_MCQ_OPR_START + qid * 48;
55 }
56 
57 static inline uint64_t ufs_reg_size(UfsHc *u)
58 {
59     /* Total UFS HCI Register size in bytes */
60     return ufs_mcq_op_reg_addr(u, 0) + sizeof(u->mcq_op_reg);
61 }
62 
63 static inline bool ufs_is_mcq_reg(UfsHc *u, uint64_t addr, unsigned size)
64 {
65     uint64_t mcq_reg_addr;
66 
67     if (!u->params.mcq) {
68         return false;
69     }
70 
71     mcq_reg_addr = ufs_mcq_reg_addr(u, 0);
72     return (addr >= mcq_reg_addr &&
73             addr + size <= mcq_reg_addr + sizeof(u->mcq_reg));
74 }
75 
76 static inline bool ufs_is_mcq_op_reg(UfsHc *u, uint64_t addr, unsigned size)
77 {
78     uint64_t mcq_op_reg_addr;
79 
80     if (!u->params.mcq) {
81         return false;
82     }
83 
84     mcq_op_reg_addr = ufs_mcq_op_reg_addr(u, 0);
85     return (addr >= mcq_op_reg_addr &&
86             addr + size <= mcq_op_reg_addr + sizeof(u->mcq_op_reg));
87 }
88 
89 static MemTxResult ufs_addr_read(UfsHc *u, hwaddr addr, void *buf, int size)
90 {
91     hwaddr hi = addr + size - 1;
92 
93     if (hi < addr) {
94         return MEMTX_DECODE_ERROR;
95     }
96 
97     if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
98         return MEMTX_DECODE_ERROR;
99     }
100 
101     return pci_dma_read(PCI_DEVICE(u), addr, buf, size);
102 }
103 
104 static MemTxResult ufs_addr_write(UfsHc *u, hwaddr addr, const void *buf,
105                                   int size)
106 {
107     hwaddr hi = addr + size - 1;
108     if (hi < addr) {
109         return MEMTX_DECODE_ERROR;
110     }
111 
112     if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
113         return MEMTX_DECODE_ERROR;
114     }
115 
116     return pci_dma_write(PCI_DEVICE(u), addr, buf, size);
117 }
118 
119 static inline hwaddr ufs_get_utrd_addr(UfsHc *u, uint32_t slot)
120 {
121     hwaddr utrl_base_addr = (((hwaddr)u->reg.utrlbau) << 32) + u->reg.utrlba;
122     hwaddr utrd_addr = utrl_base_addr + slot * sizeof(UtpTransferReqDesc);
123 
124     return utrd_addr;
125 }
126 
127 static inline hwaddr ufs_get_req_upiu_base_addr(const UtpTransferReqDesc *utrd)
128 {
129     uint32_t cmd_desc_base_addr_lo =
130         le32_to_cpu(utrd->command_desc_base_addr_lo);
131     uint32_t cmd_desc_base_addr_hi =
132         le32_to_cpu(utrd->command_desc_base_addr_hi);
133 
134     return (((hwaddr)cmd_desc_base_addr_hi) << 32) + cmd_desc_base_addr_lo;
135 }
136 
137 static inline hwaddr ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc *utrd)
138 {
139     hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(utrd);
140     uint32_t rsp_upiu_byte_off =
141         le16_to_cpu(utrd->response_upiu_offset) * sizeof(uint32_t);
142     return req_upiu_base_addr + rsp_upiu_byte_off;
143 }
144 
145 static MemTxResult ufs_dma_read_utrd(UfsRequest *req)
146 {
147     UfsHc *u = req->hc;
148     hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
149     MemTxResult ret;
150 
151     ret = ufs_addr_read(u, utrd_addr, &req->utrd, sizeof(req->utrd));
152     if (ret) {
153         trace_ufs_err_dma_read_utrd(req->slot, utrd_addr);
154     }
155     return ret;
156 }
157 
/*
 * Fetch the request UPIU for @req from guest memory.
 *
 * The UPIU is variable-sized, so the header is read first to learn
 * data_segment_length, then the whole UPIU is copied in one go
 * (clamped to the size of the local buffer).  Returns MEMTX_OK on
 * success or a DMA error code.
 */
static MemTxResult ufs_dma_read_req_upiu(UfsRequest *req)
{
    UfsHc *u = req->hc;
    hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
    UtpUpiuReq *req_upiu = &req->req_upiu;
    uint32_t copy_size;
    uint16_t data_segment_length;
    MemTxResult ret;

    /*
     * To know the size of the req_upiu, we need to read the
     * data_segment_length in the header first.
     */
    ret = ufs_addr_read(u, req_upiu_base_addr, &req_upiu->header,
                        sizeof(UtpUpiuHeader));
    if (ret) {
        trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
        return ret;
    }
    /* data_segment_length is big-endian on the wire. */
    data_segment_length = be16_to_cpu(req_upiu->header.data_segment_length);

    copy_size = sizeof(UtpUpiuHeader) + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
                data_segment_length;

    /* Clamp so a bogus guest-supplied length cannot overflow our buffer. */
    if (copy_size > sizeof(req->req_upiu)) {
        copy_size = sizeof(req->req_upiu);
    }

    ret = ufs_addr_read(u, req_upiu_base_addr, &req->req_upiu, copy_size);
    if (ret) {
        trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
    }
    return ret;
}
192 
193 static MemTxResult ufs_dma_read_prdt(UfsRequest *req)
194 {
195     UfsHc *u = req->hc;
196     uint16_t prdt_len = le16_to_cpu(req->utrd.prd_table_length);
197     uint16_t prdt_byte_off =
198         le16_to_cpu(req->utrd.prd_table_offset) * sizeof(uint32_t);
199     uint32_t prdt_size = prdt_len * sizeof(UfshcdSgEntry);
200     g_autofree UfshcdSgEntry *prd_entries = NULL;
201     hwaddr req_upiu_base_addr, prdt_base_addr;
202     int err;
203 
204     assert(!req->sg);
205 
206     if (prdt_size == 0) {
207         return MEMTX_OK;
208     }
209     prd_entries = g_new(UfshcdSgEntry, prdt_size);
210 
211     req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
212     prdt_base_addr = req_upiu_base_addr + prdt_byte_off;
213 
214     err = ufs_addr_read(u, prdt_base_addr, prd_entries, prdt_size);
215     if (err) {
216         trace_ufs_err_dma_read_prdt(req->slot, prdt_base_addr);
217         return err;
218     }
219 
220     req->sg = g_malloc0(sizeof(QEMUSGList));
221     pci_dma_sglist_init(req->sg, PCI_DEVICE(u), prdt_len);
222     req->data_len = 0;
223 
224     for (uint16_t i = 0; i < prdt_len; ++i) {
225         hwaddr data_dma_addr = le64_to_cpu(prd_entries[i].addr);
226         uint32_t data_byte_count = le32_to_cpu(prd_entries[i].size) + 1;
227         qemu_sglist_add(req->sg, data_dma_addr, data_byte_count);
228         req->data_len += data_byte_count;
229     }
230     return MEMTX_OK;
231 }
232 
233 static MemTxResult ufs_dma_read_upiu(UfsRequest *req)
234 {
235     MemTxResult ret;
236 
237     /*
238      * In case of MCQ, UTRD has already been read from a SQ, so skip it.
239      */
240     if (!ufs_mcq_req(req)) {
241         ret = ufs_dma_read_utrd(req);
242         if (ret) {
243             return ret;
244         }
245     }
246 
247     ret = ufs_dma_read_req_upiu(req);
248     if (ret) {
249         return ret;
250     }
251 
252     ret = ufs_dma_read_prdt(req);
253     if (ret) {
254         return ret;
255     }
256 
257     return 0;
258 }
259 
260 static MemTxResult ufs_dma_write_utrd(UfsRequest *req)
261 {
262     UfsHc *u = req->hc;
263     hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
264     MemTxResult ret;
265 
266     ret = ufs_addr_write(u, utrd_addr, &req->utrd, sizeof(req->utrd));
267     if (ret) {
268         trace_ufs_err_dma_write_utrd(req->slot, utrd_addr);
269     }
270     return ret;
271 }
272 
273 static MemTxResult ufs_dma_write_rsp_upiu(UfsRequest *req)
274 {
275     UfsHc *u = req->hc;
276     hwaddr rsp_upiu_base_addr = ufs_get_rsp_upiu_base_addr(&req->utrd);
277     uint32_t rsp_upiu_byte_len =
278         le16_to_cpu(req->utrd.response_upiu_length) * sizeof(uint32_t);
279     uint16_t data_segment_length =
280         be16_to_cpu(req->rsp_upiu.header.data_segment_length);
281     uint32_t copy_size = sizeof(UtpUpiuHeader) +
282                          UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
283                          data_segment_length;
284     MemTxResult ret;
285 
286     if (copy_size > rsp_upiu_byte_len) {
287         copy_size = rsp_upiu_byte_len;
288     }
289 
290     if (copy_size > sizeof(req->rsp_upiu)) {
291         copy_size = sizeof(req->rsp_upiu);
292     }
293 
294     ret = ufs_addr_write(u, rsp_upiu_base_addr, &req->rsp_upiu, copy_size);
295     if (ret) {
296         trace_ufs_err_dma_write_rsp_upiu(req->slot, rsp_upiu_base_addr);
297     }
298     return ret;
299 }
300 
301 static MemTxResult ufs_dma_write_upiu(UfsRequest *req)
302 {
303     MemTxResult ret;
304 
305     ret = ufs_dma_write_rsp_upiu(req);
306     if (ret) {
307         return ret;
308     }
309 
310     return ufs_dma_write_utrd(req);
311 }
312 
313 static void ufs_irq_check(UfsHc *u)
314 {
315     PCIDevice *pci = PCI_DEVICE(u);
316 
317     if ((u->reg.is & UFS_INTR_MASK) & u->reg.ie) {
318         trace_ufs_irq_raise();
319         pci_irq_assert(pci);
320     } else {
321         trace_ufs_irq_lower();
322         pci_irq_deassert(pci);
323     }
324 }
325 
/*
 * Handle a write to the UTP Transfer Request List Door Bell Register
 * (legacy, non-MCQ path): mark each newly rung slot ready and kick the
 * doorbell bottom half to execute them.
 */
static void ufs_process_db(UfsHc *u, uint32_t val)
{
    DECLARE_BITMAP(doorbell, UFS_MAX_NUTRS);
    uint32_t slot;
    uint32_t nutrs = u->params.nutrs;
    UfsRequest *req;

    /* Only consider bits that are not already pending in UTRLDBR. */
    val &= ~u->reg.utrldbr;
    if (!val) {
        return;
    }

    doorbell[0] = val;
    slot = find_first_bit(doorbell, nutrs);

    while (slot < nutrs) {
        req = &u->req_list[slot];
        /* A slot in error or busy state aborts this doorbell write. */
        if (req->state == UFS_REQUEST_ERROR) {
            trace_ufs_err_utrl_slot_error(req->slot);
            return;
        }

        if (req->state != UFS_REQUEST_IDLE) {
            trace_ufs_err_utrl_slot_busy(req->slot);
            return;
        }

        trace_ufs_process_db(slot);
        req->state = UFS_REQUEST_READY;
        slot = find_next_bit(doorbell, nutrs, slot + 1);
    }

    /* Actual execution happens in the doorbell bottom half. */
    qemu_bh_schedule(u->doorbell_bh);
}
360 
/*
 * Execute a UIC command written to the UICCMD register.  The command
 * result is reported through UCMDARG2, and completion is signalled via
 * the UCCS interrupt bit.
 */
static void ufs_process_uiccmd(UfsHc *u, uint32_t val)
{
    trace_ufs_process_uiccmd(val, u->reg.ucmdarg1, u->reg.ucmdarg2,
                             u->reg.ucmdarg3);
    /*
     * Only the essential uic commands for running drivers on Linux and Windows
     * are implemented.
     */
    switch (val) {
    case UFS_UIC_CMD_DME_LINK_STARTUP:
        /* Report link up and both request lists ready. */
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, DP, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTRLRDY, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTMRLRDY, 1);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    /* TODO: Revisit it when Power Management is implemented */
    case UFS_UIC_CMD_DME_HIBER_ENTER:
        u->reg.is = FIELD_DP32(u->reg.is, IS, UHES, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    case UFS_UIC_CMD_DME_HIBER_EXIT:
        u->reg.is = FIELD_DP32(u->reg.is, IS, UHXS, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    default:
        /* Any other UIC command is rejected. */
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_FAILURE;
    }

    /* Signal UIC command completion regardless of the outcome. */
    u->reg.is = FIELD_DP32(u->reg.is, IS, UCCS, 1);

    ufs_irq_check(u);
}
395 
396 static void ufs_mcq_init_req(UfsHc *u, UfsRequest *req, UfsSq *sq)
397 {
398     memset(req, 0, sizeof(*req));
399 
400     req->hc = u;
401     req->state = UFS_REQUEST_IDLE;
402     req->slot = UFS_INVALID_SLOT;
403     req->sq = sq;
404 }
405 
/*
 * Bottom-half handler: drain the MCQ submission queue by fetching SQ
 * entries from guest memory and executing them with requests taken
 * from the queue's free-request pool.
 */
static void ufs_mcq_process_sq(void *opaque)
{
    UfsSq *sq = opaque;
    UfsHc *u = sq->u;
    UfsSqEntry sqe;
    UfsRequest *req;
    hwaddr addr;
    uint16_t head = ufs_mcq_sq_head(u, sq->sqid);
    int err;

    /* Stop when the SQ is empty or no free request is available. */
    while (!(ufs_mcq_sq_empty(u, sq->sqid) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->addr + head;
        err = ufs_addr_read(sq->u, addr, (void *)&sqe, sizeof(sqe));
        if (err) {
            trace_ufs_err_dma_read_sq(sq->sqid, addr);
            return;
        }

        /* Advance the head, wrapping at the end of the ring. */
        head = (head + sizeof(sqe)) % (sq->size * sizeof(sqe));
        ufs_mcq_update_sq_head(u, sq->sqid, head);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);

        ufs_mcq_init_req(sq->u, req, sq);
        /* The SQ entry doubles as the request's UTRD. */
        memcpy(&req->utrd, &sqe, sizeof(req->utrd));

        req->state = UFS_REQUEST_RUNNING;
        ufs_exec_req(req);
    }
}
437 
/*
 * Bottom-half handler: post a completion queue entry for every finished
 * request, recycle each request back into its SQ's free list, and raise
 * the CQ interrupt if the CQ is non-empty afterwards.
 */
static void ufs_mcq_process_cq(void *opaque)
{
    UfsCq *cq = opaque;
    UfsHc *u = cq->u;
    UfsRequest *req, *next;
    MemTxResult ret;
    uint32_t tail = ufs_mcq_cq_tail(u, cq->cqid);

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next)
    {
        /* The response UPIU must land in guest memory before the CQE. */
        ufs_dma_write_rsp_upiu(req);

        req->cqe.utp_addr =
            ((uint64_t)req->utrd.command_desc_base_addr_hi << 32ULL) |
            req->utrd.command_desc_base_addr_lo;
        /* The low bits of utp_addr carry the originating SQ id. */
        req->cqe.utp_addr |= req->sq->sqid;
        req->cqe.resp_len = req->utrd.response_upiu_length;
        req->cqe.resp_off = req->utrd.response_upiu_offset;
        req->cqe.prdt_len = req->utrd.prd_table_length;
        req->cqe.prdt_off = req->utrd.prd_table_offset;
        /* Overall Command Status sits in the low nibble of dword_2. */
        req->cqe.status = req->utrd.header.dword_2 & 0xf;
        req->cqe.error = 0;

        ret = ufs_addr_write(u, cq->addr + tail, &req->cqe, sizeof(req->cqe));
        if (ret) {
            trace_ufs_err_dma_write_cq(cq->cqid, cq->addr + tail);
        }
        QTAILQ_REMOVE(&cq->req_list, req, entry);

        /* Advance the tail, wrapping at the end of the ring. */
        tail = (tail + sizeof(req->cqe)) % (cq->size * sizeof(req->cqe));
        ufs_mcq_update_cq_tail(u, cq->cqid, tail);

        /* Return the request to its submission queue's free pool. */
        ufs_clear_req(req);
        QTAILQ_INSERT_TAIL(&req->sq->req_list, req, entry);
    }

    if (!ufs_mcq_cq_empty(u, cq->cqid)) {
        u->mcq_op_reg[cq->cqid].cq_int.is =
            FIELD_DP32(u->mcq_op_reg[cq->cqid].cq_int.is, CQIS, TEPS, 1);

        u->reg.is = FIELD_DP32(u->reg.is, IS, CQES, 1);
        ufs_irq_check(u);
    }
}
482 
/*
 * Create an MCQ submission queue for @qid from the SQATTR write @attr.
 * The completion queue selected by SQATTR.CQID must already exist.
 * Returns true on success, false if validation fails.
 */
static bool ufs_mcq_create_sq(UfsHc *u, uint8_t qid, uint32_t attr)
{
    UfsMcqReg *reg = &u->mcq_reg[qid];
    UfsSq *sq;
    uint8_t cqid = FIELD_EX32(attr, SQATTR, CQID);

    if (qid >= u->params.mcq_maxq) {
        trace_ufs_err_mcq_create_sq_invalid_sqid(qid);
        return false;
    }

    if (u->sq[qid]) {
        trace_ufs_err_mcq_create_sq_already_exists(qid);
        return false;
    }

    if (!u->cq[cqid]) {
        trace_ufs_err_mcq_create_sq_invalid_cqid(qid);
        return false;
    }

    sq = g_malloc0(sizeof(*sq));
    sq->u = u;
    sq->sqid = qid;
    sq->cq = u->cq[cqid];
    /* Queue base address comes from the upper/lower base registers. */
    sq->addr = ((uint64_t)reg->squba << 32) | reg->sqlba;
    /* SQATTR.SIZE is in dwords; convert to an entry count. */
    sq->size = ((FIELD_EX32(attr, SQATTR, SIZE) + 1) << 2) / sizeof(UfsSqEntry);

    sq->bh = qemu_bh_new_guarded(ufs_mcq_process_sq, sq,
                                 &DEVICE(u)->mem_reentrancy_guard);
    /* Pre-allocate one request per SQ entry and park them on the free list. */
    sq->req = g_new0(UfsRequest, sq->size);
    QTAILQ_INIT(&sq->req_list);
    for (int i = 0; i < sq->size; i++) {
        ufs_mcq_init_req(u, &sq->req[i], sq);
        QTAILQ_INSERT_TAIL(&sq->req_list, &sq->req[i], entry);
    }

    u->sq[qid] = sq;

    trace_ufs_mcq_create_sq(sq->sqid, sq->cq->cqid, sq->addr, sq->size);
    return true;
}
525 
526 static bool ufs_mcq_delete_sq(UfsHc *u, uint8_t qid)
527 {
528     UfsSq *sq;
529 
530     if (qid >= u->params.mcq_maxq) {
531         trace_ufs_err_mcq_delete_sq_invalid_sqid(qid);
532         return false;
533     }
534 
535     if (!u->sq[qid]) {
536         trace_ufs_err_mcq_delete_sq_not_exists(qid);
537         return false;
538     }
539 
540     sq = u->sq[qid];
541 
542     qemu_bh_delete(sq->bh);
543     g_free(sq->req);
544     g_free(sq);
545     u->sq[qid] = NULL;
546     return true;
547 }
548 
549 static bool ufs_mcq_create_cq(UfsHc *u, uint8_t qid, uint32_t attr)
550 {
551     UfsMcqReg *reg = &u->mcq_reg[qid];
552     UfsCq *cq;
553 
554     if (qid >= u->params.mcq_maxq) {
555         trace_ufs_err_mcq_create_cq_invalid_cqid(qid);
556         return false;
557     }
558 
559     if (u->cq[qid]) {
560         trace_ufs_err_mcq_create_cq_already_exists(qid);
561         return false;
562     }
563 
564     cq = g_malloc0(sizeof(*cq));
565     cq->u = u;
566     cq->cqid = qid;
567     cq->addr = ((uint64_t)reg->cquba << 32) | reg->cqlba;
568     cq->size = ((FIELD_EX32(attr, CQATTR, SIZE) + 1) << 2) / sizeof(UfsCqEntry);
569 
570     cq->bh = qemu_bh_new_guarded(ufs_mcq_process_cq, cq,
571                                  &DEVICE(u)->mem_reentrancy_guard);
572     QTAILQ_INIT(&cq->req_list);
573 
574     u->cq[qid] = cq;
575 
576     trace_ufs_mcq_create_cq(cq->cqid, cq->addr, cq->size);
577     return true;
578 }
579 
580 static bool ufs_mcq_delete_cq(UfsHc *u, uint8_t qid)
581 {
582     UfsCq *cq;
583 
584     if (qid >= u->params.mcq_maxq) {
585         trace_ufs_err_mcq_delete_cq_invalid_cqid(qid);
586         return false;
587     }
588 
589     if (!u->cq[qid]) {
590         trace_ufs_err_mcq_delete_cq_not_exists(qid);
591         return false;
592     }
593 
594     for (int i = 0; i < ARRAY_SIZE(u->sq); i++) {
595         if (u->sq[i] && u->sq[i]->cq->cqid == qid) {
596             trace_ufs_err_mcq_delete_cq_sq_not_deleted(i, qid);
597             return false;
598         }
599     }
600 
601     cq = u->cq[qid];
602 
603     qemu_bh_delete(cq->bh);
604     g_free(cq);
605     u->cq[qid] = NULL;
606     return true;
607 }
608 
/*
 * Handle a write to the legacy UFS HCI register file.  @offset is the
 * byte offset into the register block.  Read-only and unimplemented
 * registers are traced and the write is dropped.
 */
static void ufs_write_reg(UfsHc *u, hwaddr offset, uint32_t data, unsigned size)
{
    switch (offset) {
    case A_IS:
        /* Interrupt status is write-1-to-clear. */
        u->reg.is &= ~data;
        ufs_irq_check(u);
        break;
    case A_IE:
        u->reg.ie = data;
        ufs_irq_check(u);
        break;
    case A_HCE:
        /* HCE 0 -> 1 enables the controller; 1 -> 0 resets its state. */
        if (!FIELD_EX32(u->reg.hce, HCE, HCE) && FIELD_EX32(data, HCE, HCE)) {
            u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UCRDY, 1);
            u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 1);
        } else if (FIELD_EX32(u->reg.hce, HCE, HCE) &&
                   !FIELD_EX32(data, HCE, HCE)) {
            u->reg.hcs = 0;
            u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 0);
        }
        break;
    case A_UTRLBA:
        u->reg.utrlba = data & R_UTRLBA_UTRLBA_MASK;
        break;
    case A_UTRLBAU:
        u->reg.utrlbau = data;
        break;
    case A_UTRLDBR:
        /* Kick off the newly rung slots before latching the doorbell bits. */
        ufs_process_db(u, data);
        u->reg.utrldbr |= data;
        break;
    case A_UTRLRSR:
        u->reg.utrlrsr = data;
        break;
    case A_UTRLCNR:
        /* Completion notification is write-1-to-clear. */
        u->reg.utrlcnr &= ~data;
        break;
    case A_UTMRLBA:
        u->reg.utmrlba = data & R_UTMRLBA_UTMRLBA_MASK;
        break;
    case A_UTMRLBAU:
        u->reg.utmrlbau = data;
        break;
    case A_UICCMD:
        ufs_process_uiccmd(u, data);
        break;
    case A_UCMDARG1:
        u->reg.ucmdarg1 = data;
        break;
    case A_UCMDARG2:
        u->reg.ucmdarg2 = data;
        break;
    case A_UCMDARG3:
        u->reg.ucmdarg3 = data;
        break;
    case A_CONFIG:
        u->reg.config = data;
        break;
    case A_MCQCONFIG:
        u->reg.mcqconfig = data;
        break;
    case A_UTRLCLR:
    case A_UTMRLDBR:
    case A_UTMRLCLR:
    case A_UTMRLRSR:
        /* Valid registers that this model does not implement yet. */
        trace_ufs_err_unsupport_register_offset(offset);
        break;
    default:
        trace_ufs_err_invalid_register_offset(offset);
        break;
    }
}
681 
/*
 * Handle a write to the MCQ configuration register block.  @offset is
 * relative to the start of the block; the queue id and the register
 * within its group are derived from the per-queue stride.
 */
static void ufs_write_mcq_reg(UfsHc *u, hwaddr offset, uint32_t data,
                              unsigned size)
{
    int qid = offset / sizeof(UfsMcqReg);
    UfsMcqReg *reg = &u->mcq_reg[qid];

    switch (offset % sizeof(UfsMcqReg)) {
    case A_SQATTR:
        /* SQEN 0 -> 1 creates the SQ; 1 -> 0 deletes it. */
        if (!FIELD_EX32(reg->sqattr, SQATTR, SQEN) &&
            FIELD_EX32(data, SQATTR, SQEN)) {
            if (!ufs_mcq_create_sq(u, qid, data)) {
                break;
            }
        } else if (FIELD_EX32(reg->sqattr, SQATTR, SQEN) &&
                   !FIELD_EX32(data, SQATTR, SQEN)) {
            if (!ufs_mcq_delete_sq(u, qid)) {
                break;
            }
        }
        /* Only latch the new attribute value if create/delete succeeded. */
        reg->sqattr = data;
        break;
    case A_SQLBA:
        reg->sqlba = data;
        break;
    case A_SQUBA:
        reg->squba = data;
        break;
    case A_SQCFG:
        reg->sqcfg = data;
        break;
    case A_CQATTR:
        /* CQEN 0 -> 1 creates the CQ; 1 -> 0 deletes it. */
        if (!FIELD_EX32(reg->cqattr, CQATTR, CQEN) &&
            FIELD_EX32(data, CQATTR, CQEN)) {
            if (!ufs_mcq_create_cq(u, qid, data)) {
                break;
            }
        } else if (FIELD_EX32(reg->cqattr, CQATTR, CQEN) &&
                   !FIELD_EX32(data, CQATTR, CQEN)) {
            if (!ufs_mcq_delete_cq(u, qid)) {
                break;
            }
        }
        reg->cqattr = data;
        break;
    case A_CQLBA:
        reg->cqlba = data;
        break;
    case A_CQUBA:
        reg->cquba = data;
        break;
    case A_CQCFG:
        reg->cqcfg = data;
        break;
    case A_SQDAO:
    case A_SQISAO:
    case A_CQDAO:
    case A_CQISAO:
        /* Read-only offset registers; writes are ignored. */
        trace_ufs_err_unsupport_register_offset(offset);
        break;
    default:
        trace_ufs_err_invalid_register_offset(offset);
        break;
    }
}
746 
747 static void ufs_mcq_process_db(UfsHc *u, uint8_t qid, uint32_t db)
748 {
749     UfsSq *sq;
750 
751     if (qid >= u->params.mcq_maxq) {
752         trace_ufs_err_mcq_db_wr_invalid_sqid(qid);
753         return;
754     }
755 
756     sq = u->sq[qid];
757     if (sq->size * sizeof(UfsSqEntry) <= db) {
758         trace_ufs_err_mcq_db_wr_invalid_db(qid, db);
759         return;
760     }
761 
762     ufs_mcq_update_sq_tail(u, sq->sqid, db);
763     qemu_bh_schedule(sq->bh);
764 }
765 
/*
 * Handle a write to the MCQ Operation & Runtime register block.
 * @offset is relative to the start of the block; the queue id is
 * derived from the per-queue register stride.
 */
static void ufs_write_mcq_op_reg(UfsHc *u, hwaddr offset, uint32_t data,
                                 unsigned size)
{
    int qid = offset / sizeof(UfsMcqOpReg);
    UfsMcqOpReg *opr = &u->mcq_op_reg[qid];

    switch (offset % sizeof(UfsMcqOpReg)) {
    case offsetof(UfsMcqOpReg, sq.tp):
        /* SQ tail pointer: only process the doorbell when it moves. */
        if (opr->sq.tp != data) {
            ufs_mcq_process_db(u, qid, data);
        }
        opr->sq.tp = data;
        break;
    case offsetof(UfsMcqOpReg, cq.hp):
        opr->cq.hp = data;
        ufs_mcq_update_cq_head(u, qid, data);
        break;
    case offsetof(UfsMcqOpReg, cq_int.is):
        /* CQ interrupt status is write-1-to-clear. */
        opr->cq_int.is &= ~data;
        break;
    default:
        trace_ufs_err_invalid_register_offset(offset);
        break;
    }
}
791 
/*
 * MMIO read dispatcher: map @addr to one of the three register groups
 * (legacy HCI registers, MCQ config registers, MCQ op/runtime
 * registers) and return the 32-bit word at that offset.  Out-of-range
 * reads return 0.
 */
static uint64_t ufs_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    UfsHc *u = (UfsHc *)opaque;
    uint32_t *ptr;
    uint64_t value;
    uint64_t offset;

    if (addr + size <= sizeof(u->reg)) {
        offset = addr;
        ptr = (uint32_t *)&u->reg;
    } else if (ufs_is_mcq_reg(u, addr, size)) {
        offset = addr - ufs_mcq_reg_addr(u, 0);
        ptr = (uint32_t *)&u->mcq_reg;
    } else if (ufs_is_mcq_op_reg(u, addr, size)) {
        offset = addr - ufs_mcq_op_reg_addr(u, 0);
        ptr = (uint32_t *)&u->mcq_op_reg;
    } else {
        trace_ufs_err_invalid_register_offset(addr);
        return 0;
    }

    /* Accesses arrive as 4-byte words (see ufs_mmio_ops.impl). */
    value = ptr[offset >> 2];
    trace_ufs_mmio_read(addr, value, size);
    return value;
}
817 
818 static void ufs_mmio_write(void *opaque, hwaddr addr, uint64_t data,
819                            unsigned size)
820 {
821     UfsHc *u = (UfsHc *)opaque;
822 
823     trace_ufs_mmio_write(addr, data, size);
824 
825     if (addr + size <= sizeof(u->reg)) {
826         ufs_write_reg(u, addr, data, size);
827     } else if (ufs_is_mcq_reg(u, addr, size)) {
828         ufs_write_mcq_reg(u, addr - ufs_mcq_reg_addr(u, 0), data, size);
829     } else if (ufs_is_mcq_op_reg(u, addr, size)) {
830         ufs_write_mcq_op_reg(u, addr - ufs_mcq_op_reg_addr(u, 0), data, size);
831     } else {
832         trace_ufs_err_invalid_register_offset(addr);
833     }
834 }
835 
/* MMIO region ops: little-endian registers, accessed as 4-byte words. */
static const MemoryRegionOps ufs_mmio_ops = {
    .read = ufs_mmio_read,
    .write = ufs_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
845 
846 static void ufs_update_ee_status(UfsHc *u)
847 {
848     uint16_t ee_status = be16_to_cpu(u->attributes.exception_event_status);
849     uint8_t high_temp_thresh = u->attributes.device_too_high_temp_boundary;
850     uint8_t low_temp_thresh = u->attributes.device_too_low_temp_boundary;
851 
852     if (u->temperature >= high_temp_thresh) {
853         ee_status |= MASK_EE_TOO_HIGH_TEMP;
854     } else {
855         ee_status &= ~MASK_EE_TOO_HIGH_TEMP;
856     }
857 
858     if (u->temperature <= low_temp_thresh) {
859         ee_status |= MASK_EE_TOO_LOW_TEMP;
860     } else {
861         ee_status &= ~MASK_EE_TOO_LOW_TEMP;
862     }
863 
864     u->attributes.exception_event_status = cpu_to_be16(ee_status);
865 }
866 
867 static bool ufs_check_exception_event_alert(UfsHc *u, uint8_t trans_type)
868 {
869     uint16_t ee_control = be16_to_cpu(u->attributes.exception_event_control);
870     uint16_t ee_status;
871 
872     if (trans_type != UFS_UPIU_TRANSACTION_RESPONSE) {
873         return false;
874     }
875 
876     ufs_update_ee_status(u);
877 
878     ee_status = be16_to_cpu(u->attributes.exception_event_status);
879 
880     return ee_control & ee_status;
881 }
882 
/*
 * Fill in the response UPIU header for @req.
 *
 * The header is first copied from the request UPIU (preserving fields
 * such as the task tag and LUN), then the given transaction type,
 * flags, response code, SCSI status and data segment length are
 * applied.  The device-information field signals a pending exception
 * event on RESPONSE UPIUs.
 */
void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags,
                           uint8_t response, uint8_t scsi_status,
                           uint16_t data_segment_length)
{
    memcpy(&req->rsp_upiu.header, &req->req_upiu.header, sizeof(UtpUpiuHeader));
    req->rsp_upiu.header.trans_type = trans_type;
    req->rsp_upiu.header.flags = flags;
    req->rsp_upiu.header.response = response;
    req->rsp_upiu.header.scsi_status = scsi_status;
    req->rsp_upiu.header.device_inf =
        ufs_check_exception_event_alert(req->hc, trans_type);
    /* data_segment_length is big-endian on the wire. */
    req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length);
}
896 
/* Echo the query identification fields back into the response UPIU. */
void ufs_build_query_response(UfsRequest *req)
{
    req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode;
    req->rsp_upiu.qr.idn = req->req_upiu.qr.idn;
    req->rsp_upiu.qr.index = req->req_upiu.qr.index;
    req->rsp_upiu.qr.selector = req->req_upiu.qr.selector;
}
904 
/*
 * Dispatch a SCSI command UPIU to the addressed logical unit.
 *
 * Well-known LUNs (REPORT LUNS, DEVICE, BOOT, RPMB) map to the
 * controller's built-in well-known LUs; any other LUN must be a
 * previously attached ufs-lu.  Returns the LU's scsi_op result, or
 * UFS_REQUEST_FAIL for an invalid LUN.
 */
static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req)
{
    UfsHc *u = req->hc;
    uint8_t lun = req->req_upiu.header.lun;

    UfsLu *lu = NULL;

    trace_ufs_exec_scsi_cmd(req->slot, lun, req->req_upiu.sc.cdb[0]);

    /* Ordinary LUNs must be in range and have a LU attached. */
    if (!is_wlun(lun) && (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
        trace_ufs_err_scsi_cmd_invalid_lun(lun);
        return UFS_REQUEST_FAIL;
    }

    switch (lun) {
    case UFS_UPIU_REPORT_LUNS_WLUN:
        lu = &u->report_wlu;
        break;
    case UFS_UPIU_UFS_DEVICE_WLUN:
        lu = &u->dev_wlu;
        break;
    case UFS_UPIU_BOOT_WLUN:
        lu = &u->boot_wlu;
        break;
    case UFS_UPIU_RPMB_WLUN:
        lu = &u->rpmb_wlu;
        break;
    default:
        lu = u->lus[lun];
    }

    return lu->scsi_op(lu, req);
}
938 
/* Complete a NOP OUT UPIU with an all-zero NOP IN response. */
static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req)
{
    trace_ufs_exec_nop_cmd(req->slot);
    ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_NOP_IN, 0, 0, 0, 0);
    return UFS_REQUEST_SUCCESS;
}
945 
/*
 * This defines the permission of flags based on their IDN. There are some
 * things that are declared read-only, which is inconsistent with the ufs spec,
 * because we want to return an error for features that are not yet supported.
 */
static const int flag_permission[UFS_QUERY_FLAG_IDN_COUNT] = {
    /* fDeviceInit is set-only; it self-clears once init completes. */
    [UFS_QUERY_FLAG_IDN_FDEVICEINIT] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET,
    /* Write protection is not supported */
    [UFS_QUERY_FLAG_IDN_PERMANENT_WPE] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_PWR_ON_WPE] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_BKOPS_EN] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET |
                                    UFS_QUERY_FLAG_CLEAR |
                                    UFS_QUERY_FLAG_TOGGLE,
    [UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE] =
        UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET | UFS_QUERY_FLAG_CLEAR |
        UFS_QUERY_FLAG_TOGGLE,
    /* Purge Operation is not supported */
    [UFS_QUERY_FLAG_IDN_PURGE_ENABLE] = UFS_QUERY_FLAG_NONE,
    /* Refresh Operation is not supported */
    [UFS_QUERY_FLAG_IDN_REFRESH_ENABLE] = UFS_QUERY_FLAG_NONE,
    /* Physical Resource Removal is not supported */
    [UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_BUSY_RTC] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE] = UFS_QUERY_FLAG_READ,
    /* Write Booster is not supported */
    [UFS_QUERY_FLAG_IDN_WB_EN] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8] = UFS_QUERY_FLAG_READ,
};
975 
976 static inline QueryRespCode ufs_flag_check_idn_valid(uint8_t idn, int op)
977 {
978     if (idn >= UFS_QUERY_FLAG_IDN_COUNT) {
979         return UFS_QUERY_RESULT_INVALID_IDN;
980     }
981 
982     if (!(flag_permission[idn] & op)) {
983         if (op == UFS_QUERY_FLAG_READ) {
984             trace_ufs_err_query_flag_not_readable(idn);
985             return UFS_QUERY_RESULT_NOT_READABLE;
986         }
987         trace_ufs_err_query_flag_not_writable(idn);
988         return UFS_QUERY_RESULT_NOT_WRITEABLE;
989     }
990 
991     return UFS_QUERY_RESULT_SUCCESS;
992 }
993 
/*
 * Read/write permission bits per attribute IDN, consulted by
 * ufs_attr_check_idn_valid() before any attribute access.  IDNs not
 * listed here default to 0 (neither readable nor writable).
 */
static const int attr_permission[UFS_QUERY_ATTR_IDN_COUNT] = {
    /* booting is not supported */
    [UFS_QUERY_ATTR_IDN_BOOT_LU_EN] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_POWER_MODE] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_OOO_DATA_EN] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_BKOPS_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_PURGE_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_MAX_DATA_IN] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_MAX_DATA_OUT] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REF_CLK_FREQ] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_EE_CONTROL] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_EE_STATUS] = UFS_QUERY_ATTR_READ,
    /* write-only: a clock-synchronised timestamp pushed by the host */
    [UFS_QUERY_ATTR_IDN_SECONDS_PASSED] = UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_CNTX_CONF] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_FFU_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_PSA_STATE] = UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME] = UFS_QUERY_ATTR_READ,
    /* temperature reporting attributes are read-only */
    [UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_THROTTLING_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
    /* refresh operation is not supported */
    [UFS_QUERY_ATTR_IDN_REFRESH_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REFRESH_FREQ] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REFRESH_UNIT] = UFS_QUERY_ATTR_READ,
};
1036 
1037 static inline QueryRespCode ufs_attr_check_idn_valid(uint8_t idn, int op)
1038 {
1039     if (idn >= UFS_QUERY_ATTR_IDN_COUNT) {
1040         return UFS_QUERY_RESULT_INVALID_IDN;
1041     }
1042 
1043     if (!(attr_permission[idn] & op)) {
1044         if (op == UFS_QUERY_ATTR_READ) {
1045             trace_ufs_err_query_attr_not_readable(idn);
1046             return UFS_QUERY_RESULT_NOT_READABLE;
1047         }
1048         trace_ufs_err_query_attr_not_writable(idn);
1049         return UFS_QUERY_RESULT_NOT_WRITEABLE;
1050     }
1051 
1052     return UFS_QUERY_RESULT_SUCCESS;
1053 }
1054 
1055 static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op)
1056 {
1057     UfsHc *u = req->hc;
1058     uint8_t idn = req->req_upiu.qr.idn;
1059     uint32_t value;
1060     QueryRespCode ret;
1061 
1062     ret = ufs_flag_check_idn_valid(idn, op);
1063     if (ret) {
1064         return ret;
1065     }
1066 
1067     if (idn == UFS_QUERY_FLAG_IDN_FDEVICEINIT) {
1068         value = 0;
1069     } else if (op == UFS_QUERY_FLAG_READ) {
1070         value = *(((uint8_t *)&u->flags) + idn);
1071     } else if (op == UFS_QUERY_FLAG_SET) {
1072         value = 1;
1073     } else if (op == UFS_QUERY_FLAG_CLEAR) {
1074         value = 0;
1075     } else if (op == UFS_QUERY_FLAG_TOGGLE) {
1076         value = *(((uint8_t *)&u->flags) + idn);
1077         value = !value;
1078     } else {
1079         trace_ufs_err_query_invalid_opcode(op);
1080         return UFS_QUERY_RESULT_INVALID_OPCODE;
1081     }
1082 
1083     *(((uint8_t *)&u->flags) + idn) = value;
1084     req->rsp_upiu.qr.value = cpu_to_be32(value);
1085     return UFS_QUERY_RESULT_SUCCESS;
1086 }
1087 
1088 static inline uint8_t ufs_read_device_temp(UfsHc *u)
1089 {
1090     uint8_t feat_sup = u->device_desc.ufs_features_support;
1091     bool high_temp_sup, low_temp_sup, high_temp_en, low_temp_en;
1092     uint16_t ee_control = be16_to_cpu(u->attributes.exception_event_control);
1093 
1094     high_temp_sup = feat_sup & UFS_DEV_HIGH_TEMP_NOTIF;
1095     low_temp_sup = feat_sup & UFS_DEV_LOW_TEMP_NOTIF;
1096     high_temp_en = ee_control & MASK_EE_TOO_HIGH_TEMP;
1097     low_temp_en = ee_control & MASK_EE_TOO_LOW_TEMP;
1098 
1099     if ((high_temp_sup && high_temp_en) ||
1100         (low_temp_sup && low_temp_en)) {
1101         return u->temperature;
1102     }
1103 
1104     return 0;
1105 }
1106 
/*
 * Return the current value of attribute @idn in host (CPU) byte order.
 * Multi-byte fields are stored big-endian inside u->attributes and are
 * byte-swapped here; IDNs not handled below read as 0.
 */
static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
{
    switch (idn) {
    case UFS_QUERY_ATTR_IDN_BOOT_LU_EN:
        return u->attributes.boot_lun_en;
    case UFS_QUERY_ATTR_IDN_POWER_MODE:
        return u->attributes.current_power_mode;
    case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
        return u->attributes.active_icc_level;
    case UFS_QUERY_ATTR_IDN_OOO_DATA_EN:
        return u->attributes.out_of_order_data_en;
    case UFS_QUERY_ATTR_IDN_BKOPS_STATUS:
        return u->attributes.background_op_status;
    case UFS_QUERY_ATTR_IDN_PURGE_STATUS:
        return u->attributes.purge_status;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
        return u->attributes.max_data_in_size;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
        return u->attributes.max_data_out_size;
    case UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED:
        return be32_to_cpu(u->attributes.dyn_cap_needed);
    case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
        return u->attributes.ref_clk_freq;
    case UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK:
        return u->attributes.config_descr_lock;
    case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
        return u->attributes.max_num_of_rtt;
    case UFS_QUERY_ATTR_IDN_EE_CONTROL:
        return be16_to_cpu(u->attributes.exception_event_control);
    case UFS_QUERY_ATTR_IDN_EE_STATUS:
        /* Recompute pending exception events before reporting them */
        ufs_update_ee_status(u);
        return be16_to_cpu(u->attributes.exception_event_status);
    case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
        return be32_to_cpu(u->attributes.seconds_passed);
    case UFS_QUERY_ATTR_IDN_CNTX_CONF:
        return be16_to_cpu(u->attributes.context_conf);
    case UFS_QUERY_ATTR_IDN_FFU_STATUS:
        return u->attributes.device_ffu_status;
    case UFS_QUERY_ATTR_IDN_PSA_STATE:
        return be32_to_cpu(u->attributes.psa_state);
    case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
        return be32_to_cpu(u->attributes.psa_data_size);
    case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME:
        return u->attributes.ref_clk_gating_wait_time;
    case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP:
        /* Refresh the cached temperature before reporting it */
        u->attributes.device_case_rough_temperature = ufs_read_device_temp(u);
        return u->attributes.device_case_rough_temperature;
    case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND:
        return u->attributes.device_too_high_temp_boundary;
    case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND:
        return u->attributes.device_too_low_temp_boundary;
    case UFS_QUERY_ATTR_IDN_THROTTLING_STATUS:
        return u->attributes.throttling_status;
    case UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS:
        return u->attributes.wb_buffer_flush_status;
    case UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE:
        return u->attributes.available_wb_buffer_size;
    case UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST:
        return u->attributes.wb_buffer_life_time_est;
    case UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE:
        return be32_to_cpu(u->attributes.current_wb_buffer_size);
    case UFS_QUERY_ATTR_IDN_REFRESH_STATUS:
        return u->attributes.refresh_status;
    case UFS_QUERY_ATTR_IDN_REFRESH_FREQ:
        return u->attributes.refresh_freq;
    case UFS_QUERY_ATTR_IDN_REFRESH_UNIT:
        return u->attributes.refresh_unit;
    }
    return 0;
}
1177 
1178 static QueryRespCode ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
1179 {
1180     switch (idn) {
1181     case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
1182         if (value > UFS_QUERY_ATTR_ACTIVE_ICC_MAXVALUE) {
1183             return UFS_QUERY_RESULT_INVALID_VALUE;
1184         }
1185         u->attributes.active_icc_level = value;
1186         break;
1187     case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
1188         u->attributes.max_data_in_size = value;
1189         break;
1190     case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
1191         u->attributes.max_data_out_size = value;
1192         break;
1193     case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
1194         u->attributes.ref_clk_freq = value;
1195         break;
1196     case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
1197         u->attributes.max_num_of_rtt = value;
1198         break;
1199     case UFS_QUERY_ATTR_IDN_EE_CONTROL:
1200         u->attributes.exception_event_control = cpu_to_be16(value);
1201         break;
1202     case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
1203         u->attributes.seconds_passed = cpu_to_be32(value);
1204         break;
1205     case UFS_QUERY_ATTR_IDN_PSA_STATE:
1206         u->attributes.psa_state = value;
1207         break;
1208     case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
1209         u->attributes.psa_data_size = cpu_to_be32(value);
1210         break;
1211     }
1212     return UFS_QUERY_RESULT_SUCCESS;
1213 }
1214 
1215 static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op)
1216 {
1217     UfsHc *u = req->hc;
1218     uint8_t idn = req->req_upiu.qr.idn;
1219     uint32_t value;
1220     QueryRespCode ret;
1221 
1222     ret = ufs_attr_check_idn_valid(idn, op);
1223     if (ret) {
1224         return ret;
1225     }
1226 
1227     if (op == UFS_QUERY_ATTR_READ) {
1228         value = ufs_read_attr_value(u, idn);
1229         ret = UFS_QUERY_RESULT_SUCCESS;
1230     } else {
1231         value = be32_to_cpu(req->req_upiu.qr.value);
1232         ret = ufs_write_attr_value(u, idn, value);
1233     }
1234     req->rsp_upiu.qr.value = cpu_to_be32(value);
1235     return ret;
1236 }
1237 
/*
 * Fixed unit descriptor returned for the RPMB well-known LUN;
 * lu_enable = 0 reports the RPMB logical unit as disabled.
 */
static const RpmbUnitDescriptor rpmb_unit_desc = {
    .length = sizeof(RpmbUnitDescriptor),
    .descriptor_idn = 2,
    .unit_index = UFS_UPIU_RPMB_WLUN,
    .lu_enable = 0,
};
1244 
1245 static QueryRespCode ufs_read_unit_desc(UfsRequest *req)
1246 {
1247     UfsHc *u = req->hc;
1248     uint8_t lun = req->req_upiu.qr.index;
1249 
1250     if (lun != UFS_UPIU_RPMB_WLUN &&
1251         (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
1252         trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, lun);
1253         return UFS_QUERY_RESULT_INVALID_INDEX;
1254     }
1255 
1256     if (lun == UFS_UPIU_RPMB_WLUN) {
1257         memcpy(&req->rsp_upiu.qr.data, &rpmb_unit_desc, rpmb_unit_desc.length);
1258     } else {
1259         memcpy(&req->rsp_upiu.qr.data, &u->lus[lun]->unit_desc,
1260                sizeof(u->lus[lun]->unit_desc));
1261     }
1262 
1263     return UFS_QUERY_RESULT_SUCCESS;
1264 }
1265 
1266 static inline StringDescriptor manufacturer_str_desc(void)
1267 {
1268     StringDescriptor desc = {
1269         .length = 0x12,
1270         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1271     };
1272     desc.UC[0] = cpu_to_be16('R');
1273     desc.UC[1] = cpu_to_be16('E');
1274     desc.UC[2] = cpu_to_be16('D');
1275     desc.UC[3] = cpu_to_be16('H');
1276     desc.UC[4] = cpu_to_be16('A');
1277     desc.UC[5] = cpu_to_be16('T');
1278     return desc;
1279 }
1280 
1281 static inline StringDescriptor product_name_str_desc(void)
1282 {
1283     StringDescriptor desc = {
1284         .length = 0x22,
1285         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1286     };
1287     desc.UC[0] = cpu_to_be16('Q');
1288     desc.UC[1] = cpu_to_be16('E');
1289     desc.UC[2] = cpu_to_be16('M');
1290     desc.UC[3] = cpu_to_be16('U');
1291     desc.UC[4] = cpu_to_be16(' ');
1292     desc.UC[5] = cpu_to_be16('U');
1293     desc.UC[6] = cpu_to_be16('F');
1294     desc.UC[7] = cpu_to_be16('S');
1295     return desc;
1296 }
1297 
1298 static inline StringDescriptor product_rev_level_str_desc(void)
1299 {
1300     StringDescriptor desc = {
1301         .length = 0x0a,
1302         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1303     };
1304     desc.UC[0] = cpu_to_be16('0');
1305     desc.UC[1] = cpu_to_be16('0');
1306     desc.UC[2] = cpu_to_be16('0');
1307     desc.UC[3] = cpu_to_be16('1');
1308     return desc;
1309 }
1310 
/* Empty string descriptor: length 0x02 covers only the two header bytes. */
static const StringDescriptor null_str_desc = {
    .length = 0x02,
    .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
};
1315 
1316 static QueryRespCode ufs_read_string_desc(UfsRequest *req)
1317 {
1318     UfsHc *u = req->hc;
1319     uint8_t index = req->req_upiu.qr.index;
1320     StringDescriptor desc;
1321 
1322     if (index == u->device_desc.manufacturer_name) {
1323         desc = manufacturer_str_desc();
1324         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1325     } else if (index == u->device_desc.product_name) {
1326         desc = product_name_str_desc();
1327         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1328     } else if (index == u->device_desc.serial_number) {
1329         memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
1330     } else if (index == u->device_desc.oem_id) {
1331         memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
1332     } else if (index == u->device_desc.product_revision_level) {
1333         desc = product_rev_level_str_desc();
1334         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1335     } else {
1336         trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, index);
1337         return UFS_QUERY_RESULT_INVALID_INDEX;
1338     }
1339     return UFS_QUERY_RESULT_SUCCESS;
1340 }
1341 
1342 static inline InterconnectDescriptor interconnect_desc(void)
1343 {
1344     InterconnectDescriptor desc = {
1345         .length = sizeof(InterconnectDescriptor),
1346         .descriptor_idn = UFS_QUERY_DESC_IDN_INTERCONNECT,
1347     };
1348     desc.bcd_unipro_version = cpu_to_be16(0x180);
1349     desc.bcd_mphy_version = cpu_to_be16(0x410);
1350     return desc;
1351 }
1352 
/*
 * Handle a READ DESCRIPTOR query: copy the descriptor named by @idn into
 * the response UPIU data area, then clamp the reported length to the
 * descriptor's own length byte (data[0]).  Only selector 0 is accepted.
 */
static QueryRespCode ufs_read_desc(UfsRequest *req)
{
    UfsHc *u = req->hc;
    QueryRespCode status;
    uint8_t idn = req->req_upiu.qr.idn;
    uint8_t selector = req->req_upiu.qr.selector;
    uint16_t length = be16_to_cpu(req->req_upiu.qr.length);
    InterconnectDescriptor desc;
    if (selector != 0) {
        return UFS_QUERY_RESULT_INVALID_SELECTOR;
    }
    switch (idn) {
    case UFS_QUERY_DESC_IDN_DEVICE:
        memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_UNIT:
        status = ufs_read_unit_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_GEOMETRY:
        memcpy(&req->rsp_upiu.qr.data, &u->geometry_desc,
               sizeof(u->geometry_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_INTERCONNECT: {
        desc = interconnect_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, sizeof(InterconnectDescriptor));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    }
    case UFS_QUERY_DESC_IDN_STRING:
        status = ufs_read_string_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_POWER:
        /* mocking of power descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(PowerParametersDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(PowerParametersDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_POWER;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_HEALTH:
        /* mocking of health descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(DeviceHealthDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(DeviceHealthDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_HEALTH;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    default:
        length = 0;
        trace_ufs_err_query_invalid_idn(req->req_upiu.qr.opcode, idn);
        status = UFS_QUERY_RESULT_INVALID_IDN;
    }

    /* Never report more bytes than the descriptor actually contains. */
    if (length > req->rsp_upiu.qr.data[0]) {
        length = req->rsp_upiu.qr.data[0];
    }
    req->rsp_upiu.qr.length = cpu_to_be16(length);

    return status;
}
1413 
1414 static QueryRespCode ufs_exec_query_read(UfsRequest *req)
1415 {
1416     QueryRespCode status;
1417     switch (req->req_upiu.qr.opcode) {
1418     case UFS_UPIU_QUERY_OPCODE_NOP:
1419         status = UFS_QUERY_RESULT_SUCCESS;
1420         break;
1421     case UFS_UPIU_QUERY_OPCODE_READ_DESC:
1422         status = ufs_read_desc(req);
1423         break;
1424     case UFS_UPIU_QUERY_OPCODE_READ_ATTR:
1425         status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_READ);
1426         break;
1427     case UFS_UPIU_QUERY_OPCODE_READ_FLAG:
1428         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_READ);
1429         break;
1430     default:
1431         trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
1432         status = UFS_QUERY_RESULT_INVALID_OPCODE;
1433         break;
1434     }
1435 
1436     return status;
1437 }
1438 
1439 static QueryRespCode ufs_exec_query_write(UfsRequest *req)
1440 {
1441     QueryRespCode status;
1442     switch (req->req_upiu.qr.opcode) {
1443     case UFS_UPIU_QUERY_OPCODE_NOP:
1444         status = UFS_QUERY_RESULT_SUCCESS;
1445         break;
1446     case UFS_UPIU_QUERY_OPCODE_WRITE_DESC:
1447         /* write descriptor is not supported */
1448         status = UFS_QUERY_RESULT_NOT_WRITEABLE;
1449         break;
1450     case UFS_UPIU_QUERY_OPCODE_WRITE_ATTR:
1451         status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_WRITE);
1452         break;
1453     case UFS_UPIU_QUERY_OPCODE_SET_FLAG:
1454         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_SET);
1455         break;
1456     case UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG:
1457         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_CLEAR);
1458         break;
1459     case UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1460         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_TOGGLE);
1461         break;
1462     default:
1463         trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
1464         status = UFS_QUERY_RESULT_INVALID_OPCODE;
1465         break;
1466     }
1467 
1468     return status;
1469 }
1470 
1471 static UfsReqResult ufs_exec_query_cmd(UfsRequest *req)
1472 {
1473     uint8_t query_func = req->req_upiu.header.query_func;
1474     uint16_t data_segment_length;
1475     QueryRespCode status;
1476 
1477     trace_ufs_exec_query_cmd(req->slot, req->req_upiu.qr.opcode);
1478     if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST) {
1479         status = ufs_exec_query_read(req);
1480     } else if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST) {
1481         status = ufs_exec_query_write(req);
1482     } else {
1483         status = UFS_QUERY_RESULT_GENERAL_FAILURE;
1484     }
1485 
1486     data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length);
1487     ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0,
1488                           data_segment_length);
1489     ufs_build_query_response(req);
1490 
1491     if (status != UFS_QUERY_RESULT_SUCCESS) {
1492         return UFS_REQUEST_FAIL;
1493     }
1494     return UFS_REQUEST_SUCCESS;
1495 }
1496 
1497 static void ufs_exec_req(UfsRequest *req)
1498 {
1499     UfsReqResult req_result;
1500 
1501     if (ufs_dma_read_upiu(req)) {
1502         return;
1503     }
1504 
1505     switch (req->req_upiu.header.trans_type) {
1506     case UFS_UPIU_TRANSACTION_NOP_OUT:
1507         req_result = ufs_exec_nop_cmd(req);
1508         break;
1509     case UFS_UPIU_TRANSACTION_COMMAND:
1510         req_result = ufs_exec_scsi_cmd(req);
1511         break;
1512     case UFS_UPIU_TRANSACTION_QUERY_REQ:
1513         req_result = ufs_exec_query_cmd(req);
1514         break;
1515     default:
1516         trace_ufs_err_invalid_trans_code(req->slot,
1517                                          req->req_upiu.header.trans_type);
1518         req_result = UFS_REQUEST_FAIL;
1519     }
1520 
1521     /*
1522      * The ufs_complete_req for scsi commands is handled by the
1523      * ufs_scsi_command_complete() callback function. Therefore, to avoid
1524      * duplicate processing, ufs_complete_req() is not called for scsi commands.
1525      */
1526     if (req_result != UFS_REQUEST_NO_COMPLETE) {
1527         ufs_complete_req(req, req_result);
1528     }
1529 }
1530 
1531 static void ufs_process_req(void *opaque)
1532 {
1533     UfsHc *u = opaque;
1534     UfsRequest *req;
1535     int slot;
1536 
1537     for (slot = 0; slot < u->params.nutrs; slot++) {
1538         req = &u->req_list[slot];
1539 
1540         if (req->state != UFS_REQUEST_READY) {
1541             continue;
1542         }
1543         trace_ufs_process_req(slot);
1544         req->state = UFS_REQUEST_RUNNING;
1545 
1546         ufs_exec_req(req);
1547     }
1548 }
1549 
1550 void ufs_complete_req(UfsRequest *req, UfsReqResult req_result)
1551 {
1552     UfsHc *u = req->hc;
1553     assert(req->state == UFS_REQUEST_RUNNING);
1554 
1555     if (req_result == UFS_REQUEST_SUCCESS) {
1556         req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_SUCCESS);
1557     } else {
1558         req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_CMD_TABLE_ATTR);
1559     }
1560 
1561     req->state = UFS_REQUEST_COMPLETE;
1562 
1563     if (ufs_mcq_req(req)) {
1564         trace_ufs_mcq_complete_req(req->sq->sqid);
1565         QTAILQ_INSERT_TAIL(&req->sq->cq->req_list, req, entry);
1566         qemu_bh_schedule(req->sq->cq->bh);
1567     } else {
1568         trace_ufs_complete_req(req->slot);
1569         qemu_bh_schedule(u->complete_bh);
1570     }
1571 }
1572 
1573 static void ufs_clear_req(UfsRequest *req)
1574 {
1575     if (req->sg != NULL) {
1576         qemu_sglist_destroy(req->sg);
1577         g_free(req->sg);
1578         req->sg = NULL;
1579         req->data_len = 0;
1580     }
1581 
1582     memset(&req->utrd, 0, sizeof(req->utrd));
1583     memset(&req->req_upiu, 0, sizeof(req->req_upiu));
1584     memset(&req->rsp_upiu, 0, sizeof(req->rsp_upiu));
1585 }
1586 
/*
 * Completion bottom half for the legacy (non-MCQ) path: DMA the response
 * UPIU/UTRD of every completed slot back to the guest, update the
 * doorbell and completion-notification registers, raise the UTRCS
 * interrupt status bit where required, and re-evaluate the IRQ line.
 */
static void ufs_sendback_req(void *opaque)
{
    UfsHc *u = opaque;
    UfsRequest *req;
    int slot;

    for (slot = 0; slot < u->params.nutrs; slot++) {
        req = &u->req_list[slot];

        if (req->state != UFS_REQUEST_COMPLETE) {
            continue;
        }

        /* Leave the slot in ERROR state if the write-back DMA failed. */
        if (ufs_dma_write_upiu(req)) {
            req->state = UFS_REQUEST_ERROR;
            continue;
        }

        /*
         * TODO: UTP Transfer Request Interrupt Aggregation Control is not yet
         * supported
         */
        if (le32_to_cpu(req->utrd.header.dword_2) != UFS_OCS_SUCCESS ||
            le32_to_cpu(req->utrd.header.dword_0) & UFS_UTP_REQ_DESC_INT_CMD) {
            u->reg.is = FIELD_DP32(u->reg.is, IS, UTRCS, 1);
        }

        /* Clear the doorbell bit, set the completion-notification bit. */
        u->reg.utrldbr &= ~(1 << slot);
        u->reg.utrlcnr |= (1 << slot);

        trace_ufs_sendback_req(req->slot);

        ufs_clear_req(req);
        req->state = UFS_REQUEST_IDLE;
    }

    ufs_irq_check(u);
}
1625 
1626 static bool ufs_check_constraints(UfsHc *u, Error **errp)
1627 {
1628     if (u->params.nutrs > UFS_MAX_NUTRS) {
1629         error_setg(errp, "nutrs must be less than or equal to %d",
1630                    UFS_MAX_NUTRS);
1631         return false;
1632     }
1633 
1634     if (u->params.nutmrs > UFS_MAX_NUTMRS) {
1635         error_setg(errp, "nutmrs must be less than or equal to %d",
1636                    UFS_MAX_NUTMRS);
1637         return false;
1638     }
1639 
1640     if (u->params.mcq_maxq >= UFS_MAX_MCQ_QNUM) {
1641         error_setg(errp, "mcq-maxq must be less than %d", UFS_MAX_MCQ_QNUM);
1642         return false;
1643     }
1644 
1645     return true;
1646 }
1647 
/*
 * Wire the controller up as a PCI function: legacy interrupt pin,
 * programming interface, the MMIO register window in BAR0, and the
 * IRQ used by ufs_irq_check().
 */
static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev)
{
    uint8_t *pci_conf = pci_dev->config;

    /* Use legacy interrupt pin INTA */
    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x1);

    /* All HCI registers are exposed through a single memory BAR. */
    memory_region_init_io(&u->iomem, OBJECT(u), &ufs_mmio_ops, u, "ufs",
                          u->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &u->iomem);
    u->irq = pci_allocate_irq(pci_dev);
}
1660 
1661 static void ufs_init_state(UfsHc *u)
1662 {
1663     u->req_list = g_new0(UfsRequest, u->params.nutrs);
1664 
1665     for (int i = 0; i < u->params.nutrs; i++) {
1666         u->req_list[i].hc = u;
1667         u->req_list[i].slot = i;
1668         u->req_list[i].sg = NULL;
1669         u->req_list[i].state = UFS_REQUEST_IDLE;
1670     }
1671 
1672     u->doorbell_bh = qemu_bh_new_guarded(ufs_process_req, u,
1673                                          &DEVICE(u)->mem_reentrancy_guard);
1674     u->complete_bh = qemu_bh_new_guarded(ufs_sendback_req, u,
1675                                          &DEVICE(u)->mem_reentrancy_guard);
1676 
1677     if (u->params.mcq) {
1678         memset(u->sq, 0, sizeof(u->sq));
1679         memset(u->cq, 0, sizeof(u->cq));
1680     }
1681 }
1682 
/*
 * Initialize the host-controller register file and the device-level
 * descriptors/attributes/flags to their reset values.  Values kept in
 * descriptor structures follow the UFS big-endian wire layout.
 */
static void ufs_init_hc(UfsHc *u)
{
    uint32_t cap = 0;
    uint32_t mcqconfig = 0;
    uint32_t mcqcap = 0;

    /* The MMIO window must be a power of two covering all registers. */
    u->reg_size = pow2ceil(ufs_reg_size(u));

    memset(&u->reg, 0, sizeof(u->reg));
    memset(&u->mcq_reg, 0, sizeof(u->mcq_reg));
    memset(&u->mcq_op_reg, 0, sizeof(u->mcq_op_reg));
    /* Capabilities register: slot counts are encoded as N-1. */
    cap = FIELD_DP32(cap, CAP, NUTRS, (u->params.nutrs - 1));
    cap = FIELD_DP32(cap, CAP, RTT, 2);
    cap = FIELD_DP32(cap, CAP, NUTMRS, (u->params.nutmrs - 1));
    cap = FIELD_DP32(cap, CAP, AUTOH8, 0);
    cap = FIELD_DP32(cap, CAP, 64AS, 1);
    cap = FIELD_DP32(cap, CAP, OODDS, 0);
    cap = FIELD_DP32(cap, CAP, UICDMETMS, 0);
    cap = FIELD_DP32(cap, CAP, CS, 0);
    cap = FIELD_DP32(cap, CAP, LSDBS, 0);
    cap = FIELD_DP32(cap, CAP, MCQS, u->params.mcq);
    u->reg.cap = cap;

    if (u->params.mcq) {
        mcqconfig = FIELD_DP32(mcqconfig, MCQCONFIG, MAC, 0x1f);
        u->reg.mcqconfig = mcqconfig;

        mcqcap = FIELD_DP32(mcqcap, MCQCAP, MAXQ, u->params.mcq_maxq - 1);
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, RRP, 1);
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, QCFGPTR, UFS_MCQ_QCFGPTR);
        u->reg.mcqcap = mcqcap;

        /* Lay out each queue's SQ/SQ-int/CQ/CQ-int register sub-blocks. */
        for (int i = 0; i < ARRAY_SIZE(u->mcq_reg); i++) {
            uint64_t addr = ufs_mcq_op_reg_addr(u, i);
            u->mcq_reg[i].sqdao = addr;
            u->mcq_reg[i].sqisao = addr + sizeof(UfsMcqSqReg);
            addr += sizeof(UfsMcqSqReg);
            u->mcq_reg[i].cqdao = addr + sizeof(UfsMcqSqIntReg);
            addr += sizeof(UfsMcqSqIntReg);
            u->mcq_reg[i].cqisao = addr + sizeof(UfsMcqCqReg);
        }
    }
    u->reg.ver = UFS_SPEC_VER;

    memset(&u->device_desc, 0, sizeof(DeviceDescriptor));
    u->device_desc.length = sizeof(DeviceDescriptor);
    u->device_desc.descriptor_idn = UFS_QUERY_DESC_IDN_DEVICE;
    u->device_desc.device_sub_class = 0x01;
    u->device_desc.number_lu = 0x00;
    u->device_desc.number_wlu = 0x04;
    /* TODO: Revisit it when Power Management is implemented */
    u->device_desc.init_power_mode = 0x01; /* Active Mode */
    u->device_desc.high_priority_lun = 0x7F; /* Same Priority */
    u->device_desc.spec_version = cpu_to_be16(UFS_SPEC_VER);
    /* String-descriptor indexes consumed by ufs_read_string_desc() */
    u->device_desc.manufacturer_name = 0x00;
    u->device_desc.product_name = 0x01;
    u->device_desc.serial_number = 0x02;
    u->device_desc.oem_id = 0x03;
    u->device_desc.ud_0_base_offset = 0x16;
    u->device_desc.ud_config_p_length = 0x1A;
    u->device_desc.device_rtt_cap = 0x02;
    /* Advertise temperature notification support (see ufs_read_device_temp) */
    u->device_desc.ufs_features_support = UFS_DEV_HIGH_TEMP_NOTIF |
        UFS_DEV_LOW_TEMP_NOTIF;
    u->device_desc.queue_depth = u->params.nutrs;
    u->device_desc.product_revision_level = 0x04;
    u->device_desc.extended_ufs_features_support =
        cpu_to_be32(UFS_DEV_HIGH_TEMP_NOTIF | UFS_DEV_LOW_TEMP_NOTIF);

    memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor));
    u->geometry_desc.length = sizeof(GeometryDescriptor);
    u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
    u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
    u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */
    u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */
    u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
    u->geometry_desc.max_in_buffer_size = 0x8;
    u->geometry_desc.max_out_buffer_size = 0x8;
    u->geometry_desc.rpmb_read_write_size = 0x40;
    u->geometry_desc.data_ordering =
        0x0; /* out-of-order data transfer is not supported */
    u->geometry_desc.max_context_id_number = 0x5;
    u->geometry_desc.supported_memory_types = cpu_to_be16(0x8001);

    memset(&u->attributes, 0, sizeof(u->attributes));
    u->attributes.max_data_in_size = 0x08;
    u->attributes.max_data_out_size = 0x08;
    u->attributes.ref_clk_freq = 0x01; /* 26 MHz */
    /* configure descriptor is not supported */
    u->attributes.config_descr_lock = 0x01;
    u->attributes.max_num_of_rtt = 0x02;
    u->attributes.device_too_high_temp_boundary = UFS_TOO_HIGH_TEMP_BOUNDARY;
    u->attributes.device_too_low_temp_boundary = UFS_TOO_LOW_TEMP_BOUNDARY;

    memset(&u->flags, 0, sizeof(u->flags));
    u->flags.permanently_disable_fw_update = 1;

    /*
     * The temperature value is fixed to UFS_TEMPERATURE and does not change
     * dynamically
     */
    u->temperature = UFS_TEMPERATURE;
}
1785 
/*
 * PCI realize callback: validate properties, create the UFS bus, then
 * bring up request state, the register file and the PCI resources, and
 * finally attach the four well-known logical units.
 */
static void ufs_realize(PCIDevice *pci_dev, Error **errp)
{
    UfsHc *u = UFS(pci_dev);

    /* Reject out-of-range properties before allocating anything. */
    if (!ufs_check_constraints(u, errp)) {
        return;
    }

    qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev,
              u->parent_obj.qdev.id);

    ufs_init_state(u);
    ufs_init_hc(u);
    ufs_init_pci(u, pci_dev);

    /* Well-known logical units */
    ufs_init_wlu(&u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN);
    ufs_init_wlu(&u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN);
    ufs_init_wlu(&u->boot_wlu, UFS_UPIU_BOOT_WLUN);
    ufs_init_wlu(&u->rpmb_wlu, UFS_UPIU_RPMB_WLUN);
}
1806 
1807 static void ufs_exit(PCIDevice *pci_dev)
1808 {
1809     UfsHc *u = UFS(pci_dev);
1810 
1811     qemu_bh_delete(u->doorbell_bh);
1812     qemu_bh_delete(u->complete_bh);
1813 
1814     for (int i = 0; i < u->params.nutrs; i++) {
1815         ufs_clear_req(&u->req_list[i]);
1816     }
1817     g_free(u->req_list);
1818 
1819     for (int i = 0; i < ARRAY_SIZE(u->sq); i++) {
1820         if (u->sq[i]) {
1821             ufs_mcq_delete_sq(u, i);
1822         }
1823     }
1824     for (int i = 0; i < ARRAY_SIZE(u->cq); i++) {
1825         if (u->cq[i]) {
1826             ufs_mcq_delete_cq(u, i);
1827         }
1828     }
1829 }
1830 
1831 static const Property ufs_props[] = {
1832     DEFINE_PROP_STRING("serial", UfsHc, params.serial),
1833     DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32),
1834     DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8),
1835     DEFINE_PROP_BOOL("mcq", UfsHc, params.mcq, false),
1836     DEFINE_PROP_UINT8("mcq-maxq", UfsHc, params.mcq_maxq, 2),
1837 };
1838 
/*
 * No device state is described for migration; marking the device
 * unmigratable makes migration fail cleanly while a UFS device is present.
 */
static const VMStateDescription ufs_vmstate = {
    .name = "ufs",
    .unmigratable = 1,
};
1843 
1844 static void ufs_class_init(ObjectClass *oc, void *data)
1845 {
1846     DeviceClass *dc = DEVICE_CLASS(oc);
1847     PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
1848 
1849     pc->realize = ufs_realize;
1850     pc->exit = ufs_exit;
1851     pc->vendor_id = PCI_VENDOR_ID_REDHAT;
1852     pc->device_id = PCI_DEVICE_ID_REDHAT_UFS;
1853     pc->class_id = PCI_CLASS_STORAGE_UFS;
1854 
1855     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1856     dc->desc = "Universal Flash Storage";
1857     device_class_set_props(dc, ufs_props);
1858     dc->vmsd = &ufs_vmstate;
1859 }
1860 
1861 static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev,
1862                                   Error **errp)
1863 {
1864     if (strcmp(object_get_typename(OBJECT(qdev)), TYPE_UFS_LU) != 0) {
1865         error_setg(errp, "%s cannot be connected to ufs-bus",
1866                    object_get_typename(OBJECT(qdev)));
1867         return false;
1868     }
1869 
1870     return true;
1871 }
1872 
1873 static char *ufs_bus_get_dev_path(DeviceState *dev)
1874 {
1875     BusState *bus = qdev_get_parent_bus(dev);
1876 
1877     return qdev_get_dev_path(bus->parent);
1878 }
1879 
1880 static void ufs_bus_class_init(ObjectClass *class, void *data)
1881 {
1882     BusClass *bc = BUS_CLASS(class);
1883     bc->get_dev_path = ufs_bus_get_dev_path;
1884     bc->check_address = ufs_bus_check_address;
1885 }
1886 
/* QOM type registration for the UFS controller: a PCIe storage device. */
static const TypeInfo ufs_info = {
    .name = TYPE_UFS,
    .parent = TYPE_PCI_DEVICE,
    .class_init = ufs_class_init,
    .instance_size = sizeof(UfsHc),
    .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
};
1894 
/* QOM type registration for the bus that ufs-lu devices plug into. */
static const TypeInfo ufs_bus_info = {
    .name = TYPE_UFS_BUS,
    .parent = TYPE_BUS,
    .class_init = ufs_bus_class_init,
    .class_size = sizeof(UfsBusClass),
    .instance_size = sizeof(UfsBus),
};
1902 
1903 static void ufs_register_types(void)
1904 {
1905     type_register_static(&ufs_info);
1906     type_register_static(&ufs_bus_info);
1907 }
1908 
1909 type_init(ufs_register_types)
1910