xref: /qemu/hw/ufs/ufs.c (revision 06b40d250ecfa1633209c2e431a7a38acfd03a98)
1 /*
2  * QEMU Universal Flash Storage (UFS) Controller
3  *
4  * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
5  *
6  * Written by Jeuk Kim <jeuk20.kim@samsung.com>
7  *
8  * SPDX-License-Identifier: GPL-2.0-or-later
9  */
10 
11 /**
12  * Reference Specs: https://www.jedec.org/, 4.0
13  *
14  * Usage
15  * -----
16  *
17  * Add options:
18  *      -drive file=<file>,if=none,id=<drive_id>
19  *      -device ufs,serial=<serial>,id=<bus_name>, \
20  *              nutrs=<N[optional]>,nutmrs=<N[optional]>
21  *      -device ufs-lu,drive=<drive_id>,bus=<bus_name>
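 *
 * A hypothetical example combining the options above (the image path, serial
 * number and IDs are placeholders, not values required by the device):
 *      -drive file=ufs.img,if=none,id=ufs-drive0
 *      -device ufs,serial=deadbeef,id=ufs0
 *      -device ufs-lu,drive=ufs-drive0,bus=ufs0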
22  */
23 
24 #include "qemu/osdep.h"
25 #include "qapi/error.h"
26 #include "migration/vmstate.h"
27 #include "scsi/constants.h"
28 #include "hw/irq.h"
29 #include "trace.h"
30 #include "ufs.h"
31 
32 /* The QEMU-UFS device follows spec version 4.0 */
33 #define UFS_SPEC_VER 0x0400
34 #define UFS_MAX_NUTRS 32
35 #define UFS_MAX_NUTMRS 8
36 #define UFS_MCQ_QCFGPTR 2
37 
38 /* Each value represents the temperature in degrees Celsius as (value - 80) */
39 #define UFS_TEMPERATURE 120
40 #define UFS_TOO_HIGH_TEMP_BOUNDARY 160
41 #define UFS_TOO_LOW_TEMP_BOUNDARY 60
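/*
 * With that encoding, UFS_TEMPERATURE reads as 120 - 80 = 40 degrees Celsius,
 * and the boundaries as +80 and -20 degrees Celsius respectively.
 */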
42 
43 static void ufs_exec_req(UfsRequest *req);
44 static void ufs_clear_req(UfsRequest *req);
45 
46 static inline uint64_t ufs_mcq_reg_addr(UfsHc *u, int qid)
47 {
48     /* Submission Queue MCQ Registers offset (400h) */
49     return (UFS_MCQ_QCFGPTR * 0x200) + qid * 0x40;
50 }
51 
52 static inline uint64_t ufs_mcq_op_reg_addr(UfsHc *u, int qid)
53 {
54     /* MCQ Operation & Runtime Registers offset (1000h) */
55     return UFS_MCQ_OPR_START + qid * 48;
56 }
57 
58 static inline uint64_t ufs_reg_size(UfsHc *u)
59 {
60     /* Total UFS HCI Register size in bytes */
61     return ufs_mcq_op_reg_addr(u, 0) + sizeof(u->mcq_op_reg);
62 }
63 
64 static inline bool ufs_is_mcq_reg(UfsHc *u, uint64_t addr, unsigned size)
65 {
66     uint64_t mcq_reg_addr;
67 
68     if (!u->params.mcq) {
69         return false;
70     }
71 
72     mcq_reg_addr = ufs_mcq_reg_addr(u, 0);
73     return (addr >= mcq_reg_addr &&
74             addr + size <= mcq_reg_addr + sizeof(u->mcq_reg));
75 }
76 
77 static inline bool ufs_is_mcq_op_reg(UfsHc *u, uint64_t addr, unsigned size)
78 {
79     uint64_t mcq_op_reg_addr;
80 
81     if (!u->params.mcq) {
82         return false;
83     }
84 
85     mcq_op_reg_addr = ufs_mcq_op_reg_addr(u, 0);
86     return (addr >= mcq_op_reg_addr &&
87             addr + size <= mcq_op_reg_addr + sizeof(u->mcq_op_reg));
88 }
89 
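/*
 * DMA helpers: read from / write to guest memory via PCI DMA. Accesses that
 * wrap around the address space, or that reach beyond 4 GiB while CAP.64AS is
 * disabled, are rejected with MEMTX_DECODE_ERROR.
 */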
90 static MemTxResult ufs_addr_read(UfsHc *u, hwaddr addr, void *buf, int size)
91 {
92     hwaddr hi = addr + size - 1;
93 
94     if (hi < addr) {
95         return MEMTX_DECODE_ERROR;
96     }
97 
98     if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
99         return MEMTX_DECODE_ERROR;
100     }
101 
102     return pci_dma_read(PCI_DEVICE(u), addr, buf, size);
103 }
104 
105 static MemTxResult ufs_addr_write(UfsHc *u, hwaddr addr, const void *buf,
106                                   int size)
107 {
108     hwaddr hi = addr + size - 1;
109     if (hi < addr) {
110         return MEMTX_DECODE_ERROR;
111     }
112 
113     if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
114         return MEMTX_DECODE_ERROR;
115     }
116 
117     return pci_dma_write(PCI_DEVICE(u), addr, buf, size);
118 }
119 
120 static inline hwaddr ufs_get_utrd_addr(UfsHc *u, uint32_t slot)
121 {
122     hwaddr utrl_base_addr = (((hwaddr)u->reg.utrlbau) << 32) + u->reg.utrlba;
123     hwaddr utrd_addr = utrl_base_addr + slot * sizeof(UtpTransferReqDesc);
124 
125     return utrd_addr;
126 }
127 
128 static inline hwaddr ufs_get_req_upiu_base_addr(const UtpTransferReqDesc *utrd)
129 {
130     uint32_t cmd_desc_base_addr_lo =
131         le32_to_cpu(utrd->command_desc_base_addr_lo);
132     uint32_t cmd_desc_base_addr_hi =
133         le32_to_cpu(utrd->command_desc_base_addr_hi);
134 
135     return (((hwaddr)cmd_desc_base_addr_hi) << 32) + cmd_desc_base_addr_lo;
136 }
137 
138 static inline hwaddr ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc *utrd)
139 {
140     hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(utrd);
141     uint32_t rsp_upiu_byte_off =
142         le16_to_cpu(utrd->response_upiu_offset) * sizeof(uint32_t);
143     return req_upiu_base_addr + rsp_upiu_byte_off;
144 }
145 
146 static MemTxResult ufs_dma_read_utrd(UfsRequest *req)
147 {
148     UfsHc *u = req->hc;
149     hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
150     MemTxResult ret;
151 
152     ret = ufs_addr_read(u, utrd_addr, &req->utrd, sizeof(req->utrd));
153     if (ret) {
154         trace_ufs_err_dma_read_utrd(req->slot, utrd_addr);
155     }
156     return ret;
157 }
158 
159 static MemTxResult ufs_dma_read_req_upiu(UfsRequest *req)
160 {
161     UfsHc *u = req->hc;
162     hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
163     UtpUpiuReq *req_upiu = &req->req_upiu;
164     uint32_t copy_size;
165     uint16_t data_segment_length;
166     MemTxResult ret;
167 
168     /*
169      * To know the size of the req_upiu, we need to read the
170      * data_segment_length in the header first.
171      */
172     ret = ufs_addr_read(u, req_upiu_base_addr, &req_upiu->header,
173                         sizeof(UtpUpiuHeader));
174     if (ret) {
175         trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
176         return ret;
177     }
178     data_segment_length = be16_to_cpu(req_upiu->header.data_segment_length);
179 
180     copy_size = sizeof(UtpUpiuHeader) + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
181                 data_segment_length;
182 
183     if (copy_size > sizeof(req->req_upiu)) {
184         copy_size = sizeof(req->req_upiu);
185     }
186 
187     ret = ufs_addr_read(u, req_upiu_base_addr, &req->req_upiu, copy_size);
188     if (ret) {
189         trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
190     }
191     return ret;
192 }
193 
194 static MemTxResult ufs_dma_read_prdt(UfsRequest *req)
195 {
196     UfsHc *u = req->hc;
197     uint16_t prdt_len = le16_to_cpu(req->utrd.prd_table_length);
198     uint16_t prdt_byte_off =
199         le16_to_cpu(req->utrd.prd_table_offset) * sizeof(uint32_t);
200     uint32_t prdt_size = prdt_len * sizeof(UfshcdSgEntry);
201     g_autofree UfshcdSgEntry *prd_entries = NULL;
202     hwaddr req_upiu_base_addr, prdt_base_addr;
203     int err;
204 
205     assert(!req->sg);
206 
207     if (prdt_size == 0) {
208         return MEMTX_OK;
209     }
210     prd_entries = g_new(UfshcdSgEntry, prdt_len);
211 
212     req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
213     prdt_base_addr = req_upiu_base_addr + prdt_byte_off;
214 
215     err = ufs_addr_read(u, prdt_base_addr, prd_entries, prdt_size);
216     if (err) {
217         trace_ufs_err_dma_read_prdt(req->slot, prdt_base_addr);
218         return err;
219     }
220 
221     req->sg = g_malloc0(sizeof(QEMUSGList));
222     pci_dma_sglist_init(req->sg, PCI_DEVICE(u), prdt_len);
223     req->data_len = 0;
224 
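    /*
     * The size field of a PRDT entry holds the data byte count minus one,
     * hence the +1 when accumulating the scatter-gather list below.
     */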
225     for (uint16_t i = 0; i < prdt_len; ++i) {
226         hwaddr data_dma_addr = le64_to_cpu(prd_entries[i].addr);
227         uint32_t data_byte_count = le32_to_cpu(prd_entries[i].size) + 1;
228         qemu_sglist_add(req->sg, data_dma_addr, data_byte_count);
229         req->data_len += data_byte_count;
230     }
231     return MEMTX_OK;
232 }
233 
234 static MemTxResult ufs_dma_read_upiu(UfsRequest *req)
235 {
236     MemTxResult ret;
237 
238     /*
239      * In case of MCQ, the UTRD has already been read from an SQ, so skip it.
240      */
241     if (!ufs_mcq_req(req)) {
242         ret = ufs_dma_read_utrd(req);
243         if (ret) {
244             return ret;
245         }
246     }
247 
248     ret = ufs_dma_read_req_upiu(req);
249     if (ret) {
250         return ret;
251     }
252 
253     ret = ufs_dma_read_prdt(req);
254     if (ret) {
255         return ret;
256     }
257 
258     return 0;
259 }
260 
261 static MemTxResult ufs_dma_write_utrd(UfsRequest *req)
262 {
263     UfsHc *u = req->hc;
264     hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
265     MemTxResult ret;
266 
267     ret = ufs_addr_write(u, utrd_addr, &req->utrd, sizeof(req->utrd));
268     if (ret) {
269         trace_ufs_err_dma_write_utrd(req->slot, utrd_addr);
270     }
271     return ret;
272 }
273 
274 static MemTxResult ufs_dma_write_rsp_upiu(UfsRequest *req)
275 {
276     UfsHc *u = req->hc;
277     hwaddr rsp_upiu_base_addr = ufs_get_rsp_upiu_base_addr(&req->utrd);
278     uint32_t rsp_upiu_byte_len =
279         le16_to_cpu(req->utrd.response_upiu_length) * sizeof(uint32_t);
280     uint16_t data_segment_length =
281         be16_to_cpu(req->rsp_upiu.header.data_segment_length);
282     uint32_t copy_size = sizeof(UtpUpiuHeader) +
283                          UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
284                          data_segment_length;
285     MemTxResult ret;
286 
287     if (copy_size > rsp_upiu_byte_len) {
288         copy_size = rsp_upiu_byte_len;
289     }
290 
291     if (copy_size > sizeof(req->rsp_upiu)) {
292         copy_size = sizeof(req->rsp_upiu);
293     }
294 
295     ret = ufs_addr_write(u, rsp_upiu_base_addr, &req->rsp_upiu, copy_size);
296     if (ret) {
297         trace_ufs_err_dma_write_rsp_upiu(req->slot, rsp_upiu_base_addr);
298     }
299     return ret;
300 }
301 
302 static MemTxResult ufs_dma_write_upiu(UfsRequest *req)
303 {
304     MemTxResult ret;
305 
306     ret = ufs_dma_write_rsp_upiu(req);
307     if (ret) {
308         return ret;
309     }
310 
311     return ufs_dma_write_utrd(req);
312 }
313 
314 static void ufs_irq_check(UfsHc *u)
315 {
316     PCIDevice *pci = PCI_DEVICE(u);
317 
318     if ((u->reg.is & UFS_INTR_MASK) & u->reg.ie) {
319         trace_ufs_irq_raise();
320         pci_irq_assert(pci);
321     } else {
322         trace_ufs_irq_lower();
323         pci_irq_deassert(pci);
324     }
325 }
326 
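/*
 * Legacy (non-MCQ) doorbell handling: each newly set UTRLDBR bit marks the
 * corresponding slot as ready; the actual execution is deferred to the
 * doorbell bottom half.
 */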
327 static void ufs_process_db(UfsHc *u, uint32_t val)
328 {
329     DECLARE_BITMAP(doorbell, UFS_MAX_NUTRS);
330     uint32_t slot;
331     uint32_t nutrs = u->params.nutrs;
332     UfsRequest *req;
333 
334     val &= ~u->reg.utrldbr;
335     if (!val) {
336         return;
337     }
338 
339     doorbell[0] = val;
340     slot = find_first_bit(doorbell, nutrs);
341 
342     while (slot < nutrs) {
343         req = &u->req_list[slot];
344         if (req->state == UFS_REQUEST_ERROR) {
345             trace_ufs_err_utrl_slot_error(req->slot);
346             return;
347         }
348 
349         if (req->state != UFS_REQUEST_IDLE) {
350             trace_ufs_err_utrl_slot_busy(req->slot);
351             return;
352         }
353 
354         trace_ufs_process_db(slot);
355         req->state = UFS_REQUEST_READY;
356         slot = find_next_bit(doorbell, nutrs, slot + 1);
357     }
358 
359     qemu_bh_schedule(u->doorbell_bh);
360 }
361 
362 static void ufs_process_uiccmd(UfsHc *u, uint32_t val)
363 {
364     trace_ufs_process_uiccmd(val, u->reg.ucmdarg1, u->reg.ucmdarg2,
365                              u->reg.ucmdarg3);
366     /*
367      * Only the essential UIC commands for running drivers on Linux and Windows
368      * are implemented.
369      */
370     switch (val) {
371     case UFS_UIC_CMD_DME_LINK_STARTUP:
372         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, DP, 1);
373         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTRLRDY, 1);
374         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTMRLRDY, 1);
375         u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
376         break;
377     /* TODO: Revisit it when Power Management is implemented */
378     case UFS_UIC_CMD_DME_HIBER_ENTER:
379         u->reg.is = FIELD_DP32(u->reg.is, IS, UHES, 1);
380         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
381         u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
382         break;
383     case UFS_UIC_CMD_DME_HIBER_EXIT:
384         u->reg.is = FIELD_DP32(u->reg.is, IS, UHXS, 1);
385         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
386         u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
387         break;
388     default:
389         u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_FAILURE;
390     }
391 
392     u->reg.is = FIELD_DP32(u->reg.is, IS, UCCS, 1);
393 
394     ufs_irq_check(u);
395 }
396 
397 static void ufs_mcq_init_req(UfsHc *u, UfsRequest *req, UfsSq *sq)
398 {
399     memset(req, 0, sizeof(*req));
400 
401     req->hc = u;
402     req->state = UFS_REQUEST_IDLE;
403     req->slot = UFS_INVALID_SLOT;
404     req->sq = sq;
405 }
406 
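/*
 * Bottom-half handler for an MCQ submission queue: fetch SQ entries from
 * guest memory, advance the SQ head, and execute each entry using a free
 * request taken from the per-queue request list.
 */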
407 static void ufs_mcq_process_sq(void *opaque)
408 {
409     UfsSq *sq = opaque;
410     UfsHc *u = sq->u;
411     UfsSqEntry sqe;
412     UfsRequest *req;
413     hwaddr addr;
414     uint16_t head = ufs_mcq_sq_head(u, sq->sqid);
415     int err;
416 
417     while (!(ufs_mcq_sq_empty(u, sq->sqid) || QTAILQ_EMPTY(&sq->req_list))) {
418         addr = sq->addr + head;
419         err = ufs_addr_read(sq->u, addr, (void *)&sqe, sizeof(sqe));
420         if (err) {
421             trace_ufs_err_dma_read_sq(sq->sqid, addr);
422             return;
423         }
424 
425         head = (head + sizeof(sqe)) % (sq->size * sizeof(sqe));
426         ufs_mcq_update_sq_head(u, sq->sqid, head);
427 
428         req = QTAILQ_FIRST(&sq->req_list);
429         QTAILQ_REMOVE(&sq->req_list, req, entry);
430 
431         ufs_mcq_init_req(sq->u, req, sq);
432         memcpy(&req->utrd, &sqe, sizeof(req->utrd));
433 
434         req->state = UFS_REQUEST_RUNNING;
435         ufs_exec_req(req);
436     }
437 }
438 
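/*
 * Bottom-half handler for an MCQ completion queue: write back the response
 * UPIU and a completion queue entry for each finished request, advance the
 * CQ tail, and raise the CQ event interrupt if the queue is not empty.
 */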
439 static void ufs_mcq_process_cq(void *opaque)
440 {
441     UfsCq *cq = opaque;
442     UfsHc *u = cq->u;
443     UfsRequest *req, *next;
444     MemTxResult ret;
445     uint32_t tail = ufs_mcq_cq_tail(u, cq->cqid);
446 
447     QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next)
448     {
449         ufs_dma_write_rsp_upiu(req);
450 
451         req->cqe.utp_addr =
452             ((uint64_t)req->utrd.command_desc_base_addr_hi << 32ULL) |
453             req->utrd.command_desc_base_addr_lo;
454         req->cqe.utp_addr |= req->sq->sqid;
455         req->cqe.resp_len = req->utrd.response_upiu_length;
456         req->cqe.resp_off = req->utrd.response_upiu_offset;
457         req->cqe.prdt_len = req->utrd.prd_table_length;
458         req->cqe.prdt_off = req->utrd.prd_table_offset;
459         req->cqe.status = req->utrd.header.dword_2 & 0xf;
460         req->cqe.error = 0;
461 
462         ret = ufs_addr_write(u, cq->addr + tail, &req->cqe, sizeof(req->cqe));
463         if (ret) {
464             trace_ufs_err_dma_write_cq(cq->cqid, cq->addr + tail);
465         }
466         QTAILQ_REMOVE(&cq->req_list, req, entry);
467 
468         tail = (tail + sizeof(req->cqe)) % (cq->size * sizeof(req->cqe));
469         ufs_mcq_update_cq_tail(u, cq->cqid, tail);
470 
471         ufs_clear_req(req);
472         QTAILQ_INSERT_TAIL(&req->sq->req_list, req, entry);
473     }
474 
475     if (!ufs_mcq_cq_empty(u, cq->cqid)) {
476         u->mcq_op_reg[cq->cqid].cq_int.is =
477             FIELD_DP32(u->mcq_op_reg[cq->cqid].cq_int.is, CQIS, TEPS, 1);
478 
479         u->reg.is = FIELD_DP32(u->reg.is, IS, CQES, 1);
480         ufs_irq_check(u);
481     }
482 }
483 
484 static bool ufs_mcq_create_sq(UfsHc *u, uint8_t qid, uint32_t attr)
485 {
486     UfsMcqReg *reg = &u->mcq_reg[qid];
487     UfsSq *sq;
488     uint8_t cqid = FIELD_EX32(attr, SQATTR, CQID);
489 
490     if (qid >= u->params.mcq_maxq) {
491         trace_ufs_err_mcq_create_sq_invalid_sqid(qid);
492         return false;
493     }
494 
495     if (u->sq[qid]) {
496         trace_ufs_err_mcq_create_sq_already_exists(qid);
497         return false;
498     }
499 
500     if (!u->cq[cqid]) {
501         trace_ufs_err_mcq_create_sq_invalid_cqid(qid);
502         return false;
503     }
504 
505     sq = g_malloc0(sizeof(*sq));
506     sq->u = u;
507     sq->sqid = qid;
508     sq->cq = u->cq[cqid];
509     sq->addr = ((uint64_t)reg->squba << 32) | reg->sqlba;
510     sq->size = ((FIELD_EX32(attr, SQATTR, SIZE) + 1) << 2) / sizeof(UfsSqEntry);
511 
512     sq->bh = qemu_bh_new_guarded(ufs_mcq_process_sq, sq,
513                                  &DEVICE(u)->mem_reentrancy_guard);
514     sq->req = g_new0(UfsRequest, sq->size);
515     QTAILQ_INIT(&sq->req_list);
516     for (int i = 0; i < sq->size; i++) {
517         ufs_mcq_init_req(u, &sq->req[i], sq);
518         QTAILQ_INSERT_TAIL(&sq->req_list, &sq->req[i], entry);
519     }
520 
521     u->sq[qid] = sq;
522 
523     trace_ufs_mcq_create_sq(sq->sqid, sq->cq->cqid, sq->addr, sq->size);
524     return true;
525 }
526 
527 static bool ufs_mcq_delete_sq(UfsHc *u, uint8_t qid)
528 {
529     UfsSq *sq;
530 
531     if (qid >= u->params.mcq_maxq) {
532         trace_ufs_err_mcq_delete_sq_invalid_sqid(qid);
533         return false;
534     }
535 
536     if (!u->sq[qid]) {
537         trace_ufs_err_mcq_delete_sq_not_exists(qid);
538         return false;
539     }
540 
541     sq = u->sq[qid];
542 
543     qemu_bh_delete(sq->bh);
544     g_free(sq->req);
545     g_free(sq);
546     u->sq[qid] = NULL;
547     return true;
548 }
549 
550 static bool ufs_mcq_create_cq(UfsHc *u, uint8_t qid, uint32_t attr)
551 {
552     UfsMcqReg *reg = &u->mcq_reg[qid];
553     UfsCq *cq;
554 
555     if (qid >= u->params.mcq_maxq) {
556         trace_ufs_err_mcq_create_cq_invalid_cqid(qid);
557         return false;
558     }
559 
560     if (u->cq[qid]) {
561         trace_ufs_err_mcq_create_cq_already_exists(qid);
562         return false;
563     }
564 
565     cq = g_malloc0(sizeof(*cq));
566     cq->u = u;
567     cq->cqid = qid;
568     cq->addr = ((uint64_t)reg->cquba << 32) | reg->cqlba;
569     cq->size = ((FIELD_EX32(attr, CQATTR, SIZE) + 1) << 2) / sizeof(UfsCqEntry);
570 
571     cq->bh = qemu_bh_new_guarded(ufs_mcq_process_cq, cq,
572                                  &DEVICE(u)->mem_reentrancy_guard);
573     QTAILQ_INIT(&cq->req_list);
574 
575     u->cq[qid] = cq;
576 
577     trace_ufs_mcq_create_cq(cq->cqid, cq->addr, cq->size);
578     return true;
579 }
580 
581 static bool ufs_mcq_delete_cq(UfsHc *u, uint8_t qid)
582 {
583     UfsCq *cq;
584 
585     if (qid >= u->params.mcq_maxq) {
586         trace_ufs_err_mcq_delete_cq_invalid_cqid(qid);
587         return false;
588     }
589 
590     if (!u->cq[qid]) {
591         trace_ufs_err_mcq_delete_cq_not_exists(qid);
592         return false;
593     }
594 
595     for (int i = 0; i < ARRAY_SIZE(u->sq); i++) {
596         if (u->sq[i] && u->sq[i]->cq->cqid == qid) {
597             trace_ufs_err_mcq_delete_cq_sq_not_deleted(i, qid);
598             return false;
599         }
600     }
601 
602     cq = u->cq[qid];
603 
604     qemu_bh_delete(cq->bh);
605     g_free(cq);
606     u->cq[qid] = NULL;
607     return true;
608 }
609 
610 static void ufs_write_reg(UfsHc *u, hwaddr offset, uint32_t data, unsigned size)
611 {
612     switch (offset) {
613     case A_IS:
614         u->reg.is &= ~data;
615         ufs_irq_check(u);
616         break;
617     case A_IE:
618         u->reg.ie = data;
619         ufs_irq_check(u);
620         break;
621     case A_HCE:
622         if (!FIELD_EX32(u->reg.hce, HCE, HCE) && FIELD_EX32(data, HCE, HCE)) {
623             u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UCRDY, 1);
624             u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 1);
625         } else if (FIELD_EX32(u->reg.hce, HCE, HCE) &&
626                    !FIELD_EX32(data, HCE, HCE)) {
627             u->reg.hcs = 0;
628             u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 0);
629         }
630         break;
631     case A_UTRLBA:
632         u->reg.utrlba = data & R_UTRLBA_UTRLBA_MASK;
633         break;
634     case A_UTRLBAU:
635         u->reg.utrlbau = data;
636         break;
637     case A_UTRLDBR:
638         ufs_process_db(u, data);
639         u->reg.utrldbr |= data;
640         break;
641     case A_UTRLRSR:
642         u->reg.utrlrsr = data;
643         break;
644     case A_UTRLCNR:
645         u->reg.utrlcnr &= ~data;
646         break;
647     case A_UTMRLBA:
648         u->reg.utmrlba = data & R_UTMRLBA_UTMRLBA_MASK;
649         break;
650     case A_UTMRLBAU:
651         u->reg.utmrlbau = data;
652         break;
653     case A_UICCMD:
654         ufs_process_uiccmd(u, data);
655         break;
656     case A_UCMDARG1:
657         u->reg.ucmdarg1 = data;
658         break;
659     case A_UCMDARG2:
660         u->reg.ucmdarg2 = data;
661         break;
662     case A_UCMDARG3:
663         u->reg.ucmdarg3 = data;
664         break;
665     case A_CONFIG:
666         u->reg.config = data;
667         break;
668     case A_MCQCONFIG:
669         u->reg.mcqconfig = data;
670         break;
671     case A_UTRLCLR:
672     case A_UTMRLDBR:
673     case A_UTMRLCLR:
674     case A_UTMRLRSR:
675         trace_ufs_err_unsupport_register_offset(offset);
676         break;
677     default:
678         trace_ufs_err_invalid_register_offset(offset);
679         break;
680     }
681 }
682 
683 static void ufs_write_mcq_reg(UfsHc *u, hwaddr offset, uint32_t data,
684                               unsigned size)
685 {
686     int qid = offset / sizeof(UfsMcqReg);
687     UfsMcqReg *reg = &u->mcq_reg[qid];
688 
689     switch (offset % sizeof(UfsMcqReg)) {
690     case A_SQATTR:
691         if (!FIELD_EX32(reg->sqattr, SQATTR, SQEN) &&
692             FIELD_EX32(data, SQATTR, SQEN)) {
693             if (!ufs_mcq_create_sq(u, qid, data)) {
694                 break;
695             }
696         } else if (FIELD_EX32(reg->sqattr, SQATTR, SQEN) &&
697                    !FIELD_EX32(data, SQATTR, SQEN)) {
698             if (!ufs_mcq_delete_sq(u, qid)) {
699                 break;
700             }
701         }
702         reg->sqattr = data;
703         break;
704     case A_SQLBA:
705         reg->sqlba = data;
706         break;
707     case A_SQUBA:
708         reg->squba = data;
709         break;
710     case A_SQCFG:
711         reg->sqcfg = data;
712         break;
713     case A_CQATTR:
714         if (!FIELD_EX32(reg->cqattr, CQATTR, CQEN) &&
715             FIELD_EX32(data, CQATTR, CQEN)) {
716             if (!ufs_mcq_create_cq(u, qid, data)) {
717                 break;
718             }
719         } else if (FIELD_EX32(reg->cqattr, CQATTR, CQEN) &&
720                    !FIELD_EX32(data, CQATTR, CQEN)) {
721             if (!ufs_mcq_delete_cq(u, qid)) {
722                 break;
723             }
724         }
725         reg->cqattr = data;
726         break;
727     case A_CQLBA:
728         reg->cqlba = data;
729         break;
730     case A_CQUBA:
731         reg->cquba = data;
732         break;
733     case A_CQCFG:
734         reg->cqcfg = data;
735         break;
736     case A_SQDAO:
737     case A_SQISAO:
738     case A_CQDAO:
739     case A_CQISAO:
740         trace_ufs_err_unsupport_register_offset(offset);
741         break;
742     default:
743         trace_ufs_err_invalid_register_offset(offset);
744         break;
745     }
746 }
747 
748 static void ufs_mcq_process_db(UfsHc *u, uint8_t qid, uint32_t db)
749 {
750     UfsSq *sq;
751 
752     if (qid >= u->params.mcq_maxq) {
753         trace_ufs_err_mcq_db_wr_invalid_sqid(qid);
754         return;
755     }
756 
757     sq = u->sq[qid];
758     if (sq->size * sizeof(UfsSqEntry) <= db) {
759         trace_ufs_err_mcq_db_wr_invalid_db(qid, db);
760         return;
761     }
762 
763     ufs_mcq_update_sq_tail(u, sq->sqid, db);
764     qemu_bh_schedule(sq->bh);
765 }
766 
767 static void ufs_write_mcq_op_reg(UfsHc *u, hwaddr offset, uint32_t data,
768                                  unsigned size)
769 {
770     int qid = offset / sizeof(UfsMcqOpReg);
771     UfsMcqOpReg *opr = &u->mcq_op_reg[qid];
772 
773     switch (offset % sizeof(UfsMcqOpReg)) {
774     case offsetof(UfsMcqOpReg, sq.tp):
775         if (opr->sq.tp != data) {
776             ufs_mcq_process_db(u, qid, data);
777         }
778         opr->sq.tp = data;
779         break;
780     case offsetof(UfsMcqOpReg, cq.hp):
781         opr->cq.hp = data;
782         ufs_mcq_update_cq_head(u, qid, data);
783         break;
784     case offsetof(UfsMcqOpReg, cq_int.is):
785         opr->cq_int.is &= ~data;
786         break;
787     default:
788         trace_ufs_err_invalid_register_offset(offset);
789         break;
790     }
791 }
792 
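/*
 * MMIO dispatch: offsets below sizeof(u->reg) address the standard UFSHCI
 * registers, while the MCQ configuration and operation/runtime register
 * windows follow at the offsets computed by ufs_mcq_reg_addr() and
 * ufs_mcq_op_reg_addr() above.
 */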
793 static uint64_t ufs_mmio_read(void *opaque, hwaddr addr, unsigned size)
794 {
795     UfsHc *u = (UfsHc *)opaque;
796     uint32_t *ptr;
797     uint64_t value;
798     uint64_t offset;
799 
800     if (addr + size <= sizeof(u->reg)) {
801         offset = addr;
802         ptr = (uint32_t *)&u->reg;
803     } else if (ufs_is_mcq_reg(u, addr, size)) {
804         offset = addr - ufs_mcq_reg_addr(u, 0);
805         ptr = (uint32_t *)&u->mcq_reg;
806     } else if (ufs_is_mcq_op_reg(u, addr, size)) {
807         offset = addr - ufs_mcq_op_reg_addr(u, 0);
808         ptr = (uint32_t *)&u->mcq_op_reg;
809     } else {
810         trace_ufs_err_invalid_register_offset(addr);
811         return 0;
812     }
813 
814     value = ptr[offset >> 2];
815     trace_ufs_mmio_read(addr, value, size);
816     return value;
817 }
818 
819 static void ufs_mmio_write(void *opaque, hwaddr addr, uint64_t data,
820                            unsigned size)
821 {
822     UfsHc *u = (UfsHc *)opaque;
823 
824     trace_ufs_mmio_write(addr, data, size);
825 
826     if (addr + size <= sizeof(u->reg)) {
827         ufs_write_reg(u, addr, data, size);
828     } else if (ufs_is_mcq_reg(u, addr, size)) {
829         ufs_write_mcq_reg(u, addr - ufs_mcq_reg_addr(u, 0), data, size);
830     } else if (ufs_is_mcq_op_reg(u, addr, size)) {
831         ufs_write_mcq_op_reg(u, addr - ufs_mcq_op_reg_addr(u, 0), data, size);
832     } else {
833         trace_ufs_err_invalid_register_offset(addr);
834     }
835 }
836 
837 static const MemoryRegionOps ufs_mmio_ops = {
838     .read = ufs_mmio_read,
839     .write = ufs_mmio_write,
840     .endianness = DEVICE_LITTLE_ENDIAN,
841     .impl = {
842         .min_access_size = 4,
843         .max_access_size = 4,
844     },
845 };
846 
847 static void ufs_update_ee_status(UfsHc *u)
848 {
849     uint16_t ee_status = be16_to_cpu(u->attributes.exception_event_status);
850     uint8_t high_temp_thresh = u->attributes.device_too_high_temp_boundary;
851     uint8_t low_temp_thresh = u->attributes.device_too_low_temp_boundary;
852 
853     if (u->temperature >= high_temp_thresh) {
854         ee_status |= MASK_EE_TOO_HIGH_TEMP;
855     } else {
856         ee_status &= ~MASK_EE_TOO_HIGH_TEMP;
857     }
858 
859     if (u->temperature <= low_temp_thresh) {
860         ee_status |= MASK_EE_TOO_LOW_TEMP;
861     } else {
862         ee_status &= ~MASK_EE_TOO_LOW_TEMP;
863     }
864 
865     u->attributes.exception_event_status = cpu_to_be16(ee_status);
866 }
867 
868 static bool ufs_check_exception_event_alert(UfsHc *u, uint8_t trans_type)
869 {
870     uint16_t ee_control = be16_to_cpu(u->attributes.exception_event_control);
871     uint16_t ee_status;
872 
873     if (trans_type != UFS_UPIU_TRANSACTION_RESPONSE) {
874         return false;
875     }
876 
877     ufs_update_ee_status(u);
878 
879     ee_status = be16_to_cpu(u->attributes.exception_event_status);
880 
881     return ee_control & ee_status;
882 }
883 
884 void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags,
885                            uint8_t response, uint8_t scsi_status,
886                            uint16_t data_segment_length)
887 {
888     memcpy(&req->rsp_upiu.header, &req->req_upiu.header, sizeof(UtpUpiuHeader));
889     req->rsp_upiu.header.trans_type = trans_type;
890     req->rsp_upiu.header.flags = flags;
891     req->rsp_upiu.header.response = response;
892     req->rsp_upiu.header.scsi_status = scsi_status;
893     req->rsp_upiu.header.device_inf =
894         ufs_check_exception_event_alert(req->hc, trans_type);
895     req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length);
896 }
897 
898 void ufs_build_query_response(UfsRequest *req)
899 {
900     req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode;
901     req->rsp_upiu.qr.idn = req->req_upiu.qr.idn;
902     req->rsp_upiu.qr.index = req->req_upiu.qr.index;
903     req->rsp_upiu.qr.selector = req->req_upiu.qr.selector;
904 }
905 
906 static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req)
907 {
908     UfsHc *u = req->hc;
909     uint8_t lun = req->req_upiu.header.lun;
910 
911     UfsLu *lu = NULL;
912 
913     trace_ufs_exec_scsi_cmd(req->slot, lun, req->req_upiu.sc.cdb[0]);
914 
915     if (!is_wlun(lun) && (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
916         trace_ufs_err_scsi_cmd_invalid_lun(lun);
917         return UFS_REQUEST_FAIL;
918     }
919 
920     switch (lun) {
921     case UFS_UPIU_REPORT_LUNS_WLUN:
922         lu = &u->report_wlu;
923         break;
924     case UFS_UPIU_UFS_DEVICE_WLUN:
925         lu = &u->dev_wlu;
926         break;
927     case UFS_UPIU_BOOT_WLUN:
928         lu = &u->boot_wlu;
929         break;
930     case UFS_UPIU_RPMB_WLUN:
931         lu = &u->rpmb_wlu;
932         break;
933     default:
934         lu = u->lus[lun];
935     }
936 
937     return lu->scsi_op(lu, req);
938 }
939 
940 static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req)
941 {
942     trace_ufs_exec_nop_cmd(req->slot);
943     ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_NOP_IN, 0, 0, 0, 0);
944     return UFS_REQUEST_SUCCESS;
945 }
946 
947 /*
948  * This table defines the permissions of flags based on their IDN. Some flags
949  * are declared read-only even though the UFS spec allows writing them, because
950  * we want to return an error for features that are not yet supported.
951  */
952 static const int flag_permission[UFS_QUERY_FLAG_IDN_COUNT] = {
953     [UFS_QUERY_FLAG_IDN_FDEVICEINIT] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET,
954     /* Write protection is not supported */
955     [UFS_QUERY_FLAG_IDN_PERMANENT_WPE] = UFS_QUERY_FLAG_READ,
956     [UFS_QUERY_FLAG_IDN_PWR_ON_WPE] = UFS_QUERY_FLAG_READ,
957     [UFS_QUERY_FLAG_IDN_BKOPS_EN] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET |
958                                     UFS_QUERY_FLAG_CLEAR |
959                                     UFS_QUERY_FLAG_TOGGLE,
960     [UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE] =
961         UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET | UFS_QUERY_FLAG_CLEAR |
962         UFS_QUERY_FLAG_TOGGLE,
963     /* Purge Operation is not supported */
964     [UFS_QUERY_FLAG_IDN_PURGE_ENABLE] = UFS_QUERY_FLAG_NONE,
965     /* Refresh Operation is not supported */
966     [UFS_QUERY_FLAG_IDN_REFRESH_ENABLE] = UFS_QUERY_FLAG_NONE,
967     /* Physical Resource Removal is not supported */
968     [UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL] = UFS_QUERY_FLAG_READ,
969     [UFS_QUERY_FLAG_IDN_BUSY_RTC] = UFS_QUERY_FLAG_READ,
970     [UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE] = UFS_QUERY_FLAG_READ,
971     /* Write Booster is not supported */
972     [UFS_QUERY_FLAG_IDN_WB_EN] = UFS_QUERY_FLAG_READ,
973     [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN] = UFS_QUERY_FLAG_READ,
974     [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8] = UFS_QUERY_FLAG_READ,
975 };
976 
977 static inline QueryRespCode ufs_flag_check_idn_valid(uint8_t idn, int op)
978 {
979     if (idn >= UFS_QUERY_FLAG_IDN_COUNT) {
980         return UFS_QUERY_RESULT_INVALID_IDN;
981     }
982 
983     if (!(flag_permission[idn] & op)) {
984         if (op == UFS_QUERY_FLAG_READ) {
985             trace_ufs_err_query_flag_not_readable(idn);
986             return UFS_QUERY_RESULT_NOT_READABLE;
987         }
988         trace_ufs_err_query_flag_not_writable(idn);
989         return UFS_QUERY_RESULT_NOT_WRITEABLE;
990     }
991 
992     return UFS_QUERY_RESULT_SUCCESS;
993 }
994 
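/*
 * Per-IDN access permissions for query Attributes. IDNs without an explicit
 * entry default to 0, i.e. they are neither readable nor writable.
 */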
995 static const int attr_permission[UFS_QUERY_ATTR_IDN_COUNT] = {
996     /* booting is not supported */
997     [UFS_QUERY_ATTR_IDN_BOOT_LU_EN] = UFS_QUERY_ATTR_READ,
998     [UFS_QUERY_ATTR_IDN_POWER_MODE] = UFS_QUERY_ATTR_READ,
999     [UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL] =
1000         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
1001     [UFS_QUERY_ATTR_IDN_OOO_DATA_EN] = UFS_QUERY_ATTR_READ,
1002     [UFS_QUERY_ATTR_IDN_BKOPS_STATUS] = UFS_QUERY_ATTR_READ,
1003     [UFS_QUERY_ATTR_IDN_PURGE_STATUS] = UFS_QUERY_ATTR_READ,
1004     [UFS_QUERY_ATTR_IDN_MAX_DATA_IN] =
1005         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
1006     [UFS_QUERY_ATTR_IDN_MAX_DATA_OUT] =
1007         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
1008     [UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED] = UFS_QUERY_ATTR_READ,
1009     [UFS_QUERY_ATTR_IDN_REF_CLK_FREQ] =
1010         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
1011     [UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK] = UFS_QUERY_ATTR_READ,
1012     [UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT] =
1013         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
1014     [UFS_QUERY_ATTR_IDN_EE_CONTROL] =
1015         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
1016     [UFS_QUERY_ATTR_IDN_EE_STATUS] = UFS_QUERY_ATTR_READ,
1017     [UFS_QUERY_ATTR_IDN_SECONDS_PASSED] = UFS_QUERY_ATTR_WRITE,
1018     [UFS_QUERY_ATTR_IDN_CNTX_CONF] = UFS_QUERY_ATTR_READ,
1019     [UFS_QUERY_ATTR_IDN_FFU_STATUS] = UFS_QUERY_ATTR_READ,
1020     [UFS_QUERY_ATTR_IDN_PSA_STATE] = UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
1021     [UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE] =
1022         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
1023     [UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME] = UFS_QUERY_ATTR_READ,
1024     [UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP] = UFS_QUERY_ATTR_READ,
1025     [UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
1026     [UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
1027     [UFS_QUERY_ATTR_IDN_THROTTLING_STATUS] = UFS_QUERY_ATTR_READ,
1028     [UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS] = UFS_QUERY_ATTR_READ,
1029     [UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
1030     [UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST] = UFS_QUERY_ATTR_READ,
1031     [UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
1032     /* refresh operation is not supported */
1033     [UFS_QUERY_ATTR_IDN_REFRESH_STATUS] = UFS_QUERY_ATTR_READ,
1034     [UFS_QUERY_ATTR_IDN_REFRESH_FREQ] = UFS_QUERY_ATTR_READ,
1035     [UFS_QUERY_ATTR_IDN_REFRESH_UNIT] = UFS_QUERY_ATTR_READ,
1036 };
1037 
1038 static inline QueryRespCode ufs_attr_check_idn_valid(uint8_t idn, int op)
1039 {
1040     if (idn >= UFS_QUERY_ATTR_IDN_COUNT) {
1041         return UFS_QUERY_RESULT_INVALID_IDN;
1042     }
1043 
1044     if (!(attr_permission[idn] & op)) {
1045         if (op == UFS_QUERY_ATTR_READ) {
1046             trace_ufs_err_query_attr_not_readable(idn);
1047             return UFS_QUERY_RESULT_NOT_READABLE;
1048         }
1049         trace_ufs_err_query_attr_not_writable(idn);
1050         return UFS_QUERY_RESULT_NOT_WRITEABLE;
1051     }
1052 
1053     return UFS_QUERY_RESULT_SUCCESS;
1054 }
1055 
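/*
 * Handle a Flag query: READ returns the stored flag, while SET/CLEAR/TOGGLE
 * update it. fDeviceInit is special-cased to always read back as 0, so device
 * initialization appears to complete immediately.
 */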
1056 static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op)
1057 {
1058     UfsHc *u = req->hc;
1059     uint8_t idn = req->req_upiu.qr.idn;
1060     uint32_t value;
1061     QueryRespCode ret;
1062 
1063     ret = ufs_flag_check_idn_valid(idn, op);
1064     if (ret) {
1065         return ret;
1066     }
1067 
1068     if (idn == UFS_QUERY_FLAG_IDN_FDEVICEINIT) {
1069         value = 0;
1070     } else if (op == UFS_QUERY_FLAG_READ) {
1071         value = *(((uint8_t *)&u->flags) + idn);
1072     } else if (op == UFS_QUERY_FLAG_SET) {
1073         value = 1;
1074     } else if (op == UFS_QUERY_FLAG_CLEAR) {
1075         value = 0;
1076     } else if (op == UFS_QUERY_FLAG_TOGGLE) {
1077         value = *(((uint8_t *)&u->flags) + idn);
1078         value = !value;
1079     } else {
1080         trace_ufs_err_query_invalid_opcode(op);
1081         return UFS_QUERY_RESULT_INVALID_OPCODE;
1082     }
1083 
1084     *(((uint8_t *)&u->flags) + idn) = value;
1085     req->rsp_upiu.qr.value = cpu_to_be32(value);
1086     return UFS_QUERY_RESULT_SUCCESS;
1087 }
1088 
1089 static inline uint8_t ufs_read_device_temp(UfsHc *u)
1090 {
1091     uint8_t feat_sup = u->device_desc.ufs_features_support;
1092     bool high_temp_sup, low_temp_sup, high_temp_en, low_temp_en;
1093     uint16_t ee_control = be16_to_cpu(u->attributes.exception_event_control);
1094 
1095     high_temp_sup = feat_sup & UFS_DEV_HIGH_TEMP_NOTIF;
1096     low_temp_sup = feat_sup & UFS_DEV_LOW_TEMP_NOTIF;
1097     high_temp_en = ee_control & MASK_EE_TOO_HIGH_TEMP;
1098     low_temp_en = ee_control & MASK_EE_TOO_LOW_TEMP;
1099 
1100     if ((high_temp_sup && high_temp_en) ||
1101         (low_temp_sup && low_temp_en)) {
1102         return u->temperature;
1103     }
1104 
1105     return 0;
1106 }
1107 
1108 static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
1109 {
1110     switch (idn) {
1111     case UFS_QUERY_ATTR_IDN_BOOT_LU_EN:
1112         return u->attributes.boot_lun_en;
1113     case UFS_QUERY_ATTR_IDN_POWER_MODE:
1114         return u->attributes.current_power_mode;
1115     case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
1116         return u->attributes.active_icc_level;
1117     case UFS_QUERY_ATTR_IDN_OOO_DATA_EN:
1118         return u->attributes.out_of_order_data_en;
1119     case UFS_QUERY_ATTR_IDN_BKOPS_STATUS:
1120         return u->attributes.background_op_status;
1121     case UFS_QUERY_ATTR_IDN_PURGE_STATUS:
1122         return u->attributes.purge_status;
1123     case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
1124         return u->attributes.max_data_in_size;
1125     case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
1126         return u->attributes.max_data_out_size;
1127     case UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED:
1128         return be32_to_cpu(u->attributes.dyn_cap_needed);
1129     case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
1130         return u->attributes.ref_clk_freq;
1131     case UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK:
1132         return u->attributes.config_descr_lock;
1133     case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
1134         return u->attributes.max_num_of_rtt;
1135     case UFS_QUERY_ATTR_IDN_EE_CONTROL:
1136         return be16_to_cpu(u->attributes.exception_event_control);
1137     case UFS_QUERY_ATTR_IDN_EE_STATUS:
1138         ufs_update_ee_status(u);
1139         return be16_to_cpu(u->attributes.exception_event_status);
1140     case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
1141         return be32_to_cpu(u->attributes.seconds_passed);
1142     case UFS_QUERY_ATTR_IDN_CNTX_CONF:
1143         return be16_to_cpu(u->attributes.context_conf);
1144     case UFS_QUERY_ATTR_IDN_FFU_STATUS:
1145         return u->attributes.device_ffu_status;
1146     case UFS_QUERY_ATTR_IDN_PSA_STATE:
1147         return be32_to_cpu(u->attributes.psa_state);
1148     case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
1149         return be32_to_cpu(u->attributes.psa_data_size);
1150     case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME:
1151         return u->attributes.ref_clk_gating_wait_time;
1152     case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP:
1153         u->attributes.device_case_rough_temperature = ufs_read_device_temp(u);
1154         return u->attributes.device_case_rough_temperature;
1155     case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND:
1156         return u->attributes.device_too_high_temp_boundary;
1157     case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND:
1158         return u->attributes.device_too_low_temp_boundary;
1159     case UFS_QUERY_ATTR_IDN_THROTTLING_STATUS:
1160         return u->attributes.throttling_status;
1161     case UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS:
1162         return u->attributes.wb_buffer_flush_status;
1163     case UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE:
1164         return u->attributes.available_wb_buffer_size;
1165     case UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST:
1166         return u->attributes.wb_buffer_life_time_est;
1167     case UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE:
1168         return be32_to_cpu(u->attributes.current_wb_buffer_size);
1169     case UFS_QUERY_ATTR_IDN_REFRESH_STATUS:
1170         return u->attributes.refresh_status;
1171     case UFS_QUERY_ATTR_IDN_REFRESH_FREQ:
1172         return u->attributes.refresh_freq;
1173     case UFS_QUERY_ATTR_IDN_REFRESH_UNIT:
1174         return u->attributes.refresh_unit;
1175     }
1176     return 0;
1177 }
1178 
1179 static QueryRespCode ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
1180 {
1181     switch (idn) {
1182     case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
1183         if (value > UFS_QUERY_ATTR_ACTIVE_ICC_MAXVALUE) {
1184             return UFS_QUERY_RESULT_INVALID_VALUE;
1185         }
1186         u->attributes.active_icc_level = value;
1187         break;
1188     case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
1189         u->attributes.max_data_in_size = value;
1190         break;
1191     case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
1192         u->attributes.max_data_out_size = value;
1193         break;
1194     case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
1195         u->attributes.ref_clk_freq = value;
1196         break;
1197     case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
1198         u->attributes.max_num_of_rtt = value;
1199         break;
1200     case UFS_QUERY_ATTR_IDN_EE_CONTROL:
1201         u->attributes.exception_event_control = cpu_to_be16(value);
1202         break;
1203     case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
1204         u->attributes.seconds_passed = cpu_to_be32(value);
1205         break;
1206     case UFS_QUERY_ATTR_IDN_PSA_STATE:
1207         u->attributes.psa_state = value;
1208         break;
1209     case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
1210         u->attributes.psa_data_size = cpu_to_be32(value);
1211         break;
1212     }
1213     return UFS_QUERY_RESULT_SUCCESS;
1214 }
1215 
1216 static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op)
1217 {
1218     UfsHc *u = req->hc;
1219     uint8_t idn = req->req_upiu.qr.idn;
1220     uint32_t value;
1221     QueryRespCode ret;
1222 
1223     ret = ufs_attr_check_idn_valid(idn, op);
1224     if (ret) {
1225         return ret;
1226     }
1227 
1228     if (op == UFS_QUERY_ATTR_READ) {
1229         value = ufs_read_attr_value(u, idn);
1230         ret = UFS_QUERY_RESULT_SUCCESS;
1231     } else {
1232         value = be32_to_cpu(req->req_upiu.qr.value);
1233         ret = ufs_write_attr_value(u, idn, value);
1234     }
1235     req->rsp_upiu.qr.value = cpu_to_be32(value);
1236     return ret;
1237 }
1238 
1239 static const RpmbUnitDescriptor rpmb_unit_desc = {
1240     .length = sizeof(RpmbUnitDescriptor),
1241     .descriptor_idn = 2,
1242     .unit_index = UFS_UPIU_RPMB_WLUN,
1243     .lu_enable = 0,
1244 };
1245 
1246 static QueryRespCode ufs_read_unit_desc(UfsRequest *req)
1247 {
1248     UfsHc *u = req->hc;
1249     uint8_t lun = req->req_upiu.qr.index;
1250 
1251     if (lun != UFS_UPIU_RPMB_WLUN &&
1252         (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
1253         trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, lun);
1254         return UFS_QUERY_RESULT_INVALID_INDEX;
1255     }
1256 
1257     if (lun == UFS_UPIU_RPMB_WLUN) {
1258         memcpy(&req->rsp_upiu.qr.data, &rpmb_unit_desc, rpmb_unit_desc.length);
1259     } else {
1260         memcpy(&req->rsp_upiu.qr.data, &u->lus[lun]->unit_desc,
1261                sizeof(u->lus[lun]->unit_desc));
1262     }
1263 
1264     return UFS_QUERY_RESULT_SUCCESS;
1265 }
1266 
1267 static inline StringDescriptor manufacturer_str_desc(void)
1268 {
1269     StringDescriptor desc = {
1270         .length = 0x12,
1271         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1272     };
1273     desc.UC[0] = cpu_to_be16('R');
1274     desc.UC[1] = cpu_to_be16('E');
1275     desc.UC[2] = cpu_to_be16('D');
1276     desc.UC[3] = cpu_to_be16('H');
1277     desc.UC[4] = cpu_to_be16('A');
1278     desc.UC[5] = cpu_to_be16('T');
1279     return desc;
1280 }
1281 
1282 static inline StringDescriptor product_name_str_desc(void)
1283 {
1284     StringDescriptor desc = {
1285         .length = 0x22,
1286         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1287     };
1288     desc.UC[0] = cpu_to_be16('Q');
1289     desc.UC[1] = cpu_to_be16('E');
1290     desc.UC[2] = cpu_to_be16('M');
1291     desc.UC[3] = cpu_to_be16('U');
1292     desc.UC[4] = cpu_to_be16(' ');
1293     desc.UC[5] = cpu_to_be16('U');
1294     desc.UC[6] = cpu_to_be16('F');
1295     desc.UC[7] = cpu_to_be16('S');
1296     return desc;
1297 }
1298 
1299 static inline StringDescriptor product_rev_level_str_desc(void)
1300 {
1301     StringDescriptor desc = {
1302         .length = 0x0a,
1303         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1304     };
1305     desc.UC[0] = cpu_to_be16('0');
1306     desc.UC[1] = cpu_to_be16('0');
1307     desc.UC[2] = cpu_to_be16('0');
1308     desc.UC[3] = cpu_to_be16('1');
1309     return desc;
1310 }
1311 
1312 static const StringDescriptor null_str_desc = {
1313     .length = 0x02,
1314     .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1315 };
1316 
1317 static QueryRespCode ufs_read_string_desc(UfsRequest *req)
1318 {
1319     UfsHc *u = req->hc;
1320     uint8_t index = req->req_upiu.qr.index;
1321     StringDescriptor desc;
1322 
1323     if (index == u->device_desc.manufacturer_name) {
1324         desc = manufacturer_str_desc();
1325         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1326     } else if (index == u->device_desc.product_name) {
1327         desc = product_name_str_desc();
1328         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1329     } else if (index == u->device_desc.serial_number) {
1330         memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
1331     } else if (index == u->device_desc.oem_id) {
1332         memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
1333     } else if (index == u->device_desc.product_revision_level) {
1334         desc = product_rev_level_str_desc();
1335         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1336     } else {
1337         trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, index);
1338         return UFS_QUERY_RESULT_INVALID_INDEX;
1339     }
1340     return UFS_QUERY_RESULT_SUCCESS;
1341 }
1342 
1343 static inline InterconnectDescriptor interconnect_desc(void)
1344 {
1345     InterconnectDescriptor desc = {
1346         .length = sizeof(InterconnectDescriptor),
1347         .descriptor_idn = UFS_QUERY_DESC_IDN_INTERCONNECT,
1348     };
1349     desc.bcd_unipro_version = cpu_to_be16(0x180);
1350     desc.bcd_mphy_version = cpu_to_be16(0x410);
1351     return desc;
1352 }
1353 
1354 static QueryRespCode ufs_read_desc(UfsRequest *req)
1355 {
1356     UfsHc *u = req->hc;
1357     QueryRespCode status;
1358     uint8_t idn = req->req_upiu.qr.idn;
1359     uint8_t selector = req->req_upiu.qr.selector;
1360     uint16_t length = be16_to_cpu(req->req_upiu.qr.length);
1361     InterconnectDescriptor desc;
1362     if (selector != 0) {
1363         return UFS_QUERY_RESULT_INVALID_SELECTOR;
1364     }
1365     switch (idn) {
1366     case UFS_QUERY_DESC_IDN_DEVICE:
1367         memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc));
1368         status = UFS_QUERY_RESULT_SUCCESS;
1369         break;
1370     case UFS_QUERY_DESC_IDN_UNIT:
1371         status = ufs_read_unit_desc(req);
1372         break;
1373     case UFS_QUERY_DESC_IDN_GEOMETRY:
1374         memcpy(&req->rsp_upiu.qr.data, &u->geometry_desc,
1375                sizeof(u->geometry_desc));
1376         status = UFS_QUERY_RESULT_SUCCESS;
1377         break;
1378     case UFS_QUERY_DESC_IDN_INTERCONNECT: {
1379         desc = interconnect_desc();
1380         memcpy(&req->rsp_upiu.qr.data, &desc, sizeof(InterconnectDescriptor));
1381         status = UFS_QUERY_RESULT_SUCCESS;
1382         break;
1383     }
1384     case UFS_QUERY_DESC_IDN_STRING:
1385         status = ufs_read_string_desc(req);
1386         break;
1387     case UFS_QUERY_DESC_IDN_POWER:
1388         /* mocking of power descriptor is not supported */
1389         memset(&req->rsp_upiu.qr.data, 0, sizeof(PowerParametersDescriptor));
1390         req->rsp_upiu.qr.data[0] = sizeof(PowerParametersDescriptor);
1391         req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_POWER;
1392         status = UFS_QUERY_RESULT_SUCCESS;
1393         break;
1394     case UFS_QUERY_DESC_IDN_HEALTH:
1395         /* mocking of health descriptor is not supported */
1396         memset(&req->rsp_upiu.qr.data, 0, sizeof(DeviceHealthDescriptor));
1397         req->rsp_upiu.qr.data[0] = sizeof(DeviceHealthDescriptor);
1398         req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_HEALTH;
1399         status = UFS_QUERY_RESULT_SUCCESS;
1400         break;
1401     default:
1402         length = 0;
1403         trace_ufs_err_query_invalid_idn(req->req_upiu.qr.opcode, idn);
1404         status = UFS_QUERY_RESULT_INVALID_IDN;
1405     }
1406 
1407     if (length > req->rsp_upiu.qr.data[0]) {
1408         length = req->rsp_upiu.qr.data[0];
1409     }
1410     req->rsp_upiu.qr.length = cpu_to_be16(length);
1411 
1412     return status;
1413 }
1414 
1415 static QueryRespCode ufs_exec_query_read(UfsRequest *req)
1416 {
1417     QueryRespCode status;
1418     switch (req->req_upiu.qr.opcode) {
1419     case UFS_UPIU_QUERY_OPCODE_NOP:
1420         status = UFS_QUERY_RESULT_SUCCESS;
1421         break;
1422     case UFS_UPIU_QUERY_OPCODE_READ_DESC:
1423         status = ufs_read_desc(req);
1424         break;
1425     case UFS_UPIU_QUERY_OPCODE_READ_ATTR:
1426         status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_READ);
1427         break;
1428     case UFS_UPIU_QUERY_OPCODE_READ_FLAG:
1429         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_READ);
1430         break;
1431     default:
1432         trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
1433         status = UFS_QUERY_RESULT_INVALID_OPCODE;
1434         break;
1435     }
1436 
1437     return status;
1438 }
1439 
1440 static QueryRespCode ufs_exec_query_write(UfsRequest *req)
1441 {
1442     QueryRespCode status;
1443     switch (req->req_upiu.qr.opcode) {
1444     case UFS_UPIU_QUERY_OPCODE_NOP:
1445         status = UFS_QUERY_RESULT_SUCCESS;
1446         break;
1447     case UFS_UPIU_QUERY_OPCODE_WRITE_DESC:
1448         /* write descriptor is not supported */
1449         status = UFS_QUERY_RESULT_NOT_WRITEABLE;
1450         break;
1451     case UFS_UPIU_QUERY_OPCODE_WRITE_ATTR:
1452         status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_WRITE);
1453         break;
1454     case UFS_UPIU_QUERY_OPCODE_SET_FLAG:
1455         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_SET);
1456         break;
1457     case UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG:
1458         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_CLEAR);
1459         break;
1460     case UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1461         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_TOGGLE);
1462         break;
1463     default:
1464         trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
1465         status = UFS_QUERY_RESULT_INVALID_OPCODE;
1466         break;
1467     }
1468 
1469     return status;
1470 }
1471 
1472 static UfsReqResult ufs_exec_query_cmd(UfsRequest *req)
1473 {
1474     uint8_t query_func = req->req_upiu.header.query_func;
1475     uint16_t data_segment_length;
1476     QueryRespCode status;
1477 
1478     trace_ufs_exec_query_cmd(req->slot, req->req_upiu.qr.opcode);
1479     if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST) {
1480         status = ufs_exec_query_read(req);
1481     } else if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST) {
1482         status = ufs_exec_query_write(req);
1483     } else {
1484         status = UFS_QUERY_RESULT_GENERAL_FAILURE;
1485     }
1486 
1487     data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length);
1488     ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0,
1489                           data_segment_length);
1490     ufs_build_query_response(req);
1491 
1492     if (status != UFS_QUERY_RESULT_SUCCESS) {
1493         return UFS_REQUEST_FAIL;
1494     }
1495     return UFS_REQUEST_SUCCESS;
1496 }
1497 
1498 static void ufs_exec_req(UfsRequest *req)
1499 {
1500     UfsReqResult req_result;
1501 
1502     if (ufs_dma_read_upiu(req)) {
1503         return;
1504     }
1505 
1506     switch (req->req_upiu.header.trans_type) {
1507     case UFS_UPIU_TRANSACTION_NOP_OUT:
1508         req_result = ufs_exec_nop_cmd(req);
1509         break;
1510     case UFS_UPIU_TRANSACTION_COMMAND:
1511         req_result = ufs_exec_scsi_cmd(req);
1512         break;
1513     case UFS_UPIU_TRANSACTION_QUERY_REQ:
1514         req_result = ufs_exec_query_cmd(req);
1515         break;
1516     default:
1517         trace_ufs_err_invalid_trans_code(req->slot,
1518                                          req->req_upiu.header.trans_type);
1519         req_result = UFS_REQUEST_FAIL;
1520     }
1521 
1522     /*
1523      * The ufs_complete_req for scsi commands is handled by the
1524      * ufs_scsi_command_complete() callback function. Therefore, to avoid
1525      * duplicate processing, ufs_complete_req() is not called for scsi commands.
1526      */
1527     if (req_result != UFS_REQUEST_NO_COMPLETE) {
1528         ufs_complete_req(req, req_result);
1529     }
1530 }
1531 
1532 static void ufs_process_req(void *opaque)
1533 {
1534     UfsHc *u = opaque;
1535     UfsRequest *req;
1536     int slot;
1537 
1538     for (slot = 0; slot < u->params.nutrs; slot++) {
1539         req = &u->req_list[slot];
1540 
1541         if (req->state != UFS_REQUEST_READY) {
1542             continue;
1543         }
1544         trace_ufs_process_req(slot);
1545         req->state = UFS_REQUEST_RUNNING;
1546 
1547         ufs_exec_req(req);
1548     }
1549 }
1550 
1551 void ufs_complete_req(UfsRequest *req, UfsReqResult req_result)
1552 {
1553     UfsHc *u = req->hc;
1554     assert(req->state == UFS_REQUEST_RUNNING);
1555 
1556     if (req_result == UFS_REQUEST_SUCCESS) {
1557         req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_SUCCESS);
1558     } else {
1559         req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_CMD_TABLE_ATTR);
1560     }
1561 
1562     req->state = UFS_REQUEST_COMPLETE;
1563 
1564     if (ufs_mcq_req(req)) {
1565         trace_ufs_mcq_complete_req(req->sq->sqid);
1566         QTAILQ_INSERT_TAIL(&req->sq->cq->req_list, req, entry);
1567         qemu_bh_schedule(req->sq->cq->bh);
1568     } else {
1569         trace_ufs_complete_req(req->slot);
1570         qemu_bh_schedule(u->complete_bh);
1571     }
1572 }
1573 
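/*
 * Release the request's scatter/gather list and zero its descriptor and
 * UPIU buffers so the slot can be reused.
 */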
static void ufs_clear_req(UfsRequest *req)
{
    if (req->sg != NULL) {
        qemu_sglist_destroy(req->sg);
        g_free(req->sg);
        req->sg = NULL;
        req->data_len = 0;
    }

    memset(&req->utrd, 0, sizeof(req->utrd));
    memset(&req->req_upiu, 0, sizeof(req->req_upiu));
    memset(&req->rsp_upiu, 0, sizeof(req->rsp_upiu));
}

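/*
 * Completion bottom half for legacy (doorbell) mode: write the response
 * UPIU and UTRD back to guest memory, raise the UTRCS interrupt when
 * required, then clear the doorbell bit and set the completion notification
 * bit for the slot.
 */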
static void ufs_sendback_req(void *opaque)
{
    UfsHc *u = opaque;
    UfsRequest *req;
    int slot;

    for (slot = 0; slot < u->params.nutrs; slot++) {
        req = &u->req_list[slot];

        if (req->state != UFS_REQUEST_COMPLETE) {
            continue;
        }

        if (ufs_dma_write_upiu(req)) {
            req->state = UFS_REQUEST_ERROR;
            continue;
        }

        /*
         * TODO: UTP Transfer Request Interrupt Aggregation Control is not yet
         * supported
         */
        if (le32_to_cpu(req->utrd.header.dword_2) != UFS_OCS_SUCCESS ||
            le32_to_cpu(req->utrd.header.dword_0) & UFS_UTP_REQ_DESC_INT_CMD) {
            u->reg.is = FIELD_DP32(u->reg.is, IS, UTRCS, 1);
        }

        u->reg.utrldbr &= ~(1 << slot);
        u->reg.utrlcnr |= (1 << slot);

        trace_ufs_sendback_req(req->slot);

        ufs_clear_req(req);
        req->state = UFS_REQUEST_IDLE;
    }

    ufs_irq_check(u);
}

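/*
 * Validate the user-supplied nutrs, nutmrs and mcq-maxq properties against
 * the controller limits before the device is realized.
 */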
static bool ufs_check_constraints(UfsHc *u, Error **errp)
{
    if (u->params.nutrs > UFS_MAX_NUTRS) {
        error_setg(errp, "nutrs must be less than or equal to %d",
                   UFS_MAX_NUTRS);
        return false;
    }

    if (u->params.nutmrs > UFS_MAX_NUTMRS) {
        error_setg(errp, "nutmrs must be less than or equal to %d",
                   UFS_MAX_NUTMRS);
        return false;
    }

    if (u->params.mcq_maxq >= UFS_MAX_MCQ_QNUM) {
        error_setg(errp, "mcq-maxq must be less than %d", UFS_MAX_MCQ_QNUM);
        return false;
    }

    return true;
}

static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x1);

    memory_region_init_io(&u->iomem, OBJECT(u), &ufs_mmio_ops, u, "ufs",
                          u->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &u->iomem);
    u->irq = pci_allocate_irq(pci_dev);
}

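/*
 * Allocate the UTP transfer request slots and the bottom halves that drive
 * request processing and completion.
 */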
static void ufs_init_state(UfsHc *u)
{
    u->req_list = g_new0(UfsRequest, u->params.nutrs);

    for (int i = 0; i < u->params.nutrs; i++) {
        u->req_list[i].hc = u;
        u->req_list[i].slot = i;
        u->req_list[i].sg = NULL;
        u->req_list[i].state = UFS_REQUEST_IDLE;
    }

    u->doorbell_bh = qemu_bh_new_guarded(ufs_process_req, u,
                                         &DEVICE(u)->mem_reentrancy_guard);
    u->complete_bh = qemu_bh_new_guarded(ufs_sendback_req, u,
                                         &DEVICE(u)->mem_reentrancy_guard);

    if (u->params.mcq) {
        memset(u->sq, 0, sizeof(u->sq));
        memset(u->cq, 0, sizeof(u->cq));
    }
}

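/*
 * Initialize the host controller registers and the read-only device model
 * data (device and geometry descriptors, attributes and flags) returned to
 * query requests.
 */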
static void ufs_init_hc(UfsHc *u)
{
    uint32_t cap = 0;
    uint32_t mcqconfig = 0;
    uint32_t mcqcap = 0;

    u->reg_size = pow2ceil(ufs_reg_size(u));

    memset(&u->reg, 0, sizeof(u->reg));
    memset(&u->mcq_reg, 0, sizeof(u->mcq_reg));
    memset(&u->mcq_op_reg, 0, sizeof(u->mcq_op_reg));
    cap = FIELD_DP32(cap, CAP, NUTRS, (u->params.nutrs - 1));
    cap = FIELD_DP32(cap, CAP, RTT, 2);
    cap = FIELD_DP32(cap, CAP, NUTMRS, (u->params.nutmrs - 1));
    cap = FIELD_DP32(cap, CAP, AUTOH8, 0);
    cap = FIELD_DP32(cap, CAP, 64AS, 1);
    cap = FIELD_DP32(cap, CAP, OODDS, 0);
    cap = FIELD_DP32(cap, CAP, UICDMETMS, 0);
    cap = FIELD_DP32(cap, CAP, CS, 0);
    cap = FIELD_DP32(cap, CAP, LSDBS, 0);
    cap = FIELD_DP32(cap, CAP, MCQS, u->params.mcq);
    u->reg.cap = cap;

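    /*
     * MCQ support: advertise the queue capabilities and lay out each queue's
     * submission/completion doorbell and interrupt status registers back to
     * back in the MCQ operation and runtime register area.
     */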
    if (u->params.mcq) {
        mcqconfig = FIELD_DP32(mcqconfig, MCQCONFIG, MAC, 0x1f);
        u->reg.mcqconfig = mcqconfig;

        mcqcap = FIELD_DP32(mcqcap, MCQCAP, MAXQ, u->params.mcq_maxq - 1);
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, RRP, 1);
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, QCFGPTR, UFS_MCQ_QCFGPTR);
        u->reg.mcqcap = mcqcap;

        for (int i = 0; i < ARRAY_SIZE(u->mcq_reg); i++) {
            uint64_t addr = ufs_mcq_op_reg_addr(u, i);
            u->mcq_reg[i].sqdao = addr;
            u->mcq_reg[i].sqisao = addr + sizeof(UfsMcqSqReg);
            addr += sizeof(UfsMcqSqReg);
            u->mcq_reg[i].cqdao = addr + sizeof(UfsMcqSqIntReg);
            addr += sizeof(UfsMcqSqIntReg);
            u->mcq_reg[i].cqisao = addr + sizeof(UfsMcqCqReg);
        }
    }
    u->reg.ver = UFS_SPEC_VER;

    memset(&u->device_desc, 0, sizeof(DeviceDescriptor));
    u->device_desc.length = sizeof(DeviceDescriptor);
    u->device_desc.descriptor_idn = UFS_QUERY_DESC_IDN_DEVICE;
    u->device_desc.device_sub_class = 0x01;
    u->device_desc.number_lu = 0x00;
    u->device_desc.number_wlu = 0x04;
    /* TODO: Revisit it when Power Management is implemented */
    u->device_desc.init_power_mode = 0x01; /* Active Mode */
    u->device_desc.high_priority_lun = 0x7F; /* Same Priority */
    u->device_desc.spec_version = cpu_to_be16(UFS_SPEC_VER);
    u->device_desc.manufacturer_name = 0x00;
    u->device_desc.product_name = 0x01;
    u->device_desc.serial_number = 0x02;
    u->device_desc.oem_id = 0x03;
    u->device_desc.ud_0_base_offset = 0x16;
    u->device_desc.ud_config_p_length = 0x1A;
    u->device_desc.device_rtt_cap = 0x02;
    u->device_desc.ufs_features_support = UFS_DEV_HIGH_TEMP_NOTIF |
        UFS_DEV_LOW_TEMP_NOTIF;
    u->device_desc.queue_depth = u->params.nutrs;
    u->device_desc.product_revision_level = 0x04;
    u->device_desc.extended_ufs_features_support =
        cpu_to_be32(UFS_DEV_HIGH_TEMP_NOTIF | UFS_DEV_LOW_TEMP_NOTIF);

    memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor));
    u->geometry_desc.length = sizeof(GeometryDescriptor);
    u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
    u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
    u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4MB: 8192 * 512B */
    u->geometry_desc.allocation_unit_size = 0x1; /* 4MB: 1 segment */
    u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
    u->geometry_desc.max_in_buffer_size = 0x8;
    u->geometry_desc.max_out_buffer_size = 0x8;
    u->geometry_desc.rpmb_read_write_size = 0x40;
    u->geometry_desc.data_ordering =
        0x0; /* out-of-order data transfer is not supported */
    u->geometry_desc.max_context_id_number = 0x5;
    u->geometry_desc.supported_memory_types = cpu_to_be16(0x8001);

    memset(&u->attributes, 0, sizeof(u->attributes));
    u->attributes.max_data_in_size = 0x08;
    u->attributes.max_data_out_size = 0x08;
    u->attributes.ref_clk_freq = 0x01; /* 26 MHz */
    /* configure descriptor is not supported */
    u->attributes.config_descr_lock = 0x01;
    u->attributes.max_num_of_rtt = 0x02;
    u->attributes.device_too_high_temp_boundary = UFS_TOO_HIGH_TEMP_BOUNDARY;
    u->attributes.device_too_low_temp_boundary = UFS_TOO_LOW_TEMP_BOUNDARY;

    memset(&u->flags, 0, sizeof(u->flags));
    u->flags.permanently_disable_fw_update = 1;

    /*
     * The temperature value is fixed to UFS_TEMPERATURE and does not change
     * dynamically
     */
    u->temperature = UFS_TEMPERATURE;
}

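/*
 * PCI realize callback: validate the device properties, create the ufs-bus,
 * initialize the controller state and registers, and expose the mandatory
 * well-known logical units.
 */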
static void ufs_realize(PCIDevice *pci_dev, Error **errp)
{
    UfsHc *u = UFS(pci_dev);

    if (!ufs_check_constraints(u, errp)) {
        return;
    }

    qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev,
              u->parent_obj.qdev.id);

    ufs_init_state(u);
    ufs_init_hc(u);
    ufs_init_pci(u, pci_dev);

    ufs_init_wlu(&u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN);
    ufs_init_wlu(&u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN);
    ufs_init_wlu(&u->boot_wlu, UFS_UPIU_BOOT_WLUN);
    ufs_init_wlu(&u->rpmb_wlu, UFS_UPIU_RPMB_WLUN);
}

static void ufs_exit(PCIDevice *pci_dev)
{
    UfsHc *u = UFS(pci_dev);

    qemu_free_irq(u->irq);

    qemu_bh_delete(u->doorbell_bh);
    qemu_bh_delete(u->complete_bh);

    for (int i = 0; i < u->params.nutrs; i++) {
        ufs_clear_req(&u->req_list[i]);
    }
    g_free(u->req_list);

    for (int i = 0; i < ARRAY_SIZE(u->sq); i++) {
        if (u->sq[i]) {
            ufs_mcq_delete_sq(u, i);
        }
    }
    for (int i = 0; i < ARRAY_SIZE(u->cq); i++) {
        if (u->cq[i]) {
            ufs_mcq_delete_cq(u, i);
        }
    }
}

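/*
 * User-visible device properties; the nutrs and nutmrs defaults match the
 * controller maximums (UFS_MAX_NUTRS and UFS_MAX_NUTMRS).
 */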
static const Property ufs_props[] = {
    DEFINE_PROP_STRING("serial", UfsHc, params.serial),
    DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32),
    DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8),
    DEFINE_PROP_BOOL("mcq", UfsHc, params.mcq, false),
    DEFINE_PROP_UINT8("mcq-maxq", UfsHc, params.mcq_maxq, 2),
};

static const VMStateDescription ufs_vmstate = {
    .name = "ufs",
    .unmigratable = 1,
};

static void ufs_class_init(ObjectClass *oc, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = ufs_realize;
    pc->exit = ufs_exit;
    pc->vendor_id = PCI_VENDOR_ID_REDHAT;
    pc->device_id = PCI_DEVICE_ID_REDHAT_UFS;
    pc->class_id = PCI_CLASS_STORAGE_UFS;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Universal Flash Storage";
    device_class_set_props(dc, ufs_props);
    dc->vmsd = &ufs_vmstate;
}

static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev,
                                  Error **errp)
{
    if (strcmp(object_get_typename(OBJECT(qdev)), TYPE_UFS_LU) != 0) {
        error_setg(errp, "%s cannot be connected to ufs-bus",
                   object_get_typename(OBJECT(qdev)));
        return false;
    }

    return true;
}

static char *ufs_bus_get_dev_path(DeviceState *dev)
{
    BusState *bus = qdev_get_parent_bus(dev);

    return qdev_get_dev_path(bus->parent);
}

static void ufs_bus_class_init(ObjectClass *class, const void *data)
{
    BusClass *bc = BUS_CLASS(class);
    bc->get_dev_path = ufs_bus_get_dev_path;
    bc->check_address = ufs_bus_check_address;
}

static const TypeInfo ufs_info = {
    .name = TYPE_UFS,
    .parent = TYPE_PCI_DEVICE,
    .class_init = ufs_class_init,
    .instance_size = sizeof(UfsHc),
    .interfaces = (const InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
};

static const TypeInfo ufs_bus_info = {
    .name = TYPE_UFS_BUS,
    .parent = TYPE_BUS,
    .class_init = ufs_bus_class_init,
    .class_size = sizeof(UfsBusClass),
    .instance_size = sizeof(UfsBus),
};

static void ufs_register_types(void)
{
    type_register_static(&ufs_info);
    type_register_static(&ufs_bus_info);
}

type_init(ufs_register_types)