// SPDX-License-Identifier: GPL-2.0
/*
 * Apple ANS NVM Express device driver
 * Copyright The Asahi Linux Contributors
 *
 * Based on the pci.c NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 * and on the rdma.c NVMe over Fabrics RDMA host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/once.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/soc/apple/rtkit.h>
#include <linux/soc/apple/sart.h>
#include <linux/reset.h>
#include <linux/time64.h>

#include "nvme.h"

#define APPLE_ANS_BOOT_TIMEOUT	USEC_PER_SEC

#define APPLE_ANS_COPROC_CPU_CONTROL	 0x44
#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)

#define APPLE_ANS_ACQ_DB  0x1004
#define APPLE_ANS_IOCQ_DB 0x100c

#define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210

#define APPLE_ANS_BOOT_STATUS	 0x1300
#define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55

#define APPLE_ANS_UNKNOWN_CTRL	 0x24008
#define APPLE_ANS_PRP_NULL_CHECK BIT(11)

#define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
#define APPLE_ANS_LINEAR_SQ_EN	 BIT(0)

#define APPLE_ANS_LINEAR_ASQ_DB	 0x2490c
#define APPLE_ANS_LINEAR_IOSQ_DB 0x24910

#define APPLE_NVMMU_NUM_TCBS	  0x28100
#define APPLE_NVMMU_ASQ_TCB_BASE  0x28108
#define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
#define APPLE_NVMMU_TCB_INVAL	  0x28118
#define APPLE_NVMMU_TCB_STAT	  0x28120
/*
 * This controller is a bit weird in the way command tags work: both the
 * admin and the IO queue share the same tag space. Additionally, tags
 * cannot be higher than 0x40, which effectively limits the combined
 * queue depth to 0x40. Instead of wasting half of that on the admin queue,
 * which gets much less traffic, we reduce its size here.
 * The controller also doesn't support async events, so no space needs to
 * be reserved for NVME_NR_AEN_COMMANDS.
 */
#define APPLE_NVME_AQ_DEPTH	   2
#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)

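/*
 * log2 of the IO submission queue entry stride on controllers without the
 * linear SQ: slots are 1 << 7 = 128 bytes apart (see
 * apple_nvme_submit_cmd_t8015() and apple_nvme_queue_alloc()).
 */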
#define APPLE_NVME_IOSQES 7

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ 4096
#define NVME_MAX_SEGS  127

/*
 * This controller comes with an embedded IOMMU known as NVMMU.
 * The NVMMU is pointed at an array of TCBs indexed by the command tag.
 * Each command must be configured inside this structure before it's allowed
 * to execute, including commands that don't require DMA transfers.
 *
 * An exception to this are Apple's vendor-specific commands (opcode 0xD8 on
 * the admin queue): those commands must still be added to the NVMMU but their
 * DMA buffers cannot be represented as PRPs and must instead be allowlisted
 * using SART.
 *
 * Programming the PRPs to the same values as those in the submission queue
 * looks rather silly at first. This hardware is however designed for a kernel
 * that runs the NVMMU code in a higher exception level than the NVMe driver.
 * In that setting the NVMe driver first programs the submission queue entry
 * and then executes a hypercall to the code that is allowed to program the
 * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
 * verifying that they don't point to kernel text, data, pagetables, or similar
 * protected areas before programming the TCB to point to this shadow copy.
 * Since Linux doesn't do any of that we may as well just point both the queue
 * and the TCB PRP pointer to the same memory.
 */
struct apple_nvmmu_tcb {
        u8 opcode;

#define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0)
#define APPLE_ANS_TCB_DMA_TO_DEVICE   BIT(1)
        u8 dma_flags;

        u8 command_id;
        u8 _unk0;
        __le16 length;
        u8 _unk1[18];
        __le64 prp1;
        __le64 prp2;
        u8 _unk2[16];
        u8 aes_iv[8];
        u8 _aes_unk[64];
};

/*
 * The Apple NVMe controller only supports a single admin and a single IO queue
 * which share a single interrupt and are both limited to the queue depth
 * described by struct apple_nvme_hw (64 entries on T8103 and later).
 *
 * The completion queue works as usual. On controllers with the linear
 * submission queue, the submission "queue" is instead an array indexed by the
 * command tag. Such commands must also be present in the NVMMU's tcb array and
 * are triggered by writing their tag to an MMIO register.
 */
struct apple_nvme_queue {
        struct nvme_command *sqes;
        struct nvme_completion *cqes;
        struct apple_nvmmu_tcb *tcbs;

        dma_addr_t sq_dma_addr;
        dma_addr_t cq_dma_addr;
        dma_addr_t tcb_dma_addr;

        u32 __iomem *sq_db;
        u32 __iomem *cq_db;

        u16 sq_tail;
        u16 cq_head;
        u8 cq_phase;

        bool is_adminq;
        bool enabled;
};

/*
 * The apple_nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct apple_nvme_iod {
        struct nvme_request req;
        struct nvme_command cmd;
        struct apple_nvme_queue *q;
        int npages; /* In the PRP list. 0 means small pool in use */
        int nents;  /* Used in scatterlist */
        dma_addr_t first_dma;
        unsigned int dma_len; /* length of single DMA segment mapping */
        struct scatterlist *sg;
};

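/*
 * Per-SoC controller configuration: whether the linear submission queue and
 * NVMMU are used (T8103 and later) and the tag space / queue depth shared by
 * the admin and IO queue.
 */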
struct apple_nvme_hw {
        bool has_lsq_nvmmu;
        u32 max_queue_depth;
};

struct apple_nvme {
        struct device *dev;

        void __iomem *mmio_coproc;
        void __iomem *mmio_nvme;
        const struct apple_nvme_hw *hw;

        struct device **pd_dev;
        struct device_link **pd_link;
        int pd_count;

        struct apple_sart *sart;
        struct apple_rtkit *rtk;
        struct reset_control *reset;

        struct dma_pool *prp_page_pool;
        struct dma_pool *prp_small_pool;
        mempool_t *iod_mempool;

        struct nvme_ctrl ctrl;
        struct work_struct remove_work;

        struct apple_nvme_queue adminq;
        struct apple_nvme_queue ioq;

        struct blk_mq_tag_set admin_tagset;
        struct blk_mq_tag_set tagset;

        int irq;
        spinlock_t lock;
};

static_assert(sizeof(struct nvme_command) == 64);
static_assert(sizeof(struct apple_nvmmu_tcb) == 128);

static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct apple_nvme, ctrl);
}

static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
{
        if (q->is_adminq)
                return container_of(q, struct apple_nvme, adminq);

        return container_of(q, struct apple_nvme, ioq);
}

static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
{
        struct apple_nvme *anv = queue_to_apple_nvme(q);

        if (q->is_adminq && anv->hw->has_lsq_nvmmu)
                return APPLE_NVME_AQ_DEPTH;

        return anv->hw->max_queue_depth;
}

static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
{
        struct apple_nvme *anv = cookie;

        dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot");
        nvme_reset_ctrl(&anv->ctrl);
}

static int apple_nvme_sart_dma_setup(void *cookie,
                                     struct apple_rtkit_shmem *bfr)
{
        struct apple_nvme *anv = cookie;
        int ret;

        if (bfr->iova)
                return -EINVAL;
        if (!bfr->size)
                return -EINVAL;

        bfr->buffer =
                dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL);
        if (!bfr->buffer)
                return -ENOMEM;

        ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size);
        if (ret) {
                dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
                bfr->buffer = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void apple_nvme_sart_dma_destroy(void *cookie,
                                        struct apple_rtkit_shmem *bfr)
{
        struct apple_nvme *anv = cookie;

        apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size);
        dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
}

static const struct apple_rtkit_ops apple_nvme_rtkit_ops = {
        .crashed = apple_nvme_rtkit_crashed,
        .shmem_setup = apple_nvme_sart_dma_setup,
        .shmem_destroy = apple_nvme_sart_dma_destroy,
};

static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
{
        struct apple_nvme *anv = queue_to_apple_nvme(q);

        writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL);
        if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT))
                dev_warn_ratelimited(anv->dev,
                                     "NVMMU TCB invalidation failed\n");
}

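/*
 * Controllers without the linear SQ (e.g. T8015) use a conventional NVMe
 * submission queue: copy the command into the current tail slot and ring the
 * tail doorbell.
 */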
static void apple_nvme_submit_cmd_t8015(struct apple_nvme_queue *q,
                                        struct nvme_command *cmd)
{
        struct apple_nvme *anv = queue_to_apple_nvme(q);

        spin_lock_irq(&anv->lock);

        if (q->is_adminq)
                memcpy(&q->sqes[q->sq_tail], cmd, sizeof(*cmd));
        else
                memcpy((void *)q->sqes + (q->sq_tail << APPLE_NVME_IOSQES),
                       cmd, sizeof(*cmd));

        if (++q->sq_tail == anv->hw->max_queue_depth)
                q->sq_tail = 0;

        writel(q->sq_tail, q->sq_db);
        spin_unlock_irq(&anv->lock);
}

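/*
 * Controllers with the linear SQ use the command tag as the submission queue
 * slot: fill in the NVMMU TCB for this tag, copy the command into the matching
 * SQ entry and write the tag to the doorbell to trigger execution.
 */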
static void apple_nvme_submit_cmd_t8103(struct apple_nvme_queue *q,
                                        struct nvme_command *cmd)
{
        struct apple_nvme *anv = queue_to_apple_nvme(q);
        u32 tag = nvme_tag_from_cid(cmd->common.command_id);
        struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];

        tcb->opcode = cmd->common.opcode;
        tcb->prp1 = cmd->common.dptr.prp1;
        tcb->prp2 = cmd->common.dptr.prp2;
        tcb->length = cmd->rw.length;
        tcb->command_id = tag;

        if (nvme_is_write(cmd))
                tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE;
        else
                tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE;

        memcpy(&q->sqes[tag], cmd, sizeof(*cmd));

        /*
         * This lock here doesn't make much sense at a first glance but
         * removing it will result in occasional missed completion
         * interrupts even though the commands still appear on the CQ.
         * It's unclear why this happens but our best guess is that
         * there is a bug in the firmware triggered when a new command
         * is issued while we're inside the irq handler between the
         * NVMMU invalidation (and making the tag available again)
         * and the final CQ update.
         */
        spin_lock_irq(&anv->lock);
        writel(tag, q->sq_db);
        spin_unlock_irq(&anv->lock);
}

/*
 * From pci.c:
 * Will slightly overestimate the number of pages needed. This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static inline size_t apple_nvme_iod_alloc_size(void)
{
        const unsigned int nprps = DIV_ROUND_UP(
                NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE);
        const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
        const size_t alloc_size = sizeof(__le64 *) * npages +
                                  sizeof(struct scatterlist) * NVME_MAX_SEGS;

        return alloc_size;
}

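/*
 * The PRP list chunk pointers live directly behind the scatterlist entries
 * inside the single iod_mempool allocation sized by
 * apple_nvme_iod_alloc_size().
 */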
static void **apple_nvme_iod_list(struct request *req)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);

        return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
{
        const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        dma_addr_t dma_addr = iod->first_dma;
        int i;

        for (i = 0; i < iod->npages; i++) {
                __le64 *prp_list = apple_nvme_iod_list(req)[i];
                dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

                dma_pool_free(anv->prp_page_pool, prp_list, dma_addr);
                dma_addr = next_dma_addr;
        }
}

static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);

        if (iod->dma_len) {
                dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
                               rq_dma_dir(req));
                return;
        }

        WARN_ON_ONCE(!iod->nents);

        dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
        if (iod->npages == 0)
                dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0],
                              iod->first_dma);
        else
                apple_nvme_free_prps(anv, req);
        mempool_free(iod->sg, anv->iod_mempool);
}

static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                dma_addr_t phys = sg_phys(sg);

                pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
                        i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
                        sg_dma_len(sg));
        }
}

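/*
 * Build the PRP entries for a multi-segment request: PRP1 points at the first
 * data page, PRP2 either at the second page or at a chained list of page
 * pointers allocated from the PRP DMA pools, where the last entry of each
 * list page points to the next one.
 */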
static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv,
                                          struct request *req,
                                          struct nvme_rw_command *cmnd)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct dma_pool *pool;
        int length = blk_rq_payload_bytes(req);
        struct scatterlist *sg = iod->sg;
        int dma_len = sg_dma_len(sg);
        u64 dma_addr = sg_dma_address(sg);
        int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
        __le64 *prp_list;
        void **list = apple_nvme_iod_list(req);
        dma_addr_t prp_dma;
        int nprps, i;

        length -= (NVME_CTRL_PAGE_SIZE - offset);
        if (length <= 0) {
                iod->first_dma = 0;
                goto done;
        }

        dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
        if (dma_len) {
                dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
        } else {
                sg = sg_next(sg);
                dma_addr = sg_dma_address(sg);
                dma_len = sg_dma_len(sg);
        }

        if (length <= NVME_CTRL_PAGE_SIZE) {
                iod->first_dma = dma_addr;
                goto done;
        }

        nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
        if (nprps <= (256 / 8)) {
                pool = anv->prp_small_pool;
                iod->npages = 0;
        } else {
                pool = anv->prp_page_pool;
                iod->npages = 1;
        }

        prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
        if (!prp_list) {
                iod->first_dma = dma_addr;
                iod->npages = -1;
                return BLK_STS_RESOURCE;
        }
        list[0] = prp_list;
        iod->first_dma = prp_dma;
        i = 0;
        for (;;) {
                if (i == NVME_CTRL_PAGE_SIZE >> 3) {
                        __le64 *old_prp_list = prp_list;

                        prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
                        if (!prp_list)
                                goto free_prps;
                        list[iod->npages++] = prp_list;
                        prp_list[0] = old_prp_list[i - 1];
                        old_prp_list[i - 1] = cpu_to_le64(prp_dma);
                        i = 1;
                }
                prp_list[i++] = cpu_to_le64(dma_addr);
                dma_len -= NVME_CTRL_PAGE_SIZE;
                dma_addr += NVME_CTRL_PAGE_SIZE;
                length -= NVME_CTRL_PAGE_SIZE;
                if (length <= 0)
                        break;
                if (dma_len > 0)
                        continue;
                if (unlikely(dma_len < 0))
                        goto bad_sgl;
                sg = sg_next(sg);
                dma_addr = sg_dma_address(sg);
                dma_len = sg_dma_len(sg);
        }
done:
        cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
        cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
        return BLK_STS_OK;
free_prps:
        apple_nvme_free_prps(anv, req);
        return BLK_STS_RESOURCE;
bad_sgl:
        WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents),
             "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req),
             iod->nents);
        return BLK_STS_IOERR;
}

static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
                                                struct request *req,
                                                struct nvme_rw_command *cmnd,
                                                struct bio_vec *bv)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
        unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

        iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
        if (dma_mapping_error(anv->dev, iod->first_dma))
                return BLK_STS_RESOURCE;
        iod->dma_len = bv->bv_len;

        cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
        if (bv->bv_len > first_prp_len)
                cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
        return BLK_STS_OK;
}

static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
                                        struct request *req,
                                        struct nvme_command *cmnd)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        blk_status_t ret = BLK_STS_RESOURCE;
        int nr_mapped;

        if (blk_rq_nr_phys_segments(req) == 1) {
                struct bio_vec bv = req_bvec(req);

                if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
                        return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw,
                                                           &bv);
        }

        iod->dma_len = 0;
        iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
        if (!iod->sg)
                return BLK_STS_RESOURCE;
        sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
        iod->nents = blk_rq_map_sg(req, iod->sg);
        if (!iod->nents)
                goto out_free_sg;

        nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
                                     rq_dma_dir(req), DMA_ATTR_NO_WARN);
        if (!nr_mapped)
                goto out_free_sg;

        ret = apple_nvme_setup_prps(anv, req, &cmnd->rw);
        if (ret != BLK_STS_OK)
                goto out_unmap_sg;
        return BLK_STS_OK;

out_unmap_sg:
        dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
out_free_sg:
        mempool_free(iod->sg, anv->iod_mempool);
        return ret;
}

static __always_inline void apple_nvme_unmap_rq(struct request *req)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct apple_nvme *anv = queue_to_apple_nvme(iod->q);

        if (blk_rq_nr_phys_segments(req))
                apple_nvme_unmap_data(anv, req);
}

static void apple_nvme_complete_rq(struct request *req)
{
        apple_nvme_unmap_rq(req);
        nvme_complete_rq(req);
}

static void apple_nvme_complete_batch(struct io_comp_batch *iob)
{
        nvme_complete_batch(iob, apple_nvme_unmap_rq);
}

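/* A CQE is new (pending) when its phase bit matches the queue's current phase. */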
static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
{
        struct nvme_completion *hcqe = &q->cqes[q->cq_head];

        return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase;
}

static inline struct blk_mq_tags *
apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
{
        if (q->is_adminq)
                return anv->admin_tagset.tags[0];
        else
                return anv->tagset.tags[0];
}

static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
                                         struct io_comp_batch *iob, u16 idx)
{
        struct apple_nvme *anv = queue_to_apple_nvme(q);
        struct nvme_completion *cqe = &q->cqes[idx];
        __u16 command_id = READ_ONCE(cqe->command_id);
        struct request *req;

        if (anv->hw->has_lsq_nvmmu)
                apple_nvmmu_inval(q, command_id);

        req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
        if (unlikely(!req)) {
                dev_warn(anv->dev, "invalid id %d completed", command_id);
                return;
        }

        if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
            !blk_mq_add_to_batch(req, iob,
                                 nvme_req(req)->status != NVME_SC_SUCCESS,
                                 apple_nvme_complete_batch))
                apple_nvme_complete_rq(req);
}

static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
{
        u32 tmp = q->cq_head + 1;

        if (tmp == apple_nvme_queue_depth(q)) {
                q->cq_head = 0;
                q->cq_phase ^= 1;
        } else {
                q->cq_head = tmp;
        }
}

static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
                               struct io_comp_batch *iob)
{
        bool found = false;

        while (apple_nvme_cqe_pending(q)) {
                found = true;

                /*
                 * load-load control dependency between phase and the rest of
                 * the cqe requires a full read memory barrier
                 */
                dma_rmb();
                apple_nvme_handle_cqe(q, iob, q->cq_head);
                apple_nvme_update_cq_head(q);
        }

        if (found)
                writel(q->cq_head, q->cq_db);

        return found;
}

static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
{
        bool found;
        DEFINE_IO_COMP_BATCH(iob);

        if (!READ_ONCE(q->enabled) && !force)
                return false;

        found = apple_nvme_poll_cq(q, &iob);

        if (!rq_list_empty(&iob.req_list))
                apple_nvme_complete_batch(&iob);

        return found;
}

static irqreturn_t apple_nvme_irq(int irq, void *data)
{
        struct apple_nvme *anv = data;
        bool handled = false;
        unsigned long flags;

        spin_lock_irqsave(&anv->lock, flags);
        if (apple_nvme_handle_cq(&anv->ioq, false))
                handled = true;
        if (apple_nvme_handle_cq(&anv->adminq, false))
                handled = true;
        spin_unlock_irqrestore(&anv->lock, flags);

        if (handled)
                return IRQ_HANDLED;
        return IRQ_NONE;
}

static int apple_nvme_create_cq(struct apple_nvme *anv)
{
        struct nvme_command c = {};

        /*
         * Note: we (ab)use the fact that the prp fields survive if no data
         * is attached to the request.
         */
        c.create_cq.opcode = nvme_admin_create_cq;
        c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
        c.create_cq.cqid = cpu_to_le16(1);
        c.create_cq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1);
        c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
        c.create_cq.irq_vector = cpu_to_le16(0);

        return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}

static int apple_nvme_remove_cq(struct apple_nvme *anv)
{
        struct nvme_command c = {};

        c.delete_queue.opcode = nvme_admin_delete_cq;
        c.delete_queue.qid = cpu_to_le16(1);

        return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}

static int apple_nvme_create_sq(struct apple_nvme *anv)
{
        struct nvme_command c = {};

        /*
         * Note: we (ab)use the fact that the prp fields survive if no data
         * is attached to the request.
         */
        c.create_sq.opcode = nvme_admin_create_sq;
        c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
        c.create_sq.sqid = cpu_to_le16(1);
        c.create_sq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1);
        c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
        c.create_sq.cqid = cpu_to_le16(1);

        return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}

static int apple_nvme_remove_sq(struct apple_nvme *anv)
{
        struct nvme_command c = {};

        c.delete_queue.opcode = nvme_admin_delete_sq;
        c.delete_queue.qid = cpu_to_le16(1);

        return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}

static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                                        const struct blk_mq_queue_data *bd)
{
        struct nvme_ns *ns = hctx->queue->queuedata;
        struct apple_nvme_queue *q = hctx->driver_data;
        struct apple_nvme *anv = queue_to_apple_nvme(q);
        struct request *req = bd->rq;
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_command *cmnd = &iod->cmd;
        blk_status_t ret;

        iod->npages = -1;
        iod->nents = 0;

        /*
         * We should not need to do this, but we're still using this to
         * ensure we can drain requests on a dying queue.
         */
        if (unlikely(!READ_ONCE(q->enabled)))
                return BLK_STS_IOERR;

        if (!nvme_check_ready(&anv->ctrl, req, true))
                return nvme_fail_nonready_command(&anv->ctrl, req);

        ret = nvme_setup_cmd(ns, req);
        if (ret)
                return ret;

        if (blk_rq_nr_phys_segments(req)) {
                ret = apple_nvme_map_data(anv, req, cmnd);
                if (ret)
                        goto out_free_cmd;
        }

        nvme_start_request(req);

        if (anv->hw->has_lsq_nvmmu)
                apple_nvme_submit_cmd_t8103(q, cmnd);
        else
                apple_nvme_submit_cmd_t8015(q, cmnd);

        return BLK_STS_OK;

out_free_cmd:
        nvme_cleanup_cmd(req);
        return ret;
}

static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                                unsigned int hctx_idx)
{
        hctx->driver_data = data;
        return 0;
}

static int apple_nvme_init_request(struct blk_mq_tag_set *set,
                                   struct request *req, unsigned int hctx_idx,
                                   unsigned int numa_node)
{
        struct apple_nvme_queue *q = set->driver_data;
        struct apple_nvme *anv = queue_to_apple_nvme(q);
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_request *nreq = nvme_req(req);

        iod->q = q;
        nreq->ctrl = &anv->ctrl;
        nreq->cmd = &iod->cmd;

        return 0;
}

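/*
 * Quiesce the queues and disable the controller, either for a clean
 * shutdown/suspend (shutdown == true) or ahead of a reset (shutdown == false).
 */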
static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
{
        enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
        u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
        bool dead = false, freeze = false;
        unsigned long flags;

        if (apple_rtkit_is_crashed(anv->rtk))
                dead = true;
        if (!(csts & NVME_CSTS_RDY))
                dead = true;
        if (csts & NVME_CSTS_CFS)
                dead = true;

        if (state == NVME_CTRL_LIVE ||
            state == NVME_CTRL_RESETTING) {
                freeze = true;
                nvme_start_freeze(&anv->ctrl);
        }

        /*
         * Give the controller a chance to complete all entered requests if
         * doing a safe shutdown.
         */
        if (!dead && shutdown && freeze)
                nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT);

        nvme_quiesce_io_queues(&anv->ctrl);

        if (!dead) {
                if (READ_ONCE(anv->ioq.enabled)) {
                        apple_nvme_remove_sq(anv);
                        apple_nvme_remove_cq(anv);
                }

                /*
                 * Always disable the NVMe controller after shutdown.
                 * We need to do this to bring it back up later anyway, and we
                 * can't do it while the firmware is not running (e.g. in the
                 * resume reset path before RTKit is initialized), so for Apple
                 * controllers it makes sense to unconditionally do it here.
                 * Additionally, this sequence of events is reliable, while
                 * others (like disabling after bringing back the firmware on
                 * resume) seem to run into trouble under some circumstances.
                 *
                 * Both U-Boot and m1n1 also use this convention (i.e. an ANS
                 * NVMe controller is handed off with firmware shut down, in an
                 * NVMe disabled state, after a clean shutdown).
                 */
                if (shutdown)
                        nvme_disable_ctrl(&anv->ctrl, shutdown);
                nvme_disable_ctrl(&anv->ctrl, false);
        }

        WRITE_ONCE(anv->ioq.enabled, false);
        WRITE_ONCE(anv->adminq.enabled, false);
        mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
        nvme_quiesce_admin_queue(&anv->ctrl);

        /* last chance to complete any requests before nvme_cancel_request */
        spin_lock_irqsave(&anv->lock, flags);
        apple_nvme_handle_cq(&anv->ioq, true);
        apple_nvme_handle_cq(&anv->adminq, true);
        spin_unlock_irqrestore(&anv->lock, flags);

        nvme_cancel_tagset(&anv->ctrl);
        nvme_cancel_admin_tagset(&anv->ctrl);

        /*
         * The driver will not be starting up queues again if shutting down so
         * must flush all entered requests to their failed completion to avoid
         * deadlocking blk-mq hot-cpu notifier.
         */
        if (shutdown) {
                nvme_unquiesce_io_queues(&anv->ctrl);
                nvme_unquiesce_admin_queue(&anv->ctrl);
        }
}

static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct apple_nvme_queue *q = iod->q;
        struct apple_nvme *anv = queue_to_apple_nvme(q);
        unsigned long flags;
        u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);

        if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) {
                /*
                 * From rdma.c:
                 * If we are resetting, connecting or deleting we should
                 * complete immediately because we may block controller
                 * teardown or setup sequence
                 * - ctrl disable/shutdown fabrics requests
                 * - connect requests
                 * - initialization admin requests
                 * - I/O requests that entered after unquiescing and
                 *   the controller stopped responding
                 *
                 * All other requests should be cancelled by the error
                 * recovery work, so it's fine that we fail it here.
                 */
                dev_warn(anv->dev,
                         "I/O %d(aq:%d) timeout while not in live state\n",
                         req->tag, q->is_adminq);
                if (blk_mq_request_started(req) &&
                    !blk_mq_request_completed(req)) {
                        nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
                        nvme_req(req)->flags |= NVME_REQ_CANCELLED;
                        blk_mq_complete_request(req);
                }
                return BLK_EH_DONE;
        }

        /* check if we just missed an interrupt if we're still alive */
        if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) {
                spin_lock_irqsave(&anv->lock, flags);
                apple_nvme_handle_cq(q, false);
                spin_unlock_irqrestore(&anv->lock, flags);
                if (blk_mq_request_completed(req)) {
                        dev_warn(anv->dev,
                                 "I/O %d(aq:%d) timeout: completion polled\n",
                                 req->tag, q->is_adminq);
                        return BLK_EH_DONE;
                }
        }

        /*
         * aborting commands isn't supported which leaves a full reset as our
         * only option here
         */
        dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n",
                 req->tag, q->is_adminq);
        nvme_req(req)->flags |= NVME_REQ_CANCELLED;
        apple_nvme_disable(anv, false);
        nvme_reset_ctrl(&anv->ctrl);
        return BLK_EH_DONE;
}

static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,
                           struct io_comp_batch *iob)
{
        struct apple_nvme_queue *q = hctx->driver_data;
        struct apple_nvme *anv = queue_to_apple_nvme(q);
        bool found;
        unsigned long flags;

        spin_lock_irqsave(&anv->lock, flags);
        found = apple_nvme_poll_cq(q, iob);
        spin_unlock_irqrestore(&anv->lock, flags);

        return found;
}

static const struct blk_mq_ops apple_nvme_mq_admin_ops = {
        .queue_rq = apple_nvme_queue_rq,
        .complete = apple_nvme_complete_rq,
        .init_hctx = apple_nvme_init_hctx,
        .init_request = apple_nvme_init_request,
        .timeout = apple_nvme_timeout,
};

static const struct blk_mq_ops apple_nvme_mq_ops = {
        .queue_rq = apple_nvme_queue_rq,
        .complete = apple_nvme_complete_rq,
        .init_hctx = apple_nvme_init_hctx,
        .init_request = apple_nvme_init_request,
        .timeout = apple_nvme_timeout,
        .poll = apple_nvme_poll,
};

static void apple_nvme_init_queue(struct apple_nvme_queue *q)
{
        unsigned int depth = apple_nvme_queue_depth(q);
        struct apple_nvme *anv = queue_to_apple_nvme(q);

        q->cq_head = 0;
        q->cq_phase = 1;
        if (anv->hw->has_lsq_nvmmu)
                memset(q->tcbs, 0, anv->hw->max_queue_depth
                                   * sizeof(struct apple_nvmmu_tcb));
        memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
        WRITE_ONCE(q->enabled, true);
        wmb(); /* ensure the first interrupt sees the initialization */
}

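/*
 * Full controller bring-up: boot (or wake) the ANS firmware via RTKit,
 * program the Apple-specific registers, enable the NVMe controller and
 * finally set up the admin queue and the single IO queue.
 */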
static void apple_nvme_reset_work(struct work_struct *work)
{
        unsigned int nr_io_queues = 1;
        int ret;
        u32 boot_status, aqa;
        struct apple_nvme *anv =
                container_of(work, struct apple_nvme, ctrl.reset_work);
        enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);

        if (state != NVME_CTRL_RESETTING) {
                dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state);
                ret = -ENODEV;
                goto out;
        }

        /* there's unfortunately no known way to recover if RTKit crashed :( */
        if (apple_rtkit_is_crashed(anv->rtk)) {
                dev_err(anv->dev,
                        "RTKit has crashed without any way to recover.");
                ret = -EIO;
                goto out;
        }

        /* RTKit must be shut down cleanly for the (soft)-reset to work */
        if (apple_rtkit_is_running(anv->rtk)) {
                /* reset the controller if it is enabled */
                if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
                        apple_nvme_disable(anv, false);
                dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
                ret = apple_rtkit_shutdown(anv->rtk);
                if (ret)
                        goto out;

                writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
        }

        /*
         * Only do the soft-reset if the CPU is not running, which means either we
         * or the previous stage shut it down cleanly.
         */
        if (!(readl(anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL) &
              APPLE_ANS_COPROC_CPU_CONTROL_RUN)) {

                ret = reset_control_assert(anv->reset);
                if (ret)
                        goto out;

                ret = apple_rtkit_reinit(anv->rtk);
                if (ret)
                        goto out;

                ret = reset_control_deassert(anv->reset);
                if (ret)
                        goto out;

                writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
                       anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);

                ret = apple_rtkit_boot(anv->rtk);
        } else {
                ret = apple_rtkit_wake(anv->rtk);
        }

        if (ret) {
                dev_err(anv->dev, "ANS did not boot");
                goto out;
        }

        ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS,
                                 boot_status,
                                 boot_status == APPLE_ANS_BOOT_STATUS_OK,
                                 USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT);
        if (ret) {
                dev_err(anv->dev, "ANS did not initialize");
                goto out;
        }

        dev_dbg(anv->dev, "ANS booted successfully.");

        /*
         * Limit the max command size to prevent iod->sg allocations going
         * over a single page.
         */
        anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
                                         dma_max_mapping_size(anv->dev) >> 9);
        anv->ctrl.max_segments = NVME_MAX_SEGS;

        dma_set_max_seg_size(anv->dev, 0xffffffff);

        if (anv->hw->has_lsq_nvmmu) {
                /*
                 * Enable NVMMU and linear submission queues which is required
                 * since T6000.
                 */
                writel(APPLE_ANS_LINEAR_SQ_EN,
                       anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);

                /* Allow as many pending commands as possible for both queues */
                writel(anv->hw->max_queue_depth |
                       (anv->hw->max_queue_depth << 16),
                       anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);

                /* Setup the NVMMU for the maximum admin and IO queue depth */
                writel(anv->hw->max_queue_depth - 1,
                       anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);

                /*
                 * This is probably a chicken bit: without it all commands
                 * where any PRP is set to zero (including those that don't use
                 * that field) fail and the co-processor complains about
                 * "completed with err BAD_CMD-" or a "NULL_PRP_PTR_ERR" in the
                 * syslog
                 */
                writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
                               ~APPLE_ANS_PRP_NULL_CHECK,
                       anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
        }

        /* Setup the admin queue */
        if (anv->hw->has_lsq_nvmmu)
                aqa = APPLE_NVME_AQ_DEPTH - 1;
        else
                aqa = anv->hw->max_queue_depth - 1;
        aqa |= aqa << 16;
        writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
        writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
        writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);

        if (anv->hw->has_lsq_nvmmu) {
                /* Setup NVMMU for both queues */
                writeq(anv->adminq.tcb_dma_addr,
                       anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
                writeq(anv->ioq.tcb_dma_addr,
                       anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
        }

        anv->ctrl.sqsize =
                anv->hw->max_queue_depth - 1; /* 0's based queue depth */
        anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);

        dev_dbg(anv->dev, "Enabling controller now");
        ret = nvme_enable_ctrl(&anv->ctrl);
        if (ret)
                goto out;

        dev_dbg(anv->dev, "Starting admin queue");
        apple_nvme_init_queue(&anv->adminq);
        nvme_unquiesce_admin_queue(&anv->ctrl);

        if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
                dev_warn(anv->ctrl.device,
                         "failed to mark controller CONNECTING\n");
                ret = -ENODEV;
                goto out;
        }

        ret = nvme_init_ctrl_finish(&anv->ctrl, false);
        if (ret)
                goto out;

        dev_dbg(anv->dev, "Creating IOCQ");
        ret = apple_nvme_create_cq(anv);
        if (ret)
                goto out;
        dev_dbg(anv->dev, "Creating IOSQ");
        ret = apple_nvme_create_sq(anv);
        if (ret)
                goto out_remove_cq;

        apple_nvme_init_queue(&anv->ioq);
        nr_io_queues = 1;
        ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues);
        if (ret)
                goto out_remove_sq;
        if (nr_io_queues != 1) {
                ret = -ENXIO;
                goto out_remove_sq;
        }

        anv->ctrl.queue_count = nr_io_queues + 1;

        nvme_unquiesce_io_queues(&anv->ctrl);
        nvme_wait_freeze(&anv->ctrl);
        blk_mq_update_nr_hw_queues(&anv->tagset, 1);
        nvme_unfreeze(&anv->ctrl);

        if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) {
                dev_warn(anv->ctrl.device,
                         "failed to mark controller live state\n");
                ret = -ENODEV;
                goto out_remove_sq;
        }

        nvme_start_ctrl(&anv->ctrl);

        dev_dbg(anv->dev, "ANS boot and NVMe init completed.");
        return;

out_remove_sq:
        apple_nvme_remove_sq(anv);
out_remove_cq:
        apple_nvme_remove_cq(anv);
out:
        dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret);
        nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
        nvme_get_ctrl(&anv->ctrl);
        apple_nvme_disable(anv, false);
        nvme_mark_namespaces_dead(&anv->ctrl);
        if (!queue_work(nvme_wq, &anv->remove_work))
                nvme_put_ctrl(&anv->ctrl);
}

static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
{
        struct apple_nvme *anv =
                container_of(work, struct apple_nvme, remove_work);

        nvme_put_ctrl(&anv->ctrl);
        device_release_driver(anv->dev);
}

static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
        *val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
        return 0;
}

static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
        writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
        return 0;
}

static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
        *val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
        return 0;
}

static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
        struct device *dev = ctrl_to_apple_nvme(ctrl)->dev;

        return snprintf(buf, size, "%s\n", dev_name(dev));
}

static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
{
        struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);

        if (anv->ctrl.admin_q)
                blk_put_queue(anv->ctrl.admin_q);
        put_device(anv->dev);
}

static const struct nvme_ctrl_ops nvme_ctrl_ops = {
        .name = "apple-nvme",
        .module = THIS_MODULE,
        .flags = 0,
        .reg_read32 = apple_nvme_reg_read32,
        .reg_write32 = apple_nvme_reg_write32,
        .reg_read64 = apple_nvme_reg_read64,
        .free_ctrl = apple_nvme_free_ctrl,
        .get_address = apple_nvme_get_address,
};

static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
{
        struct apple_nvme *anv = data;

        flush_work(&anv->ctrl.reset_work);
        flush_work(&anv->ctrl.scan_work);
        nvme_put_ctrl(&anv->ctrl);
}

static void devm_apple_nvme_put_tag_set(void *data)
{
        blk_mq_free_tag_set(data);
}

static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
{
        int ret;

        anv->admin_tagset.ops = &apple_nvme_mq_admin_ops;
        anv->admin_tagset.nr_hw_queues = 1;
        anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH;
        anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
        anv->admin_tagset.numa_node = NUMA_NO_NODE;
        anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
        anv->admin_tagset.driver_data = &anv->adminq;

        ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
        if (ret)
                return ret;
        ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
                                       &anv->admin_tagset);
        if (ret)
                return ret;

        anv->tagset.ops = &apple_nvme_mq_ops;
        anv->tagset.nr_hw_queues = 1;
        anv->tagset.nr_maps = 1;
        /*
         * Tags are used as an index to the NVMMU and must be unique across
         * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which
         * must be marked as reserved in the IO queue.
         */
        if (anv->hw->has_lsq_nvmmu)
                anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
        anv->tagset.queue_depth = anv->hw->max_queue_depth - 1;
        anv->tagset.timeout = NVME_IO_TIMEOUT;
        anv->tagset.numa_node = NUMA_NO_NODE;
        anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
        anv->tagset.driver_data = &anv->ioq;

        ret = blk_mq_alloc_tag_set(&anv->tagset);
        if (ret)
                return ret;
        ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
                                       &anv->tagset);
        if (ret)
                return ret;

        anv->ctrl.admin_tagset = &anv->admin_tagset;
        anv->ctrl.tagset = &anv->tagset;

        return 0;
}

static int apple_nvme_queue_alloc(struct apple_nvme *anv,
                                  struct apple_nvme_queue *q)
{
        unsigned int depth = apple_nvme_queue_depth(q);
        size_t iosq_size;

        q->cqes = dmam_alloc_coherent(anv->dev,
                                      depth * sizeof(struct nvme_completion),
                                      &q->cq_dma_addr, GFP_KERNEL);
        if (!q->cqes)
                return -ENOMEM;

        if (anv->hw->has_lsq_nvmmu)
                iosq_size = depth * sizeof(struct nvme_command);
        else
                iosq_size = depth << APPLE_NVME_IOSQES;

        q->sqes = dmam_alloc_coherent(anv->dev, iosq_size,
                                      &q->sq_dma_addr, GFP_KERNEL);
        if (!q->sqes)
                return -ENOMEM;

        if (anv->hw->has_lsq_nvmmu) {
                /*
                 * We need the maximum queue depth here because the NVMMU only
                 * has a single depth configuration shared between both queues.
                 */
                q->tcbs = dmam_alloc_coherent(anv->dev,
                                              anv->hw->max_queue_depth *
                                                      sizeof(struct apple_nvmmu_tcb),
                                              &q->tcb_dma_addr, GFP_KERNEL);
                if (!q->tcbs)
                        return -ENOMEM;
        }

        /*
         * initialize phase to make sure the allocated and empty memory
         * doesn't look like a full cq already.
         */
        q->cq_phase = 1;
        return 0;
}

static void apple_nvme_detach_genpd(struct apple_nvme *anv)
{
        int i;

        if (anv->pd_count <= 1)
                return;

        for (i = anv->pd_count - 1; i >= 0; i--) {
                if (anv->pd_link[i])
                        device_link_del(anv->pd_link[i]);
                if (!IS_ERR_OR_NULL(anv->pd_dev[i]))
                        dev_pm_domain_detach(anv->pd_dev[i], true);
        }
}

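/*
 * ANS sits behind several power domains. The driver core attaches at most one
 * power domain on its own, so when more than one is described in the device
 * tree we attach them all explicitly and keep them powered via device links.
 */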
static int apple_nvme_attach_genpd(struct apple_nvme *anv)
{
        struct device *dev = anv->dev;
        int i;

        anv->pd_count = of_count_phandle_with_args(
                dev->of_node, "power-domains", "#power-domain-cells");
        if (anv->pd_count <= 1)
                return 0;

        anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev),
                                   GFP_KERNEL);
        if (!anv->pd_dev)
                return -ENOMEM;

        anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link),
                                    GFP_KERNEL);
        if (!anv->pd_link)
                return -ENOMEM;

        for (i = 0; i < anv->pd_count; i++) {
                anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
                if (IS_ERR(anv->pd_dev[i])) {
                        apple_nvme_detach_genpd(anv);
                        return PTR_ERR(anv->pd_dev[i]);
                }

                anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i],
                                                  DL_FLAG_STATELESS |
                                                  DL_FLAG_PM_RUNTIME |
                                                  DL_FLAG_RPM_ACTIVE);
                if (!anv->pd_link[i]) {
                        apple_nvme_detach_genpd(anv);
                        return -EINVAL;
                }
        }

        return 0;
}

static void devm_apple_nvme_mempool_destroy(void *data)
{
        mempool_destroy(data);
}

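/*
 * Allocate the controller structure and everything that does not require the
 * ANS firmware to be running (MMIO mappings, queues, DMA pools, tag sets,
 * IRQ, RTKit and SART handles). The controller itself is only brought up
 * later from the reset work.
 */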
static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct apple_nvme *anv;
        int ret;

        anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
        if (!anv)
                return ERR_PTR(-ENOMEM);

        anv->dev = get_device(dev);
        anv->adminq.is_adminq = true;
        platform_set_drvdata(pdev, anv);

        anv->hw = of_device_get_match_data(&pdev->dev);
        if (!anv->hw) {
                ret = -ENODEV;
                goto put_dev;
        }

        ret = apple_nvme_attach_genpd(anv);
        if (ret < 0) {
                dev_err_probe(dev, ret, "Failed to attach power domains");
                goto put_dev;
        }
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
                ret = -ENXIO;
                goto put_dev;
        }

        anv->irq = platform_get_irq(pdev, 0);
        if (anv->irq < 0) {
                ret = anv->irq;
                goto put_dev;
        }
        if (!anv->irq) {
                ret = -ENXIO;
                goto put_dev;
        }

        anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans");
        if (IS_ERR(anv->mmio_coproc)) {
                ret = PTR_ERR(anv->mmio_coproc);
                goto put_dev;
        }
        anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme");
        if (IS_ERR(anv->mmio_nvme)) {
                ret = PTR_ERR(anv->mmio_nvme);
                goto put_dev;
        }

        if (anv->hw->has_lsq_nvmmu) {
                anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
                anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
                anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
                anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
        } else {
                anv->adminq.sq_db = anv->mmio_nvme + NVME_REG_DBS;
                anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
                anv->ioq.sq_db = anv->mmio_nvme + NVME_REG_DBS + 8;
                anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
        }

        anv->sart = devm_apple_sart_get(dev);
        if (IS_ERR(anv->sart)) {
                ret = dev_err_probe(dev, PTR_ERR(anv->sart),
                                    "Failed to initialize SART");
                goto put_dev;
        }

        anv->reset = devm_reset_control_array_get_exclusive(anv->dev);
        if (IS_ERR(anv->reset)) {
                ret = dev_err_probe(dev, PTR_ERR(anv->reset),
                                    "Failed to get reset control");
                goto put_dev;
        }

        INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work);
        INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work);
        spin_lock_init(&anv->lock);

        ret = apple_nvme_queue_alloc(anv, &anv->adminq);
        if (ret)
                goto put_dev;
        ret = apple_nvme_queue_alloc(anv, &anv->ioq);
        if (ret)
                goto put_dev;

        anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev,
                                              NVME_CTRL_PAGE_SIZE,
                                              NVME_CTRL_PAGE_SIZE, 0);
        if (!anv->prp_page_pool) {
                ret = -ENOMEM;
                goto put_dev;
        }

        anv->prp_small_pool =
                dmam_pool_create("prp list 256", anv->dev, 256, 256, 0);
        if (!anv->prp_small_pool) {
                ret = -ENOMEM;
                goto put_dev;
        }

        WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE);
        anv->iod_mempool =
                mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size());
        if (!anv->iod_mempool) {
                ret = -ENOMEM;
                goto put_dev;
        }
        ret = devm_add_action_or_reset(anv->dev,
                        devm_apple_nvme_mempool_destroy, anv->iod_mempool);
        if (ret)
                goto put_dev;

        ret = apple_nvme_alloc_tagsets(anv);
        if (ret)
                goto put_dev;

        ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0,
                               "nvme-apple", anv);
        if (ret) {
                dev_err_probe(dev, ret, "Failed to request IRQ");
                goto put_dev;
        }

        anv->rtk =
                devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops);
        if (IS_ERR(anv->rtk)) {
                ret = dev_err_probe(dev, PTR_ERR(anv->rtk),
                                    "Failed to initialize RTKit");
                goto put_dev;
        }

        ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
                             NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS);
        if (ret) {
                dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
                goto put_dev;
        }

        return anv;
put_dev:
        apple_nvme_detach_genpd(anv);
        put_device(anv->dev);
        return ERR_PTR(ret);
}

static int apple_nvme_probe(struct platform_device *pdev)
{
        struct apple_nvme *anv;
        int ret;

        anv = apple_nvme_alloc(pdev);
        if (IS_ERR(anv))
                return PTR_ERR(anv);

        ret = nvme_add_ctrl(&anv->ctrl);
        if (ret)
                goto out_put_ctrl;

        anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL);
        if (IS_ERR(anv->ctrl.admin_q)) {
                ret = -ENOMEM;
                anv->ctrl.admin_q = NULL;
                goto out_uninit_ctrl;
        }

        nvme_reset_ctrl(&anv->ctrl);
        async_schedule(apple_nvme_async_probe, anv);

        return 0;

out_uninit_ctrl:
        nvme_uninit_ctrl(&anv->ctrl);
out_put_ctrl:
        nvme_put_ctrl(&anv->ctrl);
        apple_nvme_detach_genpd(anv);
        return ret;
}

static void apple_nvme_remove(struct platform_device *pdev)
{
        struct apple_nvme *anv = platform_get_drvdata(pdev);

        nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
        flush_work(&anv->ctrl.reset_work);
        nvme_stop_ctrl(&anv->ctrl);
        nvme_remove_namespaces(&anv->ctrl);
        apple_nvme_disable(anv, true);
        nvme_uninit_ctrl(&anv->ctrl);

        if (apple_rtkit_is_running(anv->rtk)) {
                apple_rtkit_shutdown(anv->rtk);

                writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
        }

        apple_nvme_detach_genpd(anv);
}

static void apple_nvme_shutdown(struct platform_device *pdev)
{
        struct apple_nvme *anv = platform_get_drvdata(pdev);

        apple_nvme_disable(anv, true);
        if (apple_rtkit_is_running(anv->rtk)) {
                apple_rtkit_shutdown(anv->rtk);

                writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
        }
}

static int apple_nvme_resume(struct device *dev)
{
        struct apple_nvme *anv = dev_get_drvdata(dev);

        return nvme_reset_ctrl(&anv->ctrl);
}

static int apple_nvme_suspend(struct device *dev)
{
        struct apple_nvme *anv = dev_get_drvdata(dev);
        int ret = 0;

        apple_nvme_disable(anv, true);

        if (apple_rtkit_is_running(anv->rtk)) {
                ret = apple_rtkit_shutdown(anv->rtk);

                writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
        }

        return ret;
}

static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
                                apple_nvme_resume);

static const struct apple_nvme_hw apple_nvme_t8015_hw = {
        .has_lsq_nvmmu = false,
        .max_queue_depth = 16,
};

static const struct apple_nvme_hw apple_nvme_t8103_hw = {
        .has_lsq_nvmmu = true,
        .max_queue_depth = 64,
};

static const struct of_device_id apple_nvme_of_match[] = {
        { .compatible = "apple,t8015-nvme-ans2", .data = &apple_nvme_t8015_hw },
        { .compatible = "apple,nvme-ans2", .data = &apple_nvme_t8103_hw },
        {},
};
MODULE_DEVICE_TABLE(of, apple_nvme_of_match);

static struct platform_driver apple_nvme_driver = {
        .driver = {
                .name = "nvme-apple",
                .of_match_table = apple_nvme_of_match,
                .pm = pm_sleep_ptr(&apple_nvme_pm_ops),
        },
        .probe = apple_nvme_probe,
        .remove = apple_nvme_remove,
        .shutdown = apple_nvme_shutdown,
};
module_platform_driver(apple_nvme_driver);

MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
MODULE_DESCRIPTION("Apple ANS NVM Express device driver");
MODULE_LICENSE("GPL");