Lines matching "+full:mmc +full:-card" in drivers/mmc/core/queue.c (matching lines only)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2006-2007 Pierre Ossman
 */

#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "card.h"
/* in mmc_cqe_dcmd_busy() */
        return mq->in_flight[MMC_ISSUE_DCMD];

/* in mmc_cqe_check_busy() */
        if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
                mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

        mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;

/* in mmc_cqe_can_dcmd() */
        return host->caps2 & MMC_CAP2_CQE_DCMD;
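/*
 * For context, a sketch of how mmc_cqe_can_dcmd() is typically consumed when
 * a request is classified for a CQE host. This reconstruction follows the
 * mainline helper mmc_cqe_issue_type(), which sits between the functions
 * above and below in the file; it is not part of the matched lines, so treat
 * it as an assumption.
 */
static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
                                              struct request *req)
{
        switch (req_op(req)) {
        case REQ_OP_DRV_IN:
        case REQ_OP_DRV_OUT:
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                return MMC_ISSUE_SYNC;
        case REQ_OP_FLUSH:
                /* Flush goes out as a DCMD only if the host advertises it */
                return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
        default:
                return MMC_ISSUE_ASYNC;
        }
}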
/* in mmc_issue_type() */
        struct mmc_host *host = mq->card->host;

        if (mq->use_cqe && !host->hsq_enabled)
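                /*
                 * Reconstruction of the elided remainder of mmc_issue_type()
                 * (assumption based on mainline, not in the matched lines):
                 * CQE hosts defer to mmc_cqe_issue_type(), reads and writes
                 * are issued asynchronously, everything else synchronously.
                 */
                return mmc_cqe_issue_type(host, req);

        if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
                return MMC_ISSUE_ASYNC;

        return MMC_ISSUE_SYNC;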
/* in __mmc_cqe_recovery_notifier() */
        if (!mq->recovery_needed) {
                mq->recovery_needed = true;
                schedule_work(&mq->recovery_work);
        }

/* in mmc_cqe_recovery_notifier() */
        struct request_queue *q = req->q;
        struct mmc_queue *mq = q->queuedata;

        spin_lock_irqsave(&mq->lock, flags);
        __mmc_cqe_recovery_notifier(mq);
        spin_unlock_irqrestore(&mq->lock, flags);
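/*
 * How this gets invoked (illustrative assumption, not in the matched lines):
 * a CQE host controller driver that detects an error on a queued request
 * calls the notifier attached to the mmc_request, which lands in
 * mmc_cqe_recovery_notifier() above. example_cqe_error_irq is a hypothetical
 * name for such a call site.
 */
static void example_cqe_error_irq(struct mmc_request *mrq)
{
        if (mrq->recovery_notifier)
                mrq->recovery_notifier(mrq);
}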
/* in mmc_cqe_timed_out() */
        struct mmc_request *mrq = &mqrq->brq.mrq;
        struct mmc_queue *mq = req->q->queuedata;
        struct mmc_host *host = mq->card->host;

        if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
                /* ... */
        }
        /* ... */
        /* Timeout is handled by mmc core */
/* in mmc_mq_timed_out() */
        struct request_queue *q = req->q;
        struct mmc_queue *mq = q->queuedata;
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;

        spin_lock_irqsave(&mq->lock, flags);
        ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
        spin_unlock_irqrestore(&mq->lock, flags);
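        /*
         * The return statement is elided from the matched lines; it follows
         * directly from ignore_tout and, per mainline (assumption), reads:
         */
        return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);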
/* in mmc_mq_recovery_handler() */
        struct request_queue *q = mq->queue;
        struct mmc_host *host = mq->card->host;

        mmc_get_card(mq->card, &mq->ctx);

        mq->in_recovery = true;

        if (mq->use_cqe && !host->hsq_enabled)
                /* ... */

        mq->in_recovery = false;

        spin_lock_irq(&mq->lock);
        mq->recovery_needed = false;
        spin_unlock_irq(&mq->lock);

        if (host->hsq_enabled)
                host->cqe_ops->cqe_recovery_finish(host);

        mmc_put_card(mq->card, &mq->ctx);
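        /*
         * Tail of the handler (assumption from mainline, not in the matched
         * lines): once the card is released, the hardware queues are kicked
         * again so that requests held back during recovery get redispatched.
         */
        blk_mq_run_hw_queues(q, true);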
static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        max_discard = mmc_calc_max_discard(card);
        /* ... */
        q->limits.discard_granularity = card->pref_erase << 9;

        /* The granularity must not exceed the maximum discard size */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = SECTOR_SIZE;
        if (mmc_can_secure_erase_trim(card))
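                /*
                 * Completion of the elided body (assumption from mainline for
                 * this kernel vintage): secure erase/trim support is
                 * advertised via a queue flag. Earlier elided lines similarly
                 * mark the queue as discard-capable and cap the discard size,
                 * roughly:
                 *   blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
                 *   blk_queue_max_discard_sectors(q, max_discard);
                 */
                blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}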
/* in mmc_get_max_segments() */
        return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
                                         host->max_segs;
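/*
 * MMC_DMA_MAP_MERGE_SEGMENTS is defined near the top of this file but is not
 * among the matched lines; in mainline it is (assumption):
 */
#define MMC_DMA_MAP_MERGE_SEGMENTS      512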
/**
 * mmc_init_request() - initialize the MMC-specific per-request data
 */

/* in __mmc_init_request() */
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;

        mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
        if (!mq_rq->sg)
                return -ENOMEM;
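/*
 * mmc_alloc_sg() is a local helper that does not appear in the matched lines.
 * A minimal sketch of what it has to do, assuming the mainline shape: allocate
 * an array of scatterlist entries and initialise it.
 */
static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
{
        struct scatterlist *sg;

        sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
        if (sg)
                sg_init_table(sg, sg_len);

        return sg;
}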
/* in mmc_exit_request() */
        kfree(mq_rq->sg);
        mq_rq->sg = NULL;

/* in mmc_mq_init_request() */
        return __mmc_init_request(set->driver_data, req, GFP_KERNEL);

/* in mmc_mq_exit_request() */
        struct mmc_queue *mq = set->driver_data;

        mmc_exit_request(mq->queue, req);
/* in mmc_mq_queue_rq() */
        struct request *req = bd->rq;
        struct request_queue *q = req->q;
        struct mmc_queue *mq = q->queuedata;
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;

        if (mmc_card_removed(mq->card)) {
                req->rq_flags |= RQF_QUIET;
                /* ... */
        }

        spin_lock_irq(&mq->lock);

        if (mq->recovery_needed || mq->busy) {
                spin_unlock_irq(&mq->lock);
                /* ... */
        }
        /* ... */
                mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
                spin_unlock_irq(&mq->lock);
        /* ... */
        /*
         * For the MMC host software queue, we only allow 2 requests in
         * flight to avoid a long latency.
         */
        if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
                spin_unlock_irq(&mq->lock);
        /* ... */
        /*
         * Timeouts are handled by mmc core, and we don't have a host API to
         * abort requests, so set a large block layer timeout to avoid racing
         * with normal completion.
         */
        req->timeout = 600 * HZ;
        /* ... */
        mq->busy = true;

        mq->in_flight[issue_type] += 1;
        /* ... */
        spin_unlock_irq(&mq->lock);

        if (!(req->rq_flags & RQF_DONTPREP)) {
                req_to_mmc_queue_req(req)->retries = 0;
                req->rq_flags |= RQF_DONTPREP;
        }

        mmc_get_card(card, &mq->ctx);

        if (mq->use_cqe) {
                host->retune_now = host->need_retune && cqe_retune_ok &&
                                   !host->hold_retune;
        }
        /* ... */
        /* Unwind the accounting when the request could not be started */
        spin_lock_irq(&mq->lock);
        mq->in_flight[issue_type] -= 1;
        /* ... */
        mq->busy = false;
        spin_unlock_irq(&mq->lock);

        mmc_put_card(card, &mq->ctx);
        /* ... */
        WRITE_ONCE(mq->busy, false);
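        /*
         * For context, the issue step that feeds the unwind shown above maps
         * the driver's result onto blk-mq status codes. Per mainline
         * (assumption, not in the matched lines) it looks roughly like:
         */
        switch (issued) {
        case MMC_REQ_BUSY:
                ret = BLK_STS_RESOURCE;
                break;
        case MMC_REQ_FAILED_TO_START:
                ret = BLK_STS_IOERR;
                break;
        default:
                ret = BLK_STS_OK;
                break;
        }
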
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        unsigned int block_size = 512;

        blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

        if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
                blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
        blk_queue_max_hw_sectors(mq->queue,
                                 min(host->max_blk_count, host->max_req_size / 512));
        if (host->can_dma_map_merge)
                WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
                                                        mmc_dev(host)),
                     "merging was advertised but not possible");
        blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

        if (mmc_card_mmc(card))
                block_size = card->ext_csd.data_sector_size;

        blk_queue_logical_block_size(mq->queue, block_size);
        /*
         * When DMA map merging is used, blk_queue_can_use_dma_map_merging()
         * already sets the virt boundary via blk_queue_virt_boundary(), so
         * the mmc core must not also call blk_queue_max_segment_size().
         */
        if (!host->can_dma_map_merge)
                blk_queue_max_segment_size(mq->queue,
                        round_down(host->max_seg_size, block_size));

        dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

        INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
        INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

        mutex_init(&mq->complete_lock);

        init_waitqueue_head(&mq->wait);
}
/* in mmc_merge_capable() */
        return host->caps2 & MMC_CAP2_MERGE_CAPABLE;

/* Set queue depth to get a reasonable value for q->nr_requests */
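/* The define itself is not among the matched lines; in mainline it is 64. */
#define MMC_QUEUE_DEPTH 64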
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach to this queue
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        int ret;

        mq->card = card;
        mq->use_cqe = host->cqe_enabled;

        spin_lock_init(&mq->lock);

        memset(&mq->tag_set, 0, sizeof(mq->tag_set));
        mq->tag_set.ops = &mmc_mq_ops;

        if (mq->use_cqe && !host->hsq_enabled)
                mq->tag_set.queue_depth =
                        min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
        else
                mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
        mq->tag_set.numa_node = NUMA_NO_NODE;
        mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
        mq->tag_set.nr_hw_queues = 1;
        mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
        mq->tag_set.driver_data = mq;

        /*
         * Allocating the tag set below ends up calling .init_request(), so
         * host->can_dma_map_merge must be set before max_segs is taken from
         * mmc_get_max_segments().
         */
        if (mmc_merge_capable(host) &&
            host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
            dma_get_merge_boundary(mmc_dev(host)))
                host->can_dma_map_merge = 1;
        else
                host->can_dma_map_merge = 0;

        ret = blk_mq_alloc_tag_set(&mq->tag_set);
        if (ret)
                return ret;

        mq->queue = blk_mq_init_queue(&mq->tag_set);
        if (IS_ERR(mq->queue)) {
                ret = PTR_ERR(mq->queue);
                goto free_tag_set;
        }

        if (mmc_host_is_spi(host) && host->use_spi_crc)
                blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);

        mq->queue->queuedata = mq;
        blk_queue_rq_timeout(mq->queue, 60 * HZ);

        mmc_setup_queue(mq, card);
        return 0;

free_tag_set:
        blk_mq_free_tag_set(&mq->tag_set);
        return ret;
}
/* in mmc_queue_suspend() */
        blk_mq_quiesce_queue(mq->queue);
        /* ... */
        /*
         * Claiming and then releasing the host waits for any dispatched
         * request to finish, so nothing is in flight once this returns.
         */
        mmc_claim_host(mq->card->host);
        mmc_release_host(mq->card->host);

/* in mmc_queue_resume() */
        blk_mq_unquiesce_queue(mq->queue);
/* in mmc_cleanup_queue() */
        struct request_queue *q = mq->queue;
        /* ... */
        blk_mq_free_tag_set(&mq->tag_set);

        /*
         * Make sure any deferred completion work has run before the queue
         * and its per-request data go away.
         */
        flush_work(&mq->complete_work);

        mq->card = NULL;

/* in mmc_queue_map_sg() */
        return blk_rq_map_sg(mq->queue, req, mqrq->sg);
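/*
 * Caller-side sketch (illustrative assumption, not part of this file): the
 * MMC block driver owns a struct mmc_queue per card and drives it through the
 * functions above, tearing it down with mmc_cleanup_queue() on removal.
 * mmc_blk_probe_example is a hypothetical name and error handling is trimmed.
 */
static int mmc_blk_probe_example(struct mmc_card *card)
{
        struct mmc_queue *mq;
        int ret;

        mq = kzalloc(sizeof(*mq), GFP_KERNEL);
        if (!mq)
                return -ENOMEM;

        ret = mmc_init_queue(mq, card);         /* allocates tag set + queue */
        if (ret) {
                kfree(mq);
                return ret;
        }

        /* ... attach a gendisk to mq->queue and register it ... */

        return 0;
}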