Lines matching +full:mmc +full:- +full:card
Excerpts from drivers/mmc/core/queue.c, the Linux MMC block request queue. Non-matching lines are elided; remaining gaps are marked with /* ... */ comments.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2006-2007 Pierre Ossman
 */

#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "card.h"
static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
        /* Allow only 1 DCMD at a time */
        return mq->in_flight[MMC_ISSUE_DCMD];
}

void mmc_cqe_check_busy(struct mmc_queue *mq)
{
        if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
                mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
        return host->caps2 & MMC_CAP2_CQE_DCMD;
}
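/*
 * Note on the helpers above: DCMD is the CQE "direct command" slot, and
 * only one DCMD may be in flight at a time. While one is outstanding,
 * mmc_mq_queue_rq() sets MMC_CQE_DCMD_BUSY and backs off with
 * BLK_STS_RESOURCE; mmc_cqe_check_busy() clears the flag again once
 * mq->in_flight[MMC_ISSUE_DCMD] drops to zero, so dispatch can resume.
 */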
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
        struct mmc_host *host = mq->card->host;

        if (host->cqe_enabled && !host->hsq_enabled)
                return mmc_cqe_issue_type(host, req);

        if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
                return MMC_ISSUE_ASYNC;

        return MMC_ISSUE_SYNC;
}
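/*
 * Requests are classified as MMC_ISSUE_SYNC (issued and waited for
 * inline), MMC_ISSUE_ASYNC (completion arrives later) or MMC_ISSUE_DCMD
 * (a CQE direct command, e.g. a flush). With a CQE host the decision is
 * delegated to mmc_cqe_issue_type(); a host software queue (hsq) uses
 * the normal classification instead.
 */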
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
        if (!mq->recovery_needed) {
                mq->recovery_needed = true;
                schedule_work(&mq->recovery_work);
        }
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
        struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
                                                  brq.mrq);
        struct request *req = mmc_queue_req_to_req(mqrq);
        struct request_queue *q = req->q;
        struct mmc_queue *mq = q->queuedata;
        unsigned long flags;

        spin_lock_irqsave(&mq->lock, flags);
        __mmc_cqe_recovery_notifier(mq);
        spin_unlock_irqrestore(&mq->lock, flags);
}
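/*
 * mmc_cqe_recovery_notifier() is called when a request needs error
 * recovery. mq->lock guards mq->recovery_needed, and the
 * __mmc_cqe_recovery_notifier() helper ensures recovery_work is
 * scheduled only once per recovery cycle.
 */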
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
        struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
        struct mmc_request *mrq = &mqrq->brq.mrq;
        struct mmc_queue *mq = req->q->queuedata;
        struct mmc_host *host = mq->card->host;
        enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
        bool recovery_needed = false;

        switch (issue_type) {
        case MMC_ISSUE_ASYNC:
        case MMC_ISSUE_DCMD:
                if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
                        if (recovery_needed)
                                mmc_cqe_recovery_notifier(mrq);
                        return BLK_EH_RESET_TIMER;
                }
                /* The request has gone already */
                return BLK_EH_DONE;
        default:
                /* Timeout is handled by mmc core */
                return BLK_EH_RESET_TIMER;
        }
}
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
{
        struct request_queue *q = req->q;
        struct mmc_queue *mq = q->queuedata;
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;
        unsigned long flags;
        bool ignore_tout;

        spin_lock_irqsave(&mq->lock, flags);
        ignore_tout = mq->recovery_needed || !host->cqe_enabled ||
                      host->hsq_enabled;
        spin_unlock_irqrestore(&mq->lock, flags);

        return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}
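/*
 * The block layer timeout is only acted upon for CQE hosts: if recovery
 * is already running, CQE is disabled, or hsq is in use, the timer is
 * simply reset and the mmc core's own timeout handling takes over;
 * otherwise the timeout is forwarded to mmc_cqe_timed_out() above.
 */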
static void mmc_mq_recovery_handler(struct work_struct *work)
{
        struct mmc_queue *mq = container_of(work, struct mmc_queue,
                                            recovery_work);
        struct request_queue *q = mq->queue;
        struct mmc_host *host = mq->card->host;

        mmc_get_card(mq->card, &mq->ctx);

        mq->in_recovery = true;

        if (host->cqe_enabled && !host->hsq_enabled)
                mmc_blk_cqe_recovery(mq);
        else
                mmc_blk_mq_recovery(mq);

        mq->in_recovery = false;

        spin_lock_irq(&mq->lock);
        mq->recovery_needed = false;
        spin_unlock_irq(&mq->lock);

        if (host->hsq_enabled)
                host->cqe_ops->cqe_recovery_finish(host);

        mmc_put_card(mq->card, &mq->ctx);

        blk_mq_run_hw_queues(q, true);
}
static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned int max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        blk_queue_max_discard_sectors(q, max_discard);
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = SECTOR_SIZE;
        if (mmc_can_secure_erase_trim(card))
                blk_queue_max_secure_erase_sectors(q, max_discard);
        if (mmc_can_trim(card) && card->erased_byte == 0)
                blk_queue_max_write_zeroes_sectors(q, max_discard);
}
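/*
 * card->pref_erase is in 512-byte sectors, so shifting left by 9 turns
 * it into the byte granularity the block layer expects. For example, a
 * card with pref_erase = 1024 advertises a 1024 * 512 B = 512 KiB
 * discard granularity; if pref_erase exceeded max_discard, the
 * granularity falls back to a single 512-byte sector instead.
 */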
#define MMC_DMA_MAP_MERGE_SEGMENTS      512

static unsigned short mmc_get_max_segments(struct mmc_host *host)
{
        return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
                                         host->max_segs;
}
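/*
 * When the DMA layer can merge scatterlist entries (i.e. an IOMMU
 * provides a merge boundary), the queue advertises
 * MMC_DMA_MAP_MERGE_SEGMENTS segments instead of the host's native
 * max_segs: dma_map_sg() will fold the extra entries back into what the
 * controller can actually handle.
 */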
static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
                               unsigned int hctx_idx, unsigned int numa_node)
{
        struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
        struct mmc_queue *mq = set->driver_data;
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;

        mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
        if (!mq_rq->sg)
                return -ENOMEM;

        return 0;
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
                                unsigned int hctx_idx)
{
        struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

        kfree(mq_rq->sg);
        mq_rq->sg = NULL;
}
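/*
 * The scatterlist for each request is allocated once, when the tag set
 * is created, and freed when it is torn down: nothing is allocated on
 * the I/O dispatch path itself.
 */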
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
        struct request *req = bd->rq;
        struct request_queue *q = req->q;
        struct mmc_queue *mq = q->queuedata;
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;
        enum mmc_issue_type issue_type;
        enum mmc_issued issued;
        bool get_card, cqe_retune_ok;
        blk_status_t ret;

        if (mmc_card_removed(mq->card)) {
                req->rq_flags |= RQF_QUIET;
                return BLK_STS_IOERR;
        }

        issue_type = mmc_issue_type(mq, req);

        spin_lock_irq(&mq->lock);

        if (mq->recovery_needed || mq->busy) {
                spin_unlock_irq(&mq->lock);
                return BLK_STS_RESOURCE;
        }

        switch (issue_type) {
        case MMC_ISSUE_DCMD:
                if (mmc_cqe_dcmd_busy(mq)) {
                        mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
                        spin_unlock_irq(&mq->lock);
                        return BLK_STS_RESOURCE;
                }
                break;
        case MMC_ISSUE_ASYNC:
                /* Limit the number of in-flight requests for a host software queue */
                if (host->hsq_enabled && mq->in_flight[issue_type] > host->hsq_depth) {
                        spin_unlock_irq(&mq->lock);
                        return BLK_STS_RESOURCE;
                }
                break;
        default:
                /*
                 * Timeouts are handled by mmc core, and we don't have a host
                 * API to abort requests, so we can't handle the timeout anyway.
                 * However, when the timeout happens, blk_mq_complete_request()
                 * no longer works (to stop the request disappearing under us).
                 * To avoid racing with that, set a large timeout.
                 */
                req->timeout = 600 * HZ;
                break;
        }

        /* Parallel dispatch of requests is not supported at the moment */
        mq->busy = true;

        mq->in_flight[issue_type] += 1;
        get_card = (mmc_tot_in_flight(mq) == 1);
        cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

        spin_unlock_irq(&mq->lock);

        if (!(req->rq_flags & RQF_DONTPREP)) {
                req_to_mmc_queue_req(req)->retries = 0;
                req->rq_flags |= RQF_DONTPREP;
        }

        if (get_card)
                mmc_get_card(card, &mq->ctx);

        if (host->cqe_enabled) {
                host->retune_now = host->need_retune && cqe_retune_ok &&
                                   !host->hold_retune;
        }

        blk_mq_start_request(req);
        issued = mmc_blk_mq_issue_rq(mq, req);
        /* ... set 'ret' from 'issued': MMC_REQ_BUSY maps to BLK_STS_RESOURCE,
           MMC_REQ_FAILED_TO_START to BLK_STS_IOERR, else BLK_STS_OK ... */

        if (issued != MMC_REQ_STARTED) {
                bool put_card = false;

                spin_lock_irq(&mq->lock);
                mq->in_flight[issue_type] -= 1;
                if (mmc_tot_in_flight(mq) == 0)
                        put_card = true;
                mq->busy = false;
                spin_unlock_irq(&mq->lock);
                if (put_card)
                        mmc_put_card(card, &mq->ctx);
        } else {
                WRITE_ONCE(mq->busy, false);
        }

        return ret;
}
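/*
 * Dispatch summary: BLK_STS_RESOURCE asks the block layer to retry the
 * request later (recovery pending, DCMD slot busy, or the hsq depth
 * reached), while BLK_STS_IOERR fails it outright (card removed). The
 * mq->busy flag serialises dispatch, since parallel dispatch is not
 * supported.
 */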
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        unsigned int block_size = 512;

        blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

        if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
                blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
        blk_queue_max_hw_sectors(mq->queue,
                min(host->max_blk_count, host->max_req_size / 512));
        if (host->can_dma_map_merge)
                WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
                                                        mmc_dev(host)),
                     "merging was advertised but not possible");
        blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

        if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
                block_size = card->ext_csd.data_sector_size;
                WARN_ON(block_size != 512 && block_size != 4096);
        }

        blk_queue_logical_block_size(mq->queue, block_size);
        /*
         * When blk_queue_can_use_dma_map_merging() succeeds it sets the
         * virt boundary via blk_queue_virt_boundary(), in which case the
         * max segment size must not also be set.
         */
        if (!host->can_dma_map_merge)
                blk_queue_max_segment_size(mq->queue,
                        round_down(host->max_seg_size, block_size));

        dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

        INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
        INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

        mutex_init(&mq->complete_lock);

        init_waitqueue_head(&mq->wait);

        mmc_crypto_setup_queue(mq->queue, host);
}
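/*
 * Worked example (hypothetical host values, for illustration only): with
 * host->max_blk_count = 65535 and host->max_req_size = 524288, the queue
 * gets min(65535, 524288 / 512) = 1024 sectors, i.e. 512 KiB per request.
 */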
static inline bool mmc_merge_capable(struct mmc_host *host)
{
        return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise an MMC card request queue.
 */
struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        struct gendisk *disk;
        int ret;

        mq->card = card;

        spin_lock_init(&mq->lock);

        memset(&mq->tag_set, 0, sizeof(mq->tag_set));
        mq->tag_set.ops = &mmc_mq_ops;
        /*
         * The queue depth for CQE must match the hardware because the request
         * tag is used to index the hardware queue.
         */
        if (host->cqe_enabled && !host->hsq_enabled)
                mq->tag_set.queue_depth =
                        min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
        else
                mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
        mq->tag_set.numa_node = NUMA_NO_NODE;
        mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
        mq->tag_set.nr_hw_queues = 1;
        mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
        mq->tag_set.driver_data = mq;

        /*
         * Since blk_mq_alloc_tag_set() calls .init_request() of our callback,
         * host->can_dma_map_merge must be set first so that
         * mmc_get_max_segments() reports the right number of segments.
         */
        if (mmc_merge_capable(host) &&
            host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
            dma_get_merge_boundary(mmc_dev(host)))
                host->can_dma_map_merge = 1;
        else
                host->can_dma_map_merge = 0;

        ret = blk_mq_alloc_tag_set(&mq->tag_set);
        if (ret)
                return ERR_PTR(ret);

        disk = blk_mq_alloc_disk(&mq->tag_set, mq);
        if (IS_ERR(disk)) {
                blk_mq_free_tag_set(&mq->tag_set);
                return disk;
        }
        mq->queue = disk->queue;

        if (mmc_host_is_spi(host) && host->use_spi_crc)
                blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
        blk_queue_rq_timeout(mq->queue, 60 * HZ);

        mmc_setup_queue(mq, card);

        return disk;
}
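/*
 * Illustrative caller sketch (simplified and hypothetical, not part of
 * this file; 'md' stands in for the block driver's per-device context):
 *
 *      struct gendisk *disk = mmc_init_queue(&md->queue, card);
 *
 *      if (IS_ERR(disk))
 *              return ERR_CAST(disk);
 *      md->disk = disk;
 *
 * On failure the tag set has already been freed, so the caller only has
 * to propagate the error.
 */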
void mmc_queue_suspend(struct mmc_queue *mq)
{
        blk_mq_quiesce_queue(mq->queue);

        /*
         * The host remains claimed while there are outstanding requests, so
         * simply claiming and releasing here ensures there are none.
         */
        mmc_claim_host(mq->card->host);
        mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
        blk_mq_unquiesce_queue(mq->queue);
}
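/*
 * blk_mq_quiesce_queue() waits for any ->queue_rq() calls in progress
 * and prevents new dispatches; claiming and then releasing the host
 * additionally waits out requests the core is still processing, so the
 * queue is fully idle once mmc_queue_suspend() returns.
 */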
void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;

        /*
         * The legacy code handled the possibility of being suspended,
         * so do that here too.
         */
        if (blk_queue_quiesced(q))
                blk_mq_unquiesce_queue(q);

        /*
         * If the recovery completes the last (and only remaining) request in
         * the queue, and the card has been removed, we could end up here with
         * the recovery not quite finished yet, so cancel it.
         */
        cancel_work_sync(&mq->recovery_work);

        blk_mq_free_tag_set(&mq->tag_set);

        /* A complete_work may still be pending at this point; flush it. */
        flush_work(&mq->complete_work);

        mq->card = NULL;
}
/*
 * Prepare the sg list(s) to be handed off to the host driver.
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        struct request *req = mmc_queue_req_to_req(mqrq);

        return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
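/*
 * blk_rq_map_sg() returns the number of scatterlist entries actually
 * used, which the block driver records as the transfer's sg_len before
 * handing the list to the host controller for DMA mapping.
 */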