1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/drivers/mmc/core/core.c
5 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
7 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
23 #include <linux/fault-inject.h>
28 #include <linux/mmc/card.h>
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/mmc.h>
31 #include <linux/mmc/sd.h>
32 #include <linux/mmc/slot-gpio.h>
35 #include <trace/events/mmc.h>
38 #include "card.h"
49 /* The max erase timeout, used when host->max_busy_timeout isn't specified */
84 struct mmc_command *cmd = mrq->cmd; in mmc_should_fail_request()
85 struct mmc_data *data = mrq->data; in mmc_should_fail_request()
87 -ETIMEDOUT, in mmc_should_fail_request()
88 -EILSEQ, in mmc_should_fail_request()
89 -EIO, in mmc_should_fail_request()
95 if ((cmd && cmd->error) || data->error || in mmc_should_fail_request()
96 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) in mmc_should_fail_request()
99 data->error = data_errors[get_random_u32_below(ARRAY_SIZE(data_errors))]; in mmc_should_fail_request()
100 data->bytes_xfered = get_random_u32_below(data->bytes_xfered >> 9) << 9; in mmc_should_fail_request()
114 if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion)) in mmc_complete_cmd()
115 complete_all(&mrq->cmd_completion); in mmc_complete_cmd()
120 if (!mrq->cap_cmd_during_tfr) in mmc_command_done()
126 mmc_hostname(host), mrq->cmd->opcode); in mmc_command_done()
131 * mmc_request_done - finish processing an MMC request
132 * @host: MMC host which completed request
133  * @mrq: MMC request which completed
135 * MMC drivers should call this function when they have completed
140 struct mmc_command *cmd = mrq->cmd; in mmc_request_done()
141 int err = cmd->error; in mmc_request_done()
143 /* Flag re-tuning needed on CRC errors */ in mmc_request_done()
144 if (!mmc_op_tuning(cmd->opcode) && in mmc_request_done()
145 !host->retune_crc_disable && in mmc_request_done()
146 (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || in mmc_request_done()
147 (mrq->data && mrq->data->error == -EILSEQ) || in mmc_request_done()
148 (mrq->stop && mrq->stop->error == -EILSEQ))) in mmc_request_done()
151 if (err && cmd->retries && mmc_host_is_spi(host)) { in mmc_request_done()
152 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND) in mmc_request_done()
153 cmd->retries = 0; in mmc_request_done()
156 if (host->ongoing_mrq == mrq) in mmc_request_done()
157 host->ongoing_mrq = NULL; in mmc_request_done()
167 * - There was no error, OK fine then in mmc_request_done()
168 * - We are not doing some kind of retry in mmc_request_done()
169 * - The card was removed (...so just complete everything no matter in mmc_request_done()
172 if (!err || !cmd->retries || mmc_card_removed(host->card)) { in mmc_request_done()
175 if (!host->ongoing_mrq) in mmc_request_done()
176 led_trigger_event(host->led, LED_OFF); in mmc_request_done()
178 if (mrq->sbc) { in mmc_request_done()
180 mmc_hostname(host), mrq->sbc->opcode, in mmc_request_done()
181 mrq->sbc->error, in mmc_request_done()
182 mrq->sbc->resp[0], mrq->sbc->resp[1], in mmc_request_done()
183 mrq->sbc->resp[2], mrq->sbc->resp[3]); in mmc_request_done()
187 mmc_hostname(host), cmd->opcode, err, in mmc_request_done()
188 cmd->resp[0], cmd->resp[1], in mmc_request_done()
189 cmd->resp[2], cmd->resp[3]); in mmc_request_done()
191 if (mrq->data) { in mmc_request_done()
194 mrq->data->bytes_xfered, mrq->data->error); in mmc_request_done()
197 if (mrq->stop) { in mmc_request_done()
199 mmc_hostname(host), mrq->stop->opcode, in mmc_request_done()
200 mrq->stop->error, in mmc_request_done()
201 mrq->stop->resp[0], mrq->stop->resp[1], in mmc_request_done()
202 mrq->stop->resp[2], mrq->stop->resp[3]); in mmc_request_done()
206 * Request starter must handle retries - see in mmc_request_done()
209 if (mrq->done) in mmc_request_done()
210 mrq->done(mrq); in mmc_request_done()
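/*
 * Usage sketch (illustrative, not part of core.c): a host controller driver
 * normally calls mmc_request_done() from its interrupt handler once the
 * hardware has finished the request handed to it via ->request(). The
 * struct my_host, the MY_STATUS/MY_RESP0 registers and the CRC bit are
 * hypothetical; only mmc_request_done() is the real core API.
 */
struct my_host {				/* hypothetical per-controller data */
	struct mmc_host *mmc;
	struct mmc_request *mrq;		/* request saved in ->request() */
	void __iomem *base;
};

static irqreturn_t my_mmc_irq(int irq, void *dev_id)
{
	struct my_host *h = dev_id;
	struct mmc_request *mrq = h->mrq;
	u32 status = readl(h->base + MY_STATUS);	/* hypothetical register */

	mrq->cmd->resp[0] = readl(h->base + MY_RESP0);	/* hypothetical register */
	if (status & MY_STATUS_CRC_ERR)			/* hypothetical bit */
		mrq->cmd->error = -EILSEQ;	/* flags re-tuning, see above */

	h->mrq = NULL;
	mmc_request_done(h->mmc, mrq);		/* hand the request back to core */
	return IRQ_HANDLED;
}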
222 mrq->cmd->error = err; in __mmc_start_request()
228 * For sdio rw commands we must wait for card busy otherwise some in __mmc_start_request()
232 if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) && in __mmc_start_request()
233 host->ops->card_busy) { in __mmc_start_request()
236 while (host->ops->card_busy(host) && --tries) in __mmc_start_request()
240 mrq->cmd->error = -EBUSY; in __mmc_start_request()
246 if (mrq->cap_cmd_during_tfr) { in __mmc_start_request()
247 host->ongoing_mrq = mrq; in __mmc_start_request()
252 reinit_completion(&mrq->cmd_completion); in __mmc_start_request()
257 if (host->cqe_on) in __mmc_start_request()
258 host->cqe_ops->cqe_off(host); in __mmc_start_request()
260 host->ops->request(host, mrq); in __mmc_start_request()
266 if (mrq->sbc) { in mmc_mrq_pr_debug()
268 mmc_hostname(host), mrq->sbc->opcode, in mmc_mrq_pr_debug()
269 mrq->sbc->arg, mrq->sbc->flags); in mmc_mrq_pr_debug()
272 if (mrq->cmd) { in mmc_mrq_pr_debug()
275 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags); in mmc_mrq_pr_debug()
278 mmc_hostname(host), mrq->tag, mrq->data->blk_addr); in mmc_mrq_pr_debug()
281 if (mrq->data) { in mmc_mrq_pr_debug()
284 mmc_hostname(host), mrq->data->blksz, in mmc_mrq_pr_debug()
285 mrq->data->blocks, mrq->data->flags, in mmc_mrq_pr_debug()
286 mrq->data->timeout_ns / 1000000, in mmc_mrq_pr_debug()
287 mrq->data->timeout_clks); in mmc_mrq_pr_debug()
290 if (mrq->stop) { in mmc_mrq_pr_debug()
292 mmc_hostname(host), mrq->stop->opcode, in mmc_mrq_pr_debug()
293 mrq->stop->arg, mrq->stop->flags); in mmc_mrq_pr_debug()
302 if (mrq->cmd) { in mmc_mrq_prep()
303 mrq->cmd->error = 0; in mmc_mrq_prep()
304 mrq->cmd->mrq = mrq; in mmc_mrq_prep()
305 mrq->cmd->data = mrq->data; in mmc_mrq_prep()
307 if (mrq->sbc) { in mmc_mrq_prep()
308 mrq->sbc->error = 0; in mmc_mrq_prep()
309 mrq->sbc->mrq = mrq; in mmc_mrq_prep()
311 if (mrq->data) { in mmc_mrq_prep()
312 if (mrq->data->blksz > host->max_blk_size || in mmc_mrq_prep()
313 mrq->data->blocks > host->max_blk_count || in mmc_mrq_prep()
314 mrq->data->blocks * mrq->data->blksz > host->max_req_size) in mmc_mrq_prep()
315 return -EINVAL; in mmc_mrq_prep()
317 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) in mmc_mrq_prep()
318 sz += sg->length; in mmc_mrq_prep()
319 if (sz != mrq->data->blocks * mrq->data->blksz) in mmc_mrq_prep()
320 return -EINVAL; in mmc_mrq_prep()
322 mrq->data->error = 0; in mmc_mrq_prep()
323 mrq->data->mrq = mrq; in mmc_mrq_prep()
324 if (mrq->stop) { in mmc_mrq_prep()
325 mrq->data->stop = mrq->stop; in mmc_mrq_prep()
326 mrq->stop->error = 0; in mmc_mrq_prep()
327 mrq->stop->mrq = mrq; in mmc_mrq_prep()
338 if (mrq->cmd->has_ext_addr) in mmc_start_request()
339 mmc_send_ext_addr(host, mrq->cmd->ext_addr); in mmc_start_request()
341 init_completion(&mrq->cmd_completion); in mmc_start_request()
345 if (mmc_card_removed(host->card)) in mmc_start_request()
346 return -ENOMEDIUM; in mmc_start_request()
350 WARN_ON(!host->claimed); in mmc_start_request()
356 if (host->uhs2_sd_tran) in mmc_start_request()
359 led_trigger_event(host->led, LED_FULL); in mmc_start_request()
368 complete(&mrq->completion); in mmc_wait_done()
373 struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq); in mmc_wait_ongoing_tfr_cmd()
379 if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion)) in mmc_wait_ongoing_tfr_cmd()
380 wait_for_completion(&ongoing_mrq->cmd_completion); in mmc_wait_ongoing_tfr_cmd()
389 init_completion(&mrq->completion); in __mmc_start_req()
390 mrq->done = mmc_wait_done; in __mmc_start_req()
394 mrq->cmd->error = err; in __mmc_start_req()
396 complete(&mrq->completion); in __mmc_start_req()
407 wait_for_completion(&mrq->completion); in mmc_wait_for_req_done()
409 cmd = mrq->cmd; in mmc_wait_for_req_done()
411 if (!cmd->error || !cmd->retries || in mmc_wait_for_req_done()
412 mmc_card_removed(host->card)) in mmc_wait_for_req_done()
418 mmc_hostname(host), cmd->opcode, cmd->error); in mmc_wait_for_req_done()
419 cmd->retries--; in mmc_wait_for_req_done()
420 cmd->error = 0; in mmc_wait_for_req_done()
429 * mmc_cqe_start_req - Start a CQE request.
430 * @host: MMC host to start the request
433  * Start the request, re-tuning if needed and possible. Returns an error
434 * code if the request fails to start or -EBUSY if CQE is busy.
441 * CQE cannot process re-tuning commands. Caller must hold retuning in mmc_cqe_start_req()
442 * while CQE is in use. Re-tuning can happen here only when CQE has no in mmc_cqe_start_req()
443 * active requests i.e. this is the first. Note, re-tuning will call in mmc_cqe_start_req()
444 * ->cqe_off(). in mmc_cqe_start_req()
450 mrq->host = host; in mmc_cqe_start_req()
458 if (host->uhs2_sd_tran) in mmc_cqe_start_req()
461 err = host->cqe_ops->cqe_request(host, mrq); in mmc_cqe_start_req()
470 if (mrq->cmd) { in mmc_cqe_start_req()
472 mmc_hostname(host), mrq->cmd->opcode, err); in mmc_cqe_start_req()
475 mmc_hostname(host), mrq->tag, err); in mmc_cqe_start_req()
482 * mmc_cqe_request_done - CQE has finished processing an MMC request
483 * @host: MMC host which completed request
484 * @mrq: MMC request which completed
493 /* Flag re-tuning needed on CRC errors */ in mmc_cqe_request_done()
494 if ((mrq->cmd && mrq->cmd->error == -EILSEQ) || in mmc_cqe_request_done()
495 (mrq->data && mrq->data->error == -EILSEQ)) in mmc_cqe_request_done()
500 if (mrq->cmd) { in mmc_cqe_request_done()
502 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error); in mmc_cqe_request_done()
505 mmc_hostname(host), mrq->tag); in mmc_cqe_request_done()
508 if (mrq->data) { in mmc_cqe_request_done()
511 mrq->data->bytes_xfered, mrq->data->error); in mmc_cqe_request_done()
514 mrq->done(mrq); in mmc_cqe_request_done()
519 * mmc_cqe_post_req - CQE post process of a completed MMC request
520 * @host: MMC host
521 * @mrq: MMC request to be processed
525 if (host->cqe_ops->cqe_post_req) in mmc_cqe_post_req()
526 host->cqe_ops->cqe_post_req(host, mrq); in mmc_cqe_post_req()
534 * mmc_cqe_recovery - Recover from CQE errors.
535 * @host: MMC host to recover
555 host->cqe_ops->cqe_recovery_start(host); in mmc_cqe_recovery()
563 mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO); in mmc_cqe_recovery()
572 host->cqe_ops->cqe_recovery_finish(host); in mmc_cqe_recovery()
584 * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
585 * @host: MMC host
586 * @mrq: MMC request
589 * mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
597 return completion_done(&mrq->completion); in mmc_is_req_done()
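/*
 * Usage sketch (illustrative, not part of core.c): the cap_cmd_during_tfr
 * flow. With mrq->cap_cmd_during_tfr set, mmc_wait_for_req() does not wait
 * for the whole transfer (see below), so the caller may issue other commands
 * while data is moving, poll mmc_is_req_done(), and finally block in
 * mmc_wait_for_req_done(). "mrq" is assumed to be a fully prepared data
 * request and the host to be claimed; this mirrors how mmc_test exercises
 * the feature.
 */
static void issue_with_cmds_during_tfr(struct mmc_host *host,
				       struct mmc_request *mrq)
{
	mrq->cap_cmd_during_tfr = true;

	mmc_wait_for_req(host, mrq);	/* returns without waiting for data */

	while (!mmc_is_req_done(host, mrq)) {
		/* e.g. send CMD13 status polls or other commands here */
		cond_resched();
	}

	mmc_wait_for_req_done(host, mrq);	/* final completion and retries */
}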
602 * mmc_wait_for_req - start a request and wait for completion
603 * @host: MMC host to start command
604 * @mrq: MMC request to start
606 * Start a new MMC custom command request for a host, and wait
617 if (!mrq->cap_cmd_during_tfr) in mmc_wait_for_req()
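/*
 * Usage sketch (illustrative, not part of core.c): building a single-block
 * read and submitting it synchronously with mmc_wait_for_req(). The host is
 * assumed to be claimed by the caller, "buf" to be a kernel buffer of at
 * least 512 bytes, and blk_addr to be a block address (block-addressed card).
 */
static int read_one_block(struct mmc_card *card, u32 blk_addr, void *buf)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	cmd.opcode = MMC_READ_SINGLE_BLOCK;
	cmd.arg = blk_addr;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, buf, 512);

	mrq.cmd = &cmd;
	mrq.data = &data;

	mmc_set_data_timeout(&data, card);	/* see mmc_set_data_timeout() below */
	mmc_wait_for_req(card->host, &mrq);

	return cmd.error ? cmd.error : data.error;
}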
623 * mmc_wait_for_cmd - start a command and wait for completion
624 * @host: MMC host to start command
625 * @cmd: MMC command to start
628 * Start a new MMC command for a host, and wait for the command
636 WARN_ON(!host->claimed); in mmc_wait_for_cmd()
638 memset(cmd->resp, 0, sizeof(cmd->resp)); in mmc_wait_for_cmd()
639 cmd->retries = retries; in mmc_wait_for_cmd()
642 cmd->data = NULL; in mmc_wait_for_cmd()
646 return cmd->error; in mmc_wait_for_cmd()
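/*
 * Usage sketch (illustrative, not part of core.c): issuing a bare CMD13
 * (SEND_STATUS) with mmc_wait_for_cmd(), roughly what mmc_send_status() in
 * mmc_ops.c does. The host must already be claimed by the caller.
 */
static int query_card_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 3);	/* up to 3 retries */
	if (err)
		return err;

	*status = cmd.resp[0];	/* R1 card status (R2 format in SPI mode) */
	return 0;
}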
652 * mmc_set_data_timeout - set the timeout for a data command
654 * @card: the MMC card associated with the data transfer
657 * correct algorithm given the card type.
659 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) in mmc_set_data_timeout() argument
666 if (mmc_card_sdio(card)) { in mmc_set_data_timeout()
667 data->timeout_ns = 1000000000; in mmc_set_data_timeout()
668 data->timeout_clks = 0; in mmc_set_data_timeout()
675 mult = mmc_card_sd(card) ? 100 : 10; in mmc_set_data_timeout()
681 if (data->flags & MMC_DATA_WRITE) in mmc_set_data_timeout()
682 mult <<= card->csd.r2w_factor; in mmc_set_data_timeout()
684 data->timeout_ns = card->csd.taac_ns * mult; in mmc_set_data_timeout()
685 data->timeout_clks = card->csd.taac_clks * mult; in mmc_set_data_timeout()
690 if (mmc_card_sd(card)) { in mmc_set_data_timeout()
693 timeout_us = data->timeout_ns / 1000; in mmc_set_data_timeout()
694 if (card->host->ios.clock) in mmc_set_data_timeout()
695 timeout_us += data->timeout_clks * 1000 / in mmc_set_data_timeout()
696 (card->host->ios.clock / 1000); in mmc_set_data_timeout()
698 if (data->flags & MMC_DATA_WRITE) in mmc_set_data_timeout()
700 * The MMC spec "It is strongly recommended in mmc_set_data_timeout()
702 * timeout value even if the card indicates in mmc_set_data_timeout()
715 data->timeout_ns = limit_us * 1000; in mmc_set_data_timeout()
716 data->timeout_clks = 0; in mmc_set_data_timeout()
721 data->timeout_ns = limit_us * 1000; in mmc_set_data_timeout()
730 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) { in mmc_set_data_timeout()
731 data->timeout_ns = 600000000; in mmc_set_data_timeout()
732 data->timeout_clks = 0; in mmc_set_data_timeout()
741 if (mmc_host_is_spi(card->host)) { in mmc_set_data_timeout()
742 if (data->flags & MMC_DATA_WRITE) { in mmc_set_data_timeout()
743 if (data->timeout_ns < 1000000000) in mmc_set_data_timeout()
744 data->timeout_ns = 1000000000; /* 1s */ in mmc_set_data_timeout()
746 if (data->timeout_ns < 100000000) in mmc_set_data_timeout()
747 data->timeout_ns = 100000000; /* 100ms */ in mmc_set_data_timeout()
760 return host->claimer == ctx || in mmc_ctx_matches()
761 (!ctx && task && host->claimer->task == task); in mmc_ctx_matches()
768 if (!host->claimer) { in mmc_ctx_set_claimer()
770 host->claimer = ctx; in mmc_ctx_set_claimer()
772 host->claimer = &host->default_ctx; in mmc_ctx_set_claimer()
775 host->claimer->task = task; in mmc_ctx_set_claimer()
779 * __mmc_claim_host - exclusively claim a host
780 * @host: mmc host to claim
786 * dereference a non-zero value then this will return prematurely with
787 * that non-zero value without acquiring the lock. Returns zero
801 add_wait_queue(&host->wq, &wait); in __mmc_claim_host()
802 spin_lock_irqsave(&host->lock, flags); in __mmc_claim_host()
806 if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task)) in __mmc_claim_host()
808 spin_unlock_irqrestore(&host->lock, flags); in __mmc_claim_host()
810 spin_lock_irqsave(&host->lock, flags); in __mmc_claim_host()
814 host->claimed = 1; in __mmc_claim_host()
816 host->claim_cnt += 1; in __mmc_claim_host()
817 if (host->claim_cnt == 1) in __mmc_claim_host()
820 wake_up(&host->wq); in __mmc_claim_host()
821 spin_unlock_irqrestore(&host->lock, flags); in __mmc_claim_host()
822 remove_wait_queue(&host->wq, &wait); in __mmc_claim_host()
832 * mmc_release_host - release a host
833 * @host: mmc host to release
835  *	Release an MMC host, allowing others to claim the host
842 WARN_ON(!host->claimed); in mmc_release_host()
844 spin_lock_irqsave(&host->lock, flags); in mmc_release_host()
845 if (--host->claim_cnt) { in mmc_release_host()
847 spin_unlock_irqrestore(&host->lock, flags); in mmc_release_host()
849 host->claimed = 0; in mmc_release_host()
850 host->claimer->task = NULL; in mmc_release_host()
851 host->claimer = NULL; in mmc_release_host()
852 spin_unlock_irqrestore(&host->lock, flags); in mmc_release_host()
853 wake_up(&host->wq); in mmc_release_host()
855 if (host->caps & MMC_CAP_SYNC_RUNTIME_PM) in mmc_release_host()
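/*
 * Usage sketch (illustrative, not part of core.c): the usual claim/release
 * bracket around a sequence of commands inside the core. mmc_claim_host() is
 * the inline wrapper around __mmc_claim_host(host, NULL, NULL) from the core
 * headers; query_card_status() is the hypothetical helper sketched above.
 */
static int do_one_card_access(struct mmc_card *card)
{
	u32 status;
	int err;

	mmc_claim_host(card->host);		/* exclusive access to the host */
	err = query_card_status(card, &status);
	mmc_release_host(card->host);

	return err;
}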
865 * card device and also claims the host.
867 void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx) in mmc_get_card() argument
869 pm_runtime_get_sync(&card->dev); in mmc_get_card()
870 __mmc_claim_host(card->host, ctx, NULL); in mmc_get_card()
876 * pm reference for the card device.
878 void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx) in mmc_put_card() argument
880 struct mmc_host *host = card->host; in mmc_put_card()
882 WARN_ON(ctx && host->claimer != ctx); in mmc_put_card()
885 pm_runtime_mark_last_busy(&card->dev); in mmc_put_card()
886 pm_runtime_put_autosuspend(&card->dev); in mmc_put_card()
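/*
 * Usage sketch (illustrative, not part of core.c): mmc_get_card() and
 * mmc_put_card() bundle the runtime PM reference with the host claim, which
 * is what card-level drivers use around card accesses. Passing NULL for ctx
 * selects the default claimer context; query_card_status() is the
 * hypothetical helper sketched earlier.
 */
static int card_driver_access(struct mmc_card *card)
{
	u32 status;
	int err;

	mmc_get_card(card, NULL);	/* pm_runtime_get_sync() + claim host */
	err = query_card_status(card, &status);
	mmc_put_card(card, NULL);	/* release host + autosuspend reference */

	return err;
}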
896 struct mmc_ios *ios = &host->ios; in mmc_set_ios()
900 mmc_hostname(host), ios->clock, ios->bus_mode, in mmc_set_ios()
901 ios->power_mode, ios->chip_select, ios->vdd, in mmc_set_ios()
902 1 << ios->bus_width, ios->timing); in mmc_set_ios()
904 host->ops->set_ios(host, ios); in mmc_set_ios()
912 host->ios.chip_select = mode; in mmc_set_chip_select()
922 WARN_ON(hz && hz < host->f_min); in mmc_set_clock()
924 if (hz > host->f_max) in mmc_set_clock()
925 hz = host->f_max; in mmc_set_clock()
927 host->ios.clock = hz; in mmc_set_clock()
931 int mmc_execute_tuning(struct mmc_card *card) in mmc_execute_tuning() argument
933 struct mmc_host *host = card->host; in mmc_execute_tuning()
937 if (!host->ops->execute_tuning) in mmc_execute_tuning()
940 if (host->cqe_on) in mmc_execute_tuning()
941 host->cqe_ops->cqe_off(host); in mmc_execute_tuning()
943 if (mmc_card_mmc(card)) in mmc_execute_tuning()
948 err = host->ops->execute_tuning(host, opcode); in mmc_execute_tuning()
955 /* Only print error when we don't check for card removal */ in mmc_execute_tuning()
956 if (!host->detect_change) { in mmc_execute_tuning()
966 * Change the bus mode (open drain/push-pull) of a host.
970 host->ios.bus_mode = mode; in mmc_set_bus_mode()
979 host->ios.bus_width = width; in mmc_set_bus_width()
988 if (host->cqe_on) in mmc_set_initial_state()
989 host->cqe_ops->cqe_off(host); in mmc_set_initial_state()
994 host->ios.chip_select = MMC_CS_HIGH; in mmc_set_initial_state()
996 host->ios.chip_select = MMC_CS_DONTCARE; in mmc_set_initial_state()
997 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; in mmc_set_initial_state()
998 host->ios.bus_width = MMC_BUS_WIDTH_1; in mmc_set_initial_state()
999 host->ios.timing = MMC_TIMING_LEGACY; in mmc_set_initial_state()
1000 host->ios.drv_type = 0; in mmc_set_initial_state()
1001 host->ios.enhanced_strobe = false; in mmc_set_initial_state()
1004 * Make sure we are in non-enhanced strobe mode before we in mmc_set_initial_state()
1007 if ((host->caps2 & MMC_CAP2_HS400_ES) && in mmc_set_initial_state()
1008 host->ops->hs400_enhanced_strobe) in mmc_set_initial_state()
1009 host->ops->hs400_enhanced_strobe(host, &host->ios); in mmc_set_initial_state()
1017 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1037 return -EINVAL; in mmc_vdd_to_ocrbitnum()
1043 vdd -= 1; in mmc_vdd_to_ocrbitnum()
1046 bit = (vdd - 2000) / 100 + 8; in mmc_vdd_to_ocrbitnum()
1053 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1084 mask |= 1 << vdd_max--; in mmc_vddrange_to_ocrmask()
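/*
 * Usage sketch (illustrative, not part of core.c): a host driver with a
 * fixed 3.2V-3.4V supply deriving ocr_avail from that range. The exact
 * prototype lives in the mmc headers; it is only assumed here to return an
 * OCR bitmask (0 for an invalid range).
 */
static void my_set_ocr(struct mmc_host *mmc)
{
	u32 ocr = mmc_vddrange_to_ocrmask(3200, 3400);	/* millivolts */

	if (ocr)
		mmc->ocr_avail = ocr;	/* e.g. MMC_VDD_32_33 | MMC_VDD_33_34 */
}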
1106 if (!host->parent || !host->parent->of_node) in mmc_of_find_child_device()
1109 for_each_child_of_node(host->parent->of_node, node) { in mmc_of_find_child_device()
1126 * Sanity check the voltages that the card claims to in mmc_select_voltage()
1131 "card claims to support voltages below defined range\n"); in mmc_select_voltage()
1135 ocr &= host->ocr_avail; in mmc_select_voltage()
1137 dev_warn(mmc_dev(host), "no support for card's volts\n"); in mmc_select_voltage()
1141 if (!mmc_card_uhs2(host) && host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) { in mmc_select_voltage()
1142 bit = ffs(ocr) - 1; in mmc_select_voltage()
1146 bit = fls(ocr) - 1; in mmc_select_voltage()
1151 * we must shift the mask '3' with (bit - 1). in mmc_select_voltage()
1153 ocr &= 3 << (bit - 1); in mmc_select_voltage()
1154 if (bit != host->ios.vdd) in mmc_select_voltage()
1155 dev_warn(mmc_dev(host), "exceeding card's volts\n"); in mmc_select_voltage()
1164 int old_signal_voltage = host->ios.signal_voltage; in mmc_set_signal_voltage()
1166 host->ios.signal_voltage = signal_voltage; in mmc_set_signal_voltage()
1167 if (host->ops->start_signal_voltage_switch) in mmc_set_signal_voltage()
1168 err = host->ops->start_signal_voltage_switch(host, &host->ios); in mmc_set_signal_voltage()
1171 host->ios.signal_voltage = old_signal_voltage; in mmc_set_signal_voltage()
1196 clock = host->ios.clock; in mmc_host_set_uhs_voltage()
1197 host->ios.clock = 0; in mmc_host_set_uhs_voltage()
1201 return -EAGAIN; in mmc_host_set_uhs_voltage()
1205 host->ios.clock = clock; in mmc_host_set_uhs_voltage()
1220 if (!host->ops->start_signal_voltage_switch) in mmc_set_uhs_voltage()
1221 return -EPERM; in mmc_set_uhs_voltage()
1222 if (!host->ops->card_busy) in mmc_set_uhs_voltage()
1235 return -EIO; in mmc_set_uhs_voltage()
1238 * The card should drive cmd and dat[0:3] low immediately in mmc_set_uhs_voltage()
1242 if (host->ops->card_busy && !host->ops->card_busy(host)) { in mmc_set_uhs_voltage()
1243 err = -EAGAIN; in mmc_set_uhs_voltage()
1252 err = -EAGAIN; in mmc_set_uhs_voltage()
1260 * Failure to switch is indicated by the card holding in mmc_set_uhs_voltage()
1263 if (host->ops->card_busy && host->ops->card_busy(host)) in mmc_set_uhs_voltage()
1264 err = -EAGAIN; in mmc_set_uhs_voltage()
1269 "power cycling card\n", mmc_hostname(host)); in mmc_set_uhs_voltage()
1281 host->ios.timing = timing; in mmc_set_timing()
1290 host->ios.drv_type = drv_type; in mmc_set_driver_type()
1294 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr, in mmc_select_drive_strength() argument
1297 struct mmc_host *host = card->host; in mmc_select_drive_strength()
1302 if (!host->ops->select_drive_strength) in mmc_select_drive_strength()
1306 if (host->caps & MMC_CAP_DRIVER_TYPE_A) in mmc_select_drive_strength()
1309 if (host->caps & MMC_CAP_DRIVER_TYPE_C) in mmc_select_drive_strength()
1312 if (host->caps & MMC_CAP_DRIVER_TYPE_D) in mmc_select_drive_strength()
1321 return host->ops->select_drive_strength(card, max_dtr, in mmc_select_drive_strength()
1328 * Apply power to the MMC stack. This is a two-stage process.
1329 * First, we enable power to the card without the clock running.
1331 * enable the bus drivers and clock to the card.
1340 if (host->ios.power_mode == MMC_POWER_ON) in mmc_power_up()
1345 host->ios.vdd = fls(ocr) - 1; in mmc_power_up()
1346 host->ios.power_mode = MMC_POWER_UP; in mmc_power_up()
1356 mmc_delay(host->ios.power_delay_ms); in mmc_power_up()
1360 host->ios.clock = host->f_init; in mmc_power_up()
1362 host->ios.power_mode = MMC_POWER_ON; in mmc_power_up()
1369 mmc_delay(host->ios.power_delay_ms); in mmc_power_up()
1374 if (host->ios.power_mode == MMC_POWER_OFF) in mmc_power_off()
1379 host->ios.clock = 0; in mmc_power_off()
1380 host->ios.vdd = 0; in mmc_power_off()
1382 host->ios.power_mode = MMC_POWER_OFF; in mmc_power_off()
1387 * Some configurations, such as the 802.11 SDIO card in the OLPC in mmc_power_off()
1388 * XO-1.5, require a short delay after poweroff before the card in mmc_power_off()
1403 * Assign a mmc bus handler to a host. Only one bus handler may control a
1408 host->bus_ops = ops; in mmc_attach_bus()
1416 host->bus_ops = NULL; in mmc_detach_bus()
1426 if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL)) in _mmc_detect_change()
1427 __pm_wakeup_event(host->ws, 5000); in _mmc_detect_change()
1429 host->detect_change = 1; in _mmc_detect_change()
1430 mmc_schedule_delayed_work(&host->detect, delay); in _mmc_detect_change()
1434  *	mmc_detect_change - process change of state on an MMC socket
1438 * MMC drivers should call this when they detect a card has been
1439 * inserted or removed. The MMC layer will confirm that any
1440 * present card is still functional, and initialize any newly
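/*
 * Usage sketch (illustrative, not part of core.c): a host driver wiring a
 * GPIO card-detect interrupt to mmc_detect_change(). The 200 ms debounce is
 * an arbitrary example; drivers using the slot-gpio helpers
 * (mmc_gpiod_request_cd()) get equivalent behaviour without open-coding it.
 */
static irqreturn_t my_cd_irq(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	mmc_detect_change(mmc, msecs_to_jiffies(200));	/* schedule a rescan */
	return IRQ_HANDLED;
}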
1449 void mmc_init_erase(struct mmc_card *card) in mmc_init_erase() argument
1453 if (is_power_of_2(card->erase_size)) in mmc_init_erase()
1454 card->erase_shift = ffs(card->erase_size) - 1; in mmc_init_erase()
1456 card->erase_shift = 0; in mmc_init_erase()
1459 * It is possible to erase an arbitrarily large area of an SD or MMC in mmc_init_erase()
1460 * card. That is not desirable because it can take a long time in mmc_init_erase()
1462 	 * timeout calculations become increasingly over-estimated. in mmc_init_erase()
1468 	 * For MMC, have a stab at a good value and for modern cards it will in mmc_init_erase()
1473 if (mmc_card_sd(card) && card->ssr.au) { in mmc_init_erase()
1474 card->pref_erase = card->ssr.au; in mmc_init_erase()
1475 card->erase_shift = ffs(card->ssr.au) - 1; in mmc_init_erase()
1476 } else if (card->erase_size) { in mmc_init_erase()
1477 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11; in mmc_init_erase()
1479 card->pref_erase = 512 * 1024 / 512; in mmc_init_erase()
1481 card->pref_erase = 1024 * 1024 / 512; in mmc_init_erase()
1483 card->pref_erase = 2 * 1024 * 1024 / 512; in mmc_init_erase()
1485 card->pref_erase = 4 * 1024 * 1024 / 512; in mmc_init_erase()
1486 if (card->pref_erase < card->erase_size) in mmc_init_erase()
1487 card->pref_erase = card->erase_size; in mmc_init_erase()
1489 sz = card->pref_erase % card->erase_size; in mmc_init_erase()
1491 card->pref_erase += card->erase_size - sz; in mmc_init_erase()
1494 card->pref_erase = 0; in mmc_init_erase()
1502 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, in mmc_mmc_erase_timeout() argument
1508 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) { in mmc_mmc_erase_timeout()
1509 erase_timeout = card->ext_csd.trim_timeout; in mmc_mmc_erase_timeout()
1510 } else if (card->ext_csd.erase_group_def & 1) { in mmc_mmc_erase_timeout()
1513 erase_timeout = card->ext_csd.trim_timeout; in mmc_mmc_erase_timeout()
1515 erase_timeout = card->ext_csd.hc_erase_timeout; in mmc_mmc_erase_timeout()
1518 unsigned int mult = (10 << card->csd.r2w_factor); in mmc_mmc_erase_timeout()
1519 unsigned int timeout_clks = card->csd.taac_clks * mult; in mmc_mmc_erase_timeout()
1523 if (card->csd.taac_ns < 1000000) in mmc_mmc_erase_timeout()
1524 timeout_us = (card->csd.taac_ns * mult) / 1000; in mmc_mmc_erase_timeout()
1526 timeout_us = (card->csd.taac_ns / 1000) * mult; in mmc_mmc_erase_timeout()
1534 (card->host->ios.clock / 1000); in mmc_mmc_erase_timeout()
1549 erase_timeout *= card->ext_csd.sec_erase_mult; in mmc_mmc_erase_timeout()
1551 erase_timeout *= card->ext_csd.sec_trim_mult; in mmc_mmc_erase_timeout()
1560 if (mmc_host_is_spi(card->host) && erase_timeout < 1000) in mmc_mmc_erase_timeout()
1566 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card, in mmc_sd_erase_timeout() argument
1578 if (card->ssr.erase_timeout) { in mmc_sd_erase_timeout()
1580 erase_timeout = card->ssr.erase_timeout * qty + in mmc_sd_erase_timeout()
1581 card->ssr.erase_offset; in mmc_sd_erase_timeout()
1597 static unsigned int mmc_erase_timeout(struct mmc_card *card, in mmc_erase_timeout() argument
1601 if (mmc_card_sd(card)) in mmc_erase_timeout()
1602 return mmc_sd_erase_timeout(card, arg, qty); in mmc_erase_timeout()
1604 return mmc_mmc_erase_timeout(card, arg, qty); in mmc_erase_timeout()
1607 static int mmc_do_erase(struct mmc_card *card, sector_t from, in mmc_do_erase() argument
1615 mmc_retune_hold(card->host); in mmc_do_erase()
1621 * For SD, the allocation units are always a power of 2. For MMC, the in mmc_do_erase()
1633 if (card->erase_shift) in mmc_do_erase()
1634 qty += ((to >> card->erase_shift) - in mmc_do_erase()
1635 (from >> card->erase_shift)) + 1; in mmc_do_erase()
1636 else if (mmc_card_sd(card)) in mmc_do_erase()
1637 qty += to - from + 1; in mmc_do_erase()
1639 qty += (mmc_sector_div(to, card->erase_size) - in mmc_do_erase()
1640 mmc_sector_div(from, card->erase_size)) + 1; in mmc_do_erase()
1642 if (!mmc_card_blockaddr(card)) { in mmc_do_erase()
1647 if (mmc_card_sd(card)) in mmc_do_erase()
1654 if (mmc_card_ult_capacity(card)) { in mmc_do_erase()
1659 err = mmc_wait_for_cmd(card->host, &cmd, 0); in mmc_do_erase()
1663 err = -EIO; in mmc_do_erase()
1668 if (mmc_card_sd(card)) in mmc_do_erase()
1675 if (mmc_card_ult_capacity(card)) { in mmc_do_erase()
1680 err = mmc_wait_for_cmd(card->host, &cmd, 0); in mmc_do_erase()
1684 err = -EIO; in mmc_do_erase()
1691 busy_timeout = mmc_erase_timeout(card, arg, qty); in mmc_do_erase()
1692 use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout); in mmc_do_erase()
1694 err = mmc_wait_for_cmd(card->host, &cmd, 0); in mmc_do_erase()
1698 err = -EIO; in mmc_do_erase()
1702 if (mmc_host_is_spi(card->host)) in mmc_do_erase()
1709 if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) in mmc_do_erase()
1713 err = mmc_poll_for_busy(card, busy_timeout, false, MMC_BUSY_ERASE); in mmc_do_erase()
1716 mmc_retune_release(card->host); in mmc_do_erase()
1720 static unsigned int mmc_align_erase_size(struct mmc_card *card, in mmc_align_erase_size() argument
1729 	 * When the 'card->erase_size' is a power of 2, we can use round_up/down() in mmc_align_erase_size()
1732 if (is_power_of_2(card->erase_size)) { in mmc_align_erase_size()
1735 from_new = round_up(temp, card->erase_size); in mmc_align_erase_size()
1736 rem = from_new - temp; in mmc_align_erase_size()
1739 nr_new -= rem; in mmc_align_erase_size()
1743 nr_new = round_down(nr_new, card->erase_size); in mmc_align_erase_size()
1745 rem = mmc_sector_mod(from_new, card->erase_size); in mmc_align_erase_size()
1747 rem = card->erase_size - rem; in mmc_align_erase_size()
1750 nr_new -= rem; in mmc_align_erase_size()
1755 rem = nr_new % card->erase_size; in mmc_align_erase_size()
1757 nr_new -= rem; in mmc_align_erase_size()
1770 * mmc_erase - erase sectors.
1771 * @card: card to erase
1778 int mmc_erase(struct mmc_card *card, sector_t from, unsigned int nr, in mmc_erase() argument
1786 if (!(card->csd.cmdclass & CCC_ERASE)) in mmc_erase()
1787 return -EOPNOTSUPP; in mmc_erase()
1789 if (!card->erase_size) in mmc_erase()
1790 return -EOPNOTSUPP; in mmc_erase()
1792 if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG) in mmc_erase()
1793 return -EOPNOTSUPP; in mmc_erase()
1795 if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) && in mmc_erase()
1796 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)) in mmc_erase()
1797 return -EOPNOTSUPP; in mmc_erase()
1799 if (mmc_card_mmc(card) && is_trim_arg(arg) && in mmc_erase()
1800 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)) in mmc_erase()
1801 return -EOPNOTSUPP; in mmc_erase()
1804 if (mmc_sector_mod(from, card->erase_size) || nr % card->erase_size) in mmc_erase()
1805 return -EINVAL; in mmc_erase()
1809 nr = mmc_align_erase_size(card, &from, &to, nr); in mmc_erase()
1815 return -EINVAL; in mmc_erase()
1818 to -= 1; in mmc_erase()
1821 * Special case where only one erase-group fits in the timeout budget: in mmc_erase()
1822 * If the region crosses an erase-group boundary on this particular in mmc_erase()
1823 	 * case, we will be trimming more than one erase-group, which does not in mmc_erase()
1826 * identified by the card->eg_boundary flag. in mmc_erase()
1828 rem = card->erase_size - mmc_sector_mod(from, card->erase_size); in mmc_erase()
1829 if ((arg & MMC_TRIM_OR_DISCARD_ARGS) && card->eg_boundary && nr > rem) { in mmc_erase()
1830 err = mmc_do_erase(card, from, from + rem - 1, arg); in mmc_erase()
1836 return mmc_do_erase(card, from, to, arg); in mmc_erase()
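/*
 * Usage sketch (illustrative, not part of core.c): picking an erase argument
 * the way the block driver does for discards, then calling mmc_erase().
 * "from" and "nr" are 512-byte sector units and are assumed to already meet
 * the alignment rules mmc_erase() enforces for the chosen argument.
 */
static int discard_sectors(struct mmc_card *card, sector_t from, unsigned int nr)
{
	unsigned int arg;

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;	/* == SD_ERASE_ARG for SD cards */

	return mmc_erase(card, from, nr, arg);
}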
1840 int mmc_can_erase(struct mmc_card *card) in mmc_can_erase() argument
1842 if (card->csd.cmdclass & CCC_ERASE && card->erase_size) in mmc_can_erase()
1848 int mmc_can_trim(struct mmc_card *card) in mmc_can_trim() argument
1850 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) && in mmc_can_trim()
1851 (!(card->quirks & MMC_QUIRK_TRIM_BROKEN))) in mmc_can_trim()
1857 int mmc_can_discard(struct mmc_card *card) in mmc_can_discard() argument
1863 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE) in mmc_can_discard()
1869 int mmc_can_sanitize(struct mmc_card *card) in mmc_can_sanitize() argument
1871 if (!mmc_can_trim(card) && !mmc_can_erase(card)) in mmc_can_sanitize()
1873 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) in mmc_can_sanitize()
1878 int mmc_can_secure_erase_trim(struct mmc_card *card) in mmc_can_secure_erase_trim() argument
1880 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) && in mmc_can_secure_erase_trim()
1881 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) in mmc_can_secure_erase_trim()
1887 int mmc_erase_group_aligned(struct mmc_card *card, sector_t from, in mmc_erase_group_aligned() argument
1890 if (!card->erase_size) in mmc_erase_group_aligned()
1892 if (mmc_sector_mod(from, card->erase_size) || nr % card->erase_size) in mmc_erase_group_aligned()
1898 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card, in mmc_do_calc_max_discard() argument
1901 struct mmc_host *host = card->host; in mmc_do_calc_max_discard()
1904 unsigned int max_busy_timeout = host->max_busy_timeout ? in mmc_do_calc_max_discard()
1905 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS; in mmc_do_calc_max_discard()
1907 if (card->erase_shift) { in mmc_do_calc_max_discard()
1908 max_qty = UINT_MAX >> card->erase_shift; in mmc_do_calc_max_discard()
1909 min_qty = card->pref_erase >> card->erase_shift; in mmc_do_calc_max_discard()
1910 } else if (mmc_card_sd(card)) { in mmc_do_calc_max_discard()
1912 min_qty = card->pref_erase; in mmc_do_calc_max_discard()
1914 max_qty = UINT_MAX / card->erase_size; in mmc_do_calc_max_discard()
1915 min_qty = card->pref_erase / card->erase_size; in mmc_do_calc_max_discard()
1919 * We should not only use 'host->max_busy_timeout' as the limitation in mmc_do_calc_max_discard()
1924 * Here we set 'card->pref_erase' as the minimal discard sectors no in mmc_do_calc_max_discard()
1925 * matter what size of 'host->max_busy_timeout', but if the in mmc_do_calc_max_discard()
1926 * 'host->max_busy_timeout' is large enough for more discard sectors, in mmc_do_calc_max_discard()
1928 * get a balance value. In cases when the 'host->max_busy_timeout' in mmc_do_calc_max_discard()
1933 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) { in mmc_do_calc_max_discard()
1934 timeout = mmc_erase_timeout(card, arg, qty + x); in mmc_do_calc_max_discard()
1952 * an erase-group boundary even if the amount of sectors is less than in mmc_do_calc_max_discard()
1953 * one erase-group. in mmc_do_calc_max_discard()
1954 * If we can only fit one erase-group in the controller timeout budget, in mmc_do_calc_max_discard()
1955 * we have to care that erase-group boundaries are not crossed by a in mmc_do_calc_max_discard()
1958 * always touch (qty + 1) erase-groups as a simple optimization. in mmc_do_calc_max_discard()
1961 card->eg_boundary = 1; in mmc_do_calc_max_discard()
1963 qty--; in mmc_do_calc_max_discard()
1966 if (card->erase_shift) in mmc_do_calc_max_discard()
1967 max_discard = qty << card->erase_shift; in mmc_do_calc_max_discard()
1968 else if (mmc_card_sd(card)) in mmc_do_calc_max_discard()
1971 max_discard = qty * card->erase_size; in mmc_do_calc_max_discard()
1976 unsigned int mmc_calc_max_discard(struct mmc_card *card) in mmc_calc_max_discard() argument
1978 struct mmc_host *host = card->host; in mmc_calc_max_discard()
1982 * Without erase_group_def set, MMC erase timeout depends on clock in mmc_calc_max_discard()
1986 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1)) in mmc_calc_max_discard()
1987 return card->pref_erase; in mmc_calc_max_discard()
1989 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG); in mmc_calc_max_discard()
1990 if (mmc_can_trim(card)) { in mmc_calc_max_discard()
1991 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG); in mmc_calc_max_discard()
1994 } else if (max_discard < card->erase_size) { in mmc_calc_max_discard()
1998 mmc_hostname(host), max_discard, host->max_busy_timeout ? in mmc_calc_max_discard()
1999 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS); in mmc_calc_max_discard()
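/*
 * Usage sketch (illustrative, not part of core.c): querying the largest
 * discard the host's busy timeout allows, as the block driver does before
 * configuring its queue limits (the block-layer plumbing is omitted here).
 */
static unsigned int query_max_discard(struct mmc_card *card)
{
	unsigned int max_discard = mmc_calc_max_discard(card);	/* in sectors */

	pr_debug("%s: max discard %u sectors\n",
		 mmc_hostname(card->host), max_discard);
	return max_discard;
}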
2004 bool mmc_card_is_blockaddr(struct mmc_card *card) in mmc_card_is_blockaddr() argument
2006 return card ? mmc_card_blockaddr(card) : false; in mmc_card_is_blockaddr()
2010 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) in mmc_set_blocklen() argument
2014 if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) || in mmc_set_blocklen()
2015 mmc_card_hs400(card) || mmc_card_hs400es(card)) in mmc_set_blocklen()
2021 return mmc_wait_for_cmd(card->host, &cmd, 5); in mmc_set_blocklen()
2029 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->card_hw_reset) in mmc_hw_reset_for_init()
2031 host->ops->card_hw_reset(host); in mmc_hw_reset_for_init()
2035 * mmc_hw_reset - reset the card in hardware
2036 * @card: card to be reset
2038 * Hard reset the card. This function is only for upper layers, like the
2039 * block layer or card drivers. You cannot use it in host drivers (struct
2042 * Return: 0 on success, -errno on failure
2044 int mmc_hw_reset(struct mmc_card *card) in mmc_hw_reset() argument
2046 struct mmc_host *host = card->host; in mmc_hw_reset()
2049 ret = host->bus_ops->hw_reset(host); in mmc_hw_reset()
2051 pr_warn("%s: tried to HW reset card, got error %d\n", in mmc_hw_reset()
2058 int mmc_sw_reset(struct mmc_card *card) in mmc_sw_reset() argument
2060 struct mmc_host *host = card->host; in mmc_sw_reset()
2063 if (!host->bus_ops->sw_reset) in mmc_sw_reset()
2064 return -EOPNOTSUPP; in mmc_sw_reset()
2066 ret = host->bus_ops->sw_reset(host); in mmc_sw_reset()
2068 pr_warn("%s: tried to SW reset card, got error %d\n", in mmc_sw_reset()
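/*
 * Usage sketch (illustrative, not part of core.c): recovery logic in a
 * card-level driver that prefers the lighter software reset and falls back
 * to a full hardware reset. The caller is assumed to hold the host claim
 * (e.g. via mmc_claim_host() or mmc_get_card()).
 */
static int try_recover_card(struct mmc_card *card)
{
	int err;

	err = mmc_sw_reset(card);	/* -EOPNOTSUPP if the bus lacks sw_reset */
	if (err == -EOPNOTSUPP)
		err = mmc_hw_reset(card);	/* power cycle and re-initialize */

	return err;
}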
2077 host->f_init = freq; in mmc_rescan_try_freq()
2079 pr_debug("%s: %s: trying to init card at %u Hz\n", in mmc_rescan_try_freq()
2080 mmc_hostname(host), __func__, host->f_init); in mmc_rescan_try_freq()
2082 mmc_power_up(host, host->ocr_avail); in mmc_rescan_try_freq()
2091 * sdio_reset sends CMD52 to reset card. Since we do not know in mmc_rescan_try_freq()
2092 * if the card is being re-initialized, just send it. CMD52 in mmc_rescan_try_freq()
2096 if (!(host->caps2 & MMC_CAP2_NO_SDIO)) in mmc_rescan_try_freq()
2101 if (!(host->caps2 & MMC_CAP2_NO_SD)) { in mmc_rescan_try_freq()
2102 if (mmc_send_if_cond_pcie(host, host->ocr_avail)) in mmc_rescan_try_freq()
2108 /* Order's important: probe SDIO, then SD, then MMC */ in mmc_rescan_try_freq()
2109 if (!(host->caps2 & MMC_CAP2_NO_SDIO)) in mmc_rescan_try_freq()
2113 if (!(host->caps2 & MMC_CAP2_NO_SD)) in mmc_rescan_try_freq()
2117 if (!(host->caps2 & MMC_CAP2_NO_MMC)) in mmc_rescan_try_freq()
2123 return -EIO; in mmc_rescan_try_freq()
2130 if (!host->card || mmc_card_removed(host->card)) in _mmc_detect_card_removed()
2133 ret = host->bus_ops->alive(host); in _mmc_detect_card_removed()
2136 * Card detect status and alive check may be out of sync if card is in _mmc_detect_card_removed()
2137 * removed slowly, when card detect switch changes while card/slot in _mmc_detect_card_removed()
2138 * pads are still contacted in hardware (refer to "SD Card Mechanical in _mmc_detect_card_removed()
2139 * Addendum, Appendix C: Card Detection Switch"). So reschedule a in _mmc_detect_card_removed()
2142 if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) { in _mmc_detect_card_removed()
2144 pr_debug("%s: card removed too slowly\n", mmc_hostname(host)); in _mmc_detect_card_removed()
2148 mmc_card_set_removed(host->card); in _mmc_detect_card_removed()
2149 pr_debug("%s: card remove detected\n", mmc_hostname(host)); in _mmc_detect_card_removed()
2157 struct mmc_card *card = host->card; in mmc_detect_card_removed() local
2160 WARN_ON(!host->claimed); in mmc_detect_card_removed()
2162 if (!card) in mmc_detect_card_removed()
2168 ret = mmc_card_removed(card); in mmc_detect_card_removed()
2170 * The card will be considered unchanged unless we have been asked to in mmc_detect_card_removed()
2171 * detect a change or host requires polling to provide card detection. in mmc_detect_card_removed()
2173 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL)) in mmc_detect_card_removed()
2176 host->detect_change = 0; in mmc_detect_card_removed()
2179 if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) { in mmc_detect_card_removed()
2182 * rescan handle the card removal. in mmc_detect_card_removed()
2184 cancel_delayed_work(&host->detect); in mmc_detect_card_removed()
2193 int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *gpt_sector) in mmc_card_alternative_gpt_sector() argument
2197 if ((!(card->host->caps2 & MMC_CAP2_ALT_GPT_TEGRA))) in mmc_card_alternative_gpt_sector()
2198 return -EOPNOTSUPP; in mmc_card_alternative_gpt_sector()
2201 if (card->ext_csd.rev < 3 || in mmc_card_alternative_gpt_sector()
2202 !mmc_card_mmc(card) || in mmc_card_alternative_gpt_sector()
2203 !mmc_card_is_blockaddr(card) || in mmc_card_alternative_gpt_sector()
2204 mmc_card_is_removable(card->host)) in mmc_card_alternative_gpt_sector()
2205 return -ENOENT; in mmc_card_alternative_gpt_sector()
2209 * main one. NVIDIA's bootloader linearizes eMMC boot0->boot1->main in mmc_card_alternative_gpt_sector()
2218 boot_sectors_num = card->ext_csd.raw_boot_mult * SZ_128K / in mmc_card_alternative_gpt_sector()
2222 *gpt_sector = card->ext_csd.sectors - boot_sectors_num - 1; in mmc_card_alternative_gpt_sector()
2234 if (host->rescan_disable) in mmc_rescan()
2237 /* If there is a non-removable card registered, only scan once */ in mmc_rescan()
2238 if (!mmc_card_is_removable(host) && host->rescan_entered) in mmc_rescan()
2240 host->rescan_entered = 1; in mmc_rescan()
2242 if (host->trigger_card_event && host->ops->card_event) { in mmc_rescan()
2244 host->ops->card_event(host); in mmc_rescan()
2246 host->trigger_card_event = false; in mmc_rescan()
2249 /* Verify a registered card to be functional, else remove it. */ in mmc_rescan()
2250 if (host->bus_ops) in mmc_rescan()
2251 host->bus_ops->detect(host); in mmc_rescan()
2253 host->detect_change = 0; in mmc_rescan()
2255 /* if there still is a card present, stop here */ in mmc_rescan()
2256 if (host->bus_ops != NULL) in mmc_rescan()
2260 if (mmc_card_is_removable(host) && host->ops->get_cd && in mmc_rescan()
2261 host->ops->get_cd(host) == 0) { in mmc_rescan()
2267 /* If an SD express card is present, then leave it as is. */ in mmc_rescan()
2275 	 * UHS-II enumeration. However, it seems like cards don't reliably in mmc_rescan()
2276 * announce their support for UHS-II in the response to the ACMD41, in mmc_rescan()
2278 * with UHS-II for now. in mmc_rescan()
2287 if (freq > host->f_max) { in mmc_rescan()
2290 freq = host->f_max; in mmc_rescan()
2292 if (!mmc_rescan_try_freq(host, max(freq, host->f_min))) in mmc_rescan()
2294 if (freqs[i] <= host->f_min) in mmc_rescan()
2298 /* A non-removable card should have been detected by now. */ in mmc_rescan()
2299 if (!mmc_card_is_removable(host) && !host->bus_ops) in mmc_rescan()
2300 pr_info("%s: Failed to initialize a non-removable card", in mmc_rescan()
2305 	 * the card init as those are expected. in mmc_rescan()
2307 host->err_stats[MMC_ERR_CMD_TIMEOUT] = 0; in mmc_rescan()
2311 if (host->caps & MMC_CAP_NEEDS_POLL) in mmc_rescan()
2312 mmc_schedule_delayed_work(&host->detect, HZ); in mmc_rescan()
2317 bool power_up = !(host->caps2 & in mmc_start_host()
2320 host->f_init = max(min(freqs[0], host->f_max), host->f_min); in mmc_start_host()
2321 host->rescan_disable = 0; in mmc_start_host()
2325 mmc_power_up(host, host->ocr_avail); in mmc_start_host()
2335 if (host->rescan_disable) in __mmc_stop_host()
2338 if (host->slot.cd_irq >= 0) { in __mmc_stop_host()
2340 disable_irq(host->slot.cd_irq); in __mmc_stop_host()
2343 host->rescan_disable = 1; in __mmc_stop_host()
2344 cancel_delayed_work_sync(&host->detect); in __mmc_stop_host()
2351 /* clear pm flags now and let card drivers set them as needed */ in mmc_stop_host()
2352 host->pm_flags = 0; in mmc_stop_host()
2354 if (host->bus_ops) { in mmc_stop_host()
2355 /* Calling bus_ops->remove() with a claimed host can deadlock */ in mmc_stop_host()
2356 host->bus_ops->remove(host); in mmc_stop_host()
2404 MODULE_DESCRIPTION("MMC core driver");