2 * linux/drivers/mmc/core/core.c
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm_wakeup.h>
27 #include <linux/suspend.h>
28 #include <linux/fault-inject.h>
29 #include <linux/random.h>
30 #include <linux/slab.h>
33 #include <linux/mmc/card.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/slot-gpio.h>
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/mmc.h>
52 /* If the device is not responding */
53 #define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
56 * Background operations can take a long time, depending on the housekeeping
57 * operations the card has to perform.
59 #define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
61 /* The max erase timeout, used when host->max_busy_timeout isn't specified */
62 #define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */
64 #define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
66 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
69 * Enabling software CRCs on the data blocks can be a significant (30%)
70 * performance cost, and for other reasons may not always be desired.
71 * So we allow it to be disabled.
74 module_param(use_spi_crc, bool, 0);
76 static int mmc_schedule_delayed_work(struct delayed_work *work,
80 * We use the system_freezable_wq for two reasons.
81 * First, it allows several work items (though not the same work item) to be
82 * executed simultaneously. Second, the queue becomes frozen when
83 * userspace becomes frozen during system PM.
85 return queue_delayed_work(system_freezable_wq, work, delay);
88 #ifdef CONFIG_FAIL_MMC_REQUEST
91 * Internal function. Inject random data errors.
92 * If mmc_data is NULL no errors are injected.
94 static void mmc_should_fail_request(struct mmc_host *host,
95 struct mmc_request *mrq)
97 struct mmc_command *cmd = mrq->cmd;
98 struct mmc_data *data = mrq->data;
99 static const int data_errors[] = {
108 if (cmd->error || data->error ||
109 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
112 data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
113 data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
116 #else /* CONFIG_FAIL_MMC_REQUEST */
118 static inline void mmc_should_fail_request(struct mmc_host *host,
119 struct mmc_request *mrq)
123 #endif /* CONFIG_FAIL_MMC_REQUEST */
125 static inline void mmc_complete_cmd(struct mmc_request *mrq)
127 if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
128 complete_all(&mrq->cmd_completion);
131 void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
133 if (!mrq->cap_cmd_during_tfr)
136 mmc_complete_cmd(mrq);
138 pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
139 mmc_hostname(host), mrq->cmd->opcode);
141 EXPORT_SYMBOL(mmc_command_done);
144 * mmc_request_done - finish processing an MMC request
145 * @host: MMC host which completed request
146 * @mrq: MMC request which completed
148 * MMC drivers should call this function when they have completed
149 * their processing of a request.
151 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
153 struct mmc_command *cmd = mrq->cmd;
154 int err = cmd->error;
156 /* Flag re-tuning needed on CRC errors */
157 if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
158 cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
159 (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
160 (mrq->data && mrq->data->error == -EILSEQ) ||
161 (mrq->stop && mrq->stop->error == -EILSEQ)))
162 mmc_retune_needed(host);
164 if (err && cmd->retries && mmc_host_is_spi(host)) {
165 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
169 if (host->ongoing_mrq == mrq)
170 host->ongoing_mrq = NULL;
172 mmc_complete_cmd(mrq);
174 trace_mmc_request_done(host, mrq);
176 if (err && cmd->retries && !mmc_card_removed(host->card)) {
178 * Request starter must handle retries - see
179 * mmc_wait_for_req_done().
184 mmc_should_fail_request(host, mrq);
186 if (!host->ongoing_mrq)
187 led_trigger_event(host->led, LED_OFF);
190 pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
191 mmc_hostname(host), mrq->sbc->opcode,
193 mrq->sbc->resp[0], mrq->sbc->resp[1],
194 mrq->sbc->resp[2], mrq->sbc->resp[3]);
197 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
198 mmc_hostname(host), cmd->opcode, err,
199 cmd->resp[0], cmd->resp[1],
200 cmd->resp[2], cmd->resp[3]);
203 pr_debug("%s: %d bytes transferred: %d\n",
205 mrq->data->bytes_xfered, mrq->data->error);
209 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
210 mmc_hostname(host), mrq->stop->opcode,
212 mrq->stop->resp[0], mrq->stop->resp[1],
213 mrq->stop->resp[2], mrq->stop->resp[3]);
221 EXPORT_SYMBOL(mmc_request_done);
223 static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
227 /* Assumes host controller has been runtime resumed by mmc_claim_host */
228 err = mmc_retune(host);
230 mrq->cmd->error = err;
231 mmc_request_done(host, mrq);
236 * For SDIO R/W commands we must wait for the card to stop signalling busy,
237 * otherwise some SDIO devices won't work properly.
239 if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
240 int tries = 500; /* Wait approx. 500 ms at maximum */
242 while (host->ops->card_busy(host) && --tries)
246 mrq->cmd->error = -EBUSY;
247 mmc_request_done(host, mrq);
252 if (mrq->cap_cmd_during_tfr) {
253 host->ongoing_mrq = mrq;
255 * The retry path could come through here without having waited on
256 * cmd_completion, so ensure it is reinitialised.
258 reinit_completion(&mrq->cmd_completion);
261 trace_mmc_request_start(host, mrq);
263 host->ops->request(host, mrq);
266 static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
268 #ifdef CONFIG_MMC_DEBUG
270 struct scatterlist *sg;
272 mmc_retune_hold(host);
274 if (mmc_card_removed(host->card))
278 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
279 mmc_hostname(host), mrq->sbc->opcode,
280 mrq->sbc->arg, mrq->sbc->flags);
283 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
284 mmc_hostname(host), mrq->cmd->opcode,
285 mrq->cmd->arg, mrq->cmd->flags);
288 pr_debug("%s: blksz %d blocks %d flags %08x "
289 "tsac %d ms nsac %d\n",
290 mmc_hostname(host), mrq->data->blksz,
291 mrq->data->blocks, mrq->data->flags,
292 mrq->data->timeout_ns / 1000000,
293 mrq->data->timeout_clks);
297 pr_debug("%s: CMD%u arg %08x flags %08x\n",
298 mmc_hostname(host), mrq->stop->opcode,
299 mrq->stop->arg, mrq->stop->flags);
302 WARN_ON(!host->claimed);
311 BUG_ON(mrq->data->blksz > host->max_blk_size);
312 BUG_ON(mrq->data->blocks > host->max_blk_count);
313 BUG_ON(mrq->data->blocks * mrq->data->blksz >
316 #ifdef CONFIG_MMC_DEBUG
318 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
320 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
323 mrq->cmd->data = mrq->data;
324 mrq->data->error = 0;
325 mrq->data->mrq = mrq;
327 mrq->data->stop = mrq->stop;
328 mrq->stop->error = 0;
329 mrq->stop->mrq = mrq;
332 led_trigger_event(host->led, LED_FULL);
333 __mmc_start_request(host, mrq);
339 * mmc_start_bkops - start BKOPS for supported cards
340 * @card: MMC card to start BKOPS
341 * @from_exception: A flag to indicate if this function was
342 * called due to an exception raised by the card
344 * Start background operations whenever requested.
345 * When the urgent BKOPS bit is set in a R1 command response
346 * then background operations should be started immediately.
348 void mmc_start_bkops(struct mmc_card *card, bool from_exception)
352 bool use_busy_signal;
356 if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
359 err = mmc_read_bkops_status(card);
361 pr_err("%s: Failed to read bkops status: %d\n",
362 mmc_hostname(card->host), err);
366 if (!card->ext_csd.raw_bkops_status)
369 if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
373 mmc_claim_host(card->host);
374 if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
375 timeout = MMC_BKOPS_MAX_TIMEOUT;
376 use_busy_signal = true;
379 use_busy_signal = false;
382 mmc_retune_hold(card->host);
384 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
385 EXT_CSD_BKOPS_START, 1, timeout,
386 use_busy_signal, true, false);
388 pr_warn("%s: Error %d starting bkops\n",
389 mmc_hostname(card->host), err);
390 mmc_retune_release(card->host);
395 * For urgent BKOPS status (LEVEL_2 and higher),
396 * BKOPS is executed synchronously; otherwise
397 * the operation is left running in the background.
399 if (!use_busy_signal)
400 mmc_card_set_doing_bkops(card);
402 mmc_retune_release(card->host);
404 mmc_release_host(card->host);
406 EXPORT_SYMBOL(mmc_start_bkops);
409 * mmc_wait_data_done() - done callback for data request
410 * @mrq: done data request
412 * Wakes up mmc context, passed as a callback to host controller driver
414 static void mmc_wait_data_done(struct mmc_request *mrq)
416 struct mmc_context_info *context_info = &mrq->host->context_info;
418 context_info->is_done_rcv = true;
419 wake_up_interruptible(&context_info->wait);
422 static void mmc_wait_done(struct mmc_request *mrq)
424 complete(&mrq->completion);
427 static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
429 struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
432 * If there is an ongoing transfer, wait for the command line to become available.
435 if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
436 wait_for_completion(&ongoing_mrq->cmd_completion);
440 *__mmc_start_data_req() - starts data request
441 * @host: MMC host to start the request
442 * @mrq: data request to start
444 * Sets the done callback to be called when request is completed by the card.
445 * Starts data mmc request execution
446 * If an ongoing transfer is already in progress, wait for the command line
447 * to become available before sending another command.
449 static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
453 mmc_wait_ongoing_tfr_cmd(host);
455 mrq->done = mmc_wait_data_done;
458 init_completion(&mrq->cmd_completion);
460 err = mmc_start_request(host, mrq);
462 mrq->cmd->error = err;
463 mmc_complete_cmd(mrq);
464 mmc_wait_data_done(mrq);
470 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
474 mmc_wait_ongoing_tfr_cmd(host);
476 init_completion(&mrq->completion);
477 mrq->done = mmc_wait_done;
479 init_completion(&mrq->cmd_completion);
481 err = mmc_start_request(host, mrq);
483 mrq->cmd->error = err;
484 mmc_complete_cmd(mrq);
485 complete(&mrq->completion);
492 * mmc_wait_for_data_req_done() - wait for request completed
493 * @host: MMC host to prepare the command.
494 * @mrq: MMC request to wait for
496 * Blocks the MMC context until the host controller acks the end of the data
497 * request execution, or until a new request notification arrives from the block layer.
498 * Handles command retries.
500 * Returns enum mmc_blk_status after checking errors.
502 static int mmc_wait_for_data_req_done(struct mmc_host *host,
503 struct mmc_request *mrq,
504 struct mmc_async_req *next_req)
506 struct mmc_command *cmd;
507 struct mmc_context_info *context_info = &host->context_info;
512 wait_event_interruptible(context_info->wait,
513 (context_info->is_done_rcv ||
514 context_info->is_new_req));
515 spin_lock_irqsave(&context_info->lock, flags);
516 context_info->is_waiting_last_req = false;
517 spin_unlock_irqrestore(&context_info->lock, flags);
518 if (context_info->is_done_rcv) {
519 context_info->is_done_rcv = false;
520 context_info->is_new_req = false;
523 if (!cmd->error || !cmd->retries ||
524 mmc_card_removed(host->card)) {
525 err = host->areq->err_check(host->card,
527 break; /* return err */
529 mmc_retune_recheck(host);
530 pr_info("%s: req failed (CMD%u): %d, retrying...\n",
532 cmd->opcode, cmd->error);
535 __mmc_start_request(host, mrq);
536 continue; /* wait for done/new event again */
538 } else if (context_info->is_new_req) {
539 context_info->is_new_req = false;
541 return MMC_BLK_NEW_REQUEST;
544 mmc_retune_release(host);
548 void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
550 struct mmc_command *cmd;
553 wait_for_completion(&mrq->completion);
558 * If the host has timed out waiting for the sanitize
559 * to complete, the card might still be in the programming state,
560 * so let's try to bring the card out of the programming state.
563 if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
564 if (!mmc_interrupt_hpi(host->card)) {
565 pr_warn("%s: %s: Interrupted sanitize\n",
566 mmc_hostname(host), __func__);
570 pr_err("%s: %s: Failed to interrupt sanitize\n",
571 mmc_hostname(host), __func__);
574 if (!cmd->error || !cmd->retries ||
575 mmc_card_removed(host->card))
578 mmc_retune_recheck(host);
580 pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
581 mmc_hostname(host), cmd->opcode, cmd->error);
584 __mmc_start_request(host, mrq);
587 mmc_retune_release(host);
589 EXPORT_SYMBOL(mmc_wait_for_req_done);
592 * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
596 * mmc_is_req_done() is used with requests that have
597 * mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
598 * starting a request and before waiting for it to complete. That is,
599 * either in between calls to mmc_start_req(), or after mmc_wait_for_req()
600 * and before mmc_wait_for_req_done(). If it is called at other times the
601 * result is not meaningful.
603 bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
606 return host->context_info.is_done_rcv;
608 return completion_done(&mrq->completion);
610 EXPORT_SYMBOL(mmc_is_req_done);
613 * mmc_pre_req - Prepare for a new request
614 * @host: MMC host to prepare command
615 * @mrq: MMC request to prepare for
616 * @is_first_req: true if there is no previous started request
617 * that may run in parallel to this call, otherwise false
619 * mmc_pre_req() is called prior to mmc_start_req() to let the
620 * host prepare for the new request. Preparation of a request may be
621 * performed while another request is running on the host.
623 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
626 if (host->ops->pre_req)
627 host->ops->pre_req(host, mrq, is_first_req);
631 * mmc_post_req - Post process a completed request
632 * @host: MMC host to post process command
633 * @mrq: MMC request to post process for
634 * @err: Error, if non zero, clean up any resources made in pre_req
636 * Let the host post process a completed request. Post processing of
637 * a request may be performed while another request is running.
639 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
642 if (host->ops->post_req)
643 host->ops->post_req(host, mrq, err);
647 * mmc_start_req - start a non-blocking request
648 * @host: MMC host to start command
649 * @areq: async request to start
650 * @error: out parameter returns 0 for success, otherwise non zero
652 * Start a new MMC custom command request for a host.
653 * If there is an ongoing async request, wait for completion
654 * of that request and start the new one and return.
655 * Does not wait for the new request to complete.
657 * Returns the completed request, NULL in case of none completed.
658 * Wait for an ongoing request (previously started) to complete and
659 * return the completed request. If there is no ongoing request, NULL
660 * is returned without waiting. NULL is not an error condition.
662 struct mmc_async_req *mmc_start_req(struct mmc_host *host,
663 struct mmc_async_req *areq, int *error)
667 struct mmc_async_req *data = host->areq;
669 /* Prepare a new request */
671 mmc_pre_req(host, areq->mrq, !host->areq);
674 err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
675 if (err == MMC_BLK_NEW_REQUEST) {
679 * The previous request was not completed,
685 * Check BKOPS urgency for each R1 response
687 if (host->card && mmc_card_mmc(host->card) &&
688 ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
689 (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
690 (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
692 /* Cancel the prepared request */
694 mmc_post_req(host, areq->mrq, -EINVAL);
696 mmc_start_bkops(host->card, true);
698 /* prepare the request again */
700 mmc_pre_req(host, areq->mrq, !host->areq);
705 start_err = __mmc_start_data_req(host, areq->mrq);
708 mmc_post_req(host, host->areq->mrq, 0);
710 /* Cancel a prepared request if it was not started. */
711 if ((err || start_err) && areq)
712 mmc_post_req(host, areq->mrq, -EINVAL);
723 EXPORT_SYMBOL(mmc_start_req);
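/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the double-buffered pattern behind mmc_start_req() - start the next
 * prepared request while harvesting the previously completed one:
 *
 *	struct mmc_async_req *done_areq;
 *	int status;
 *
 *	done_areq = mmc_start_req(host, next_areq, &status);
 *	if (done_areq)
 *		... consume done_areq and check 'status' ...
 *
 * Passing a NULL areq simply waits for the ongoing request (if any) and
 * returns it, which can be used to flush the pipeline.
 */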
726 * mmc_wait_for_req - start a request and wait for completion
727 * @host: MMC host to start command
728 * @mrq: MMC request to start
730 * Start a new MMC custom command request for a host, and wait
731 * for the command to complete. In the case of 'cap_cmd_during_tfr'
732 * requests, the transfer is ongoing and the caller can issue further
733 * commands that do not use the data lines, and then wait by calling
734 * mmc_wait_for_req_done().
735 * Does not attempt to parse the response.
737 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
739 __mmc_start_req(host, mrq);
741 if (!mrq->cap_cmd_during_tfr)
742 mmc_wait_for_req_done(host, mrq);
744 EXPORT_SYMBOL(mmc_wait_for_req);
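/*
 * Illustrative sketch of the 'cap_cmd_during_tfr' flow described above
 * (hypothetical caller, not part of this file):
 *
 *	mrq->cap_cmd_during_tfr = true;
 *	mmc_wait_for_req(host, mrq);		// returns once the command is done
 *	...issue commands that do not use the data lines...
 *	mmc_wait_for_req_done(host, mrq);	// now wait for the transfer itself
 */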
747 * mmc_interrupt_hpi - Issue a High Priority Interrupt
748 * @card: the MMC card associated with the HPI transfer
750 * Issue a High Priority Interrupt and poll the card status
751 * until the card is out of the programming state.
753 int mmc_interrupt_hpi(struct mmc_card *card)
757 unsigned long prg_wait;
761 if (!card->ext_csd.hpi_en) {
762 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
766 mmc_claim_host(card->host);
767 err = mmc_send_status(card, &status);
769 pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
773 switch (R1_CURRENT_STATE(status)) {
779 * In idle and transfer states, HPI is not needed and the caller
780 * can issue the next intended command immediately
786 /* In all other states, it's illegal to issue HPI */
787 pr_debug("%s: HPI cannot be sent. Card state=%d\n",
788 mmc_hostname(card->host), R1_CURRENT_STATE(status));
793 err = mmc_send_hpi_cmd(card, &status);
797 prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
799 err = mmc_send_status(card, &status);
801 if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
803 if (time_after(jiffies, prg_wait))
808 mmc_release_host(card->host);
811 EXPORT_SYMBOL(mmc_interrupt_hpi);
814 * mmc_wait_for_cmd - start a command and wait for completion
815 * @host: MMC host to start command
816 * @cmd: MMC command to start
817 * @retries: maximum number of retries
819 * Start a new MMC command for a host, and wait for the command
820 * to complete. Return any error that occurred while the command
821 * was executing. Do not attempt to parse the response.
823 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
825 struct mmc_request mrq = {NULL};
827 WARN_ON(!host->claimed);
829 memset(cmd->resp, 0, sizeof(cmd->resp));
830 cmd->retries = retries;
835 mmc_wait_for_req(host, &mrq);
840 EXPORT_SYMBOL(mmc_wait_for_cmd);
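/*
 * Illustrative usage sketch (hypothetical caller, not part of this file),
 * mirroring how this file itself issues SEND_STATUS while the host is
 * claimed:
 *
 *	struct mmc_command cmd = {0};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	if (!err)
 *		status = cmd.resp[0];
 */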
843 * mmc_stop_bkops - stop ongoing BKOPS
844 * @card: MMC card to check BKOPS
846 * Send HPI command to stop ongoing background operations to
847 * allow rapid servicing of foreground operations, e.g. read/
848 * writes. Wait until the card comes out of the programming state
849 * to avoid errors in servicing read/write requests.
851 int mmc_stop_bkops(struct mmc_card *card)
856 err = mmc_interrupt_hpi(card);
859 * If err is -EINVAL, we can't issue an HPI, so let
860 * the card complete the BKOPS on its own.
862 if (!err || (err == -EINVAL)) {
863 mmc_card_clr_doing_bkops(card);
864 mmc_retune_release(card->host);
870 EXPORT_SYMBOL(mmc_stop_bkops);
872 int mmc_read_bkops_status(struct mmc_card *card)
877 mmc_claim_host(card->host);
878 err = mmc_get_ext_csd(card, &ext_csd);
879 mmc_release_host(card->host);
883 card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
884 card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
888 EXPORT_SYMBOL(mmc_read_bkops_status);
891 * mmc_set_data_timeout - set the timeout for a data command
892 * @data: data phase for command
893 * @card: the MMC card associated with the data transfer
895 * Computes the data timeout parameters according to the
896 * correct algorithm given the card type.
898 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
903 * SDIO cards only define an upper 1 s limit on access.
905 if (mmc_card_sdio(card)) {
906 data->timeout_ns = 1000000000;
907 data->timeout_clks = 0;
912 * SD cards use a 100 multiplier rather than 10
914 mult = mmc_card_sd(card) ? 100 : 10;
917 * Scale up the multiplier (and therefore the timeout) by
918 * the r2w factor for writes.
920 if (data->flags & MMC_DATA_WRITE)
921 mult <<= card->csd.r2w_factor;
923 data->timeout_ns = card->csd.tacc_ns * mult;
924 data->timeout_clks = card->csd.tacc_clks * mult;
927 * SD cards also have an upper limit on the timeout.
929 if (mmc_card_sd(card)) {
930 unsigned int timeout_us, limit_us;
932 timeout_us = data->timeout_ns / 1000;
933 if (card->host->ios.clock)
934 timeout_us += data->timeout_clks * 1000 /
935 (card->host->ios.clock / 1000);
937 if (data->flags & MMC_DATA_WRITE)
939 * The MMC spec states: "It is strongly recommended
940 * for hosts to implement more than 500ms
941 * timeout value even if the card indicates
942 * the 250ms maximum busy length." Even the
943 * previous value of 300ms is known to be
944 * insufficient for some cards.
951 * SDHC cards always use these fixed values.
953 if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
954 data->timeout_ns = limit_us * 1000;
955 data->timeout_clks = 0;
958 /* assign limit value if invalid */
960 data->timeout_ns = limit_us * 1000;
964 * Some cards require longer data read timeout than indicated in CSD.
965 * Address this by setting the read timeout to a "reasonably high"
966 * value. For the cards tested, 600ms has proven enough. If necessary,
967 * this value can be increased if other problematic cards require this.
969 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
970 data->timeout_ns = 600000000;
971 data->timeout_clks = 0;
975 * Some cards need very high timeouts if driven in SPI mode.
976 * The worst observed timeout was 900ms after writing a
977 * continuous stream of data until the internal logic
980 if (mmc_host_is_spi(card->host)) {
981 if (data->flags & MMC_DATA_WRITE) {
982 if (data->timeout_ns < 1000000000)
983 data->timeout_ns = 1000000000; /* 1s */
985 if (data->timeout_ns < 100000000)
986 data->timeout_ns = 100000000; /* 100ms */
990 EXPORT_SYMBOL(mmc_set_data_timeout);
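/*
 * Worked example of the calculation above (illustrative numbers only):
 * an SD card with csd.tacc_ns = 1000000 (1 ms), csd.tacc_clks = 0 and
 * csd.r2w_factor = 2 gets a read timeout of 1 ms * 100 = 100 ms and a
 * write timeout of 1 ms * (100 << 2) = 400 ms, before the SD limit_us
 * clamping above is applied.
 */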
993 * mmc_align_data_size - pads a transfer size to a more optimal value
994 * @card: the MMC card associated with the data transfer
995 * @sz: original transfer size
997 * Pads the original data size with a number of extra bytes in
998 * order to avoid controller bugs and/or performance hits
999 * (e.g. some controllers revert to PIO for certain sizes).
1001 * Returns the improved size, which might be unmodified.
1003 * Note that this function is only relevant when issuing a
1004 * single scatter gather entry.
1006 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
1009 * FIXME: We don't have a system for the controller to tell
1010 * the core about its problems yet, so for now we just 32-bit align the size.
1013 sz = ((sz + 3) / 4) * 4;
1017 EXPORT_SYMBOL(mmc_align_data_size);
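/*
 * For illustration: a 13-byte transfer becomes ((13 + 3) / 4) * 4 = 16
 * bytes, while an already aligned 16-byte transfer is returned unmodified.
 */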
1020 * __mmc_claim_host - exclusively claim a host
1021 * @host: mmc host to claim
1022 * @abort: whether or not the operation should be aborted
1024 * Claim a host for a set of operations. If @abort is non-NULL and
1025 * dereferences to a non-zero value, then this will return prematurely with
1026 * that non-zero value without acquiring the lock. Returns zero
1027 * with the lock held otherwise.
1029 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
1031 DECLARE_WAITQUEUE(wait, current);
1032 unsigned long flags;
1038 add_wait_queue(&host->wq, &wait);
1039 spin_lock_irqsave(&host->lock, flags);
1041 set_current_state(TASK_UNINTERRUPTIBLE);
1042 stop = abort ? atomic_read(abort) : 0;
1043 if (stop || !host->claimed || host->claimer == current)
1045 spin_unlock_irqrestore(&host->lock, flags);
1047 spin_lock_irqsave(&host->lock, flags);
1049 set_current_state(TASK_RUNNING);
1052 host->claimer = current;
1053 host->claim_cnt += 1;
1054 if (host->claim_cnt == 1)
1058 spin_unlock_irqrestore(&host->lock, flags);
1059 remove_wait_queue(&host->wq, &wait);
1062 pm_runtime_get_sync(mmc_dev(host));
1066 EXPORT_SYMBOL(__mmc_claim_host);
1069 * mmc_release_host - release a host
1070 * @host: mmc host to release
1072 * Release an MMC host, allowing others to claim the host
1073 * for their operations.
1075 void mmc_release_host(struct mmc_host *host)
1077 unsigned long flags;
1079 WARN_ON(!host->claimed);
1081 spin_lock_irqsave(&host->lock, flags);
1082 if (--host->claim_cnt) {
1083 /* Release for nested claim */
1084 spin_unlock_irqrestore(&host->lock, flags);
1087 host->claimer = NULL;
1088 spin_unlock_irqrestore(&host->lock, flags);
1090 pm_runtime_mark_last_busy(mmc_dev(host));
1091 pm_runtime_put_autosuspend(mmc_dev(host));
1094 EXPORT_SYMBOL(mmc_release_host);
1097 * This is a helper function, which fetches a runtime pm reference for the
1098 * card device and also claims the host.
1100 void mmc_get_card(struct mmc_card *card)
1102 pm_runtime_get_sync(&card->dev);
1103 mmc_claim_host(card->host);
1105 EXPORT_SYMBOL(mmc_get_card);
1108 * This is a helper function, which releases the host and drops the runtime
1109 * pm reference for the card device.
1111 void mmc_put_card(struct mmc_card *card)
1113 mmc_release_host(card->host);
1114 pm_runtime_mark_last_busy(&card->dev);
1115 pm_runtime_put_autosuspend(&card->dev);
1117 EXPORT_SYMBOL(mmc_put_card);
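/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the get/put helpers bracket any sequence of commands sent to a card,
 * combining a runtime PM reference with claiming the host:
 *
 *	mmc_get_card(card);
 *	err = mmc_send_status(card, &status);
 *	mmc_put_card(card);
 */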
1120 * Internal function that does the actual ios call to the host driver,
1121 * optionally printing some debug output.
1123 static inline void mmc_set_ios(struct mmc_host *host)
1125 struct mmc_ios *ios = &host->ios;
1127 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
1128 "width %u timing %u\n",
1129 mmc_hostname(host), ios->clock, ios->bus_mode,
1130 ios->power_mode, ios->chip_select, ios->vdd,
1131 1 << ios->bus_width, ios->timing);
1133 host->ops->set_ios(host, ios);
1137 * Control chip select pin on a host.
1139 void mmc_set_chip_select(struct mmc_host *host, int mode)
1141 host->ios.chip_select = mode;
1146 * Sets the host clock to the highest possible frequency that is below "hz".
1149 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1151 WARN_ON(hz && hz < host->f_min);
1153 if (hz > host->f_max)
1156 host->ios.clock = hz;
1160 int mmc_execute_tuning(struct mmc_card *card)
1162 struct mmc_host *host = card->host;
1166 if (!host->ops->execute_tuning)
1169 if (mmc_card_mmc(card))
1170 opcode = MMC_SEND_TUNING_BLOCK_HS200;
1172 opcode = MMC_SEND_TUNING_BLOCK;
1174 err = host->ops->execute_tuning(host, opcode);
1177 pr_err("%s: tuning execution failed: %d\n",
1178 mmc_hostname(host), err);
1180 host->retune_now = 0;
1181 host->need_retune = 0;
1182 mmc_retune_enable(host);
1189 * Change the bus mode (open drain/push-pull) of a host.
1191 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1193 host->ios.bus_mode = mode;
1198 * Change data bus width of a host.
1200 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1202 host->ios.bus_width = width;
1207 * Set initial state after a power cycle or a hw_reset.
1209 void mmc_set_initial_state(struct mmc_host *host)
1211 mmc_retune_disable(host);
1213 if (mmc_host_is_spi(host))
1214 host->ios.chip_select = MMC_CS_HIGH;
1216 host->ios.chip_select = MMC_CS_DONTCARE;
1217 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1218 host->ios.bus_width = MMC_BUS_WIDTH_1;
1219 host->ios.timing = MMC_TIMING_LEGACY;
1220 host->ios.drv_type = 0;
1221 host->ios.enhanced_strobe = false;
1224 * Make sure we are in non-enhanced strobe mode before we
1225 * actually enable it in ext_csd.
1227 if ((host->caps2 & MMC_CAP2_HS400_ES) &&
1228 host->ops->hs400_enhanced_strobe)
1229 host->ops->hs400_enhanced_strobe(host, &host->ios);
1235 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1236 * @vdd: voltage (mV)
1237 * @low_bits: prefer low bits in boundary cases
1239 * This function returns the OCR bit number according to the provided @vdd
1240 * value. If conversion is not possible, a negative errno value is returned.
1242 * Depending on the @low_bits flag the function prefers low or high OCR bits
1243 * on boundary voltages. For example,
1244 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1245 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1247 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
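 *
 * As an illustration of the mapping: 3300 mV with @low_bits = false gives
 * bit (3300 - 2000) / 100 + 8 = 21, i.e. MMC_VDD_33_34; with
 * @low_bits = true the same boundary value resolves to the lower bit 20,
 * i.e. MMC_VDD_32_33.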
1249 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1251 const int max_bit = ilog2(MMC_VDD_35_36);
1254 if (vdd < 1650 || vdd > 3600)
1257 if (vdd >= 1650 && vdd <= 1950)
1258 return ilog2(MMC_VDD_165_195);
1263 /* Base 2000 mV, step 100 mV, bit's base 8. */
1264 bit = (vdd - 2000) / 100 + 8;
1271 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1272 * @vdd_min: minimum voltage value (mV)
1273 * @vdd_max: maximum voltage value (mV)
1275 * This function returns the OCR mask bits according to the provided @vdd_min
1276 * and @vdd_max values. If conversion is not possible the function returns 0.
1278 * Notes wrt boundary cases:
1279 * This function sets the OCR bits for all boundary voltages, for example
1280 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1281 * MMC_VDD_34_35 mask.
1283 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1287 if (vdd_max < vdd_min)
1290 /* Prefer high bits for the boundary vdd_max values. */
1291 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1295 /* Prefer low bits for the boundary vdd_min values. */
1296 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1300 /* Fill the mask, from max bit to min bit. */
1301 while (vdd_max >= vdd_min)
1302 mask |= 1 << vdd_max--;
1306 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
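/*
 * Illustrative usage sketch (hypothetical host driver, not part of this
 * file): a controller wired for a fixed 3.3V-3.4V supply could derive its
 * OCR mask before registering the host:
 *
 *	host->ocr_avail = mmc_vddrange_to_ocrmask(3300, 3400);
 *	// == MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35, per the
 *	// boundary-handling notes above
 */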
1311 * mmc_of_parse_voltage - return mask of supported voltages
1312 * @np: The device node to be parsed.
1313 * @mask: mask of voltages available for MMC/SD/SDIO
1315 * Parse the "voltage-ranges" DT property, returning zero if it is not
1316 * found, negative errno if the voltage-range specification is invalid,
1317 * or one if the voltage-range is specified and successfully parsed.
1319 int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1321 const u32 *voltage_ranges;
1324 voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1325 num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1326 if (!voltage_ranges) {
1327 pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
1331 pr_err("%s: voltage-ranges empty\n", np->full_name);
1335 for (i = 0; i < num_ranges; i++) {
1336 const int j = i * 2;
1339 ocr_mask = mmc_vddrange_to_ocrmask(
1340 be32_to_cpu(voltage_ranges[j]),
1341 be32_to_cpu(voltage_ranges[j + 1]));
1343 pr_err("%s: voltage-range #%d is invalid\n",
1352 EXPORT_SYMBOL(mmc_of_parse_voltage);
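/*
 * Illustrative device-tree snippet for the "voltage-ranges" property parsed
 * above: pairs of <min max> values in millivolts, e.g.
 *
 *	voltage-ranges = <3300 3400>;
 *
 * which mmc_vddrange_to_ocrmask() turns into the corresponding OCR bits.
 */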
1354 #endif /* CONFIG_OF */
1356 static int mmc_of_get_func_num(struct device_node *node)
1361 ret = of_property_read_u32(node, "reg", ®);
1368 struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1371 struct device_node *node;
1373 if (!host->parent || !host->parent->of_node)
1376 for_each_child_of_node(host->parent->of_node, node) {
1377 if (mmc_of_get_func_num(node) == func_num)
1384 #ifdef CONFIG_REGULATOR
1387 * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
1388 * @vdd_bit: OCR bit number
1389 * @min_uV: minimum voltage value (uV)
1390 * @max_uV: maximum voltage value (uV)
1392 * This function returns the voltage range according to the provided OCR
1393 * bit number. If conversion is not possible, a negative errno value is returned.
1395 static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1403 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1404 * bits this regulator doesn't quite support ... don't
1405 * be too picky, most cards and regulators are OK with
1406 * a 0.1V range goof (it's a small error percentage).
1408 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1410 *min_uV = 1650 * 1000;
1411 *max_uV = 1950 * 1000;
1413 *min_uV = 1900 * 1000 + tmp * 100 * 1000;
1414 *max_uV = *min_uV + 100 * 1000;
1421 * mmc_regulator_get_ocrmask - return mask of supported voltages
1422 * @supply: regulator to use
1424 * This returns either a negative errno, or a mask of voltages that
1425 * can be provided to MMC/SD/SDIO devices using the specified voltage
1426 * regulator. This would normally be called before registering the
1429 int mmc_regulator_get_ocrmask(struct regulator *supply)
1437 count = regulator_count_voltages(supply);
1441 for (i = 0; i < count; i++) {
1442 vdd_uV = regulator_list_voltage(supply, i);
1446 vdd_mV = vdd_uV / 1000;
1447 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1451 vdd_uV = regulator_get_voltage(supply);
1455 vdd_mV = vdd_uV / 1000;
1456 result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1461 EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1464 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1465 * @mmc: the host to regulate
1466 * @supply: regulator to use
1467 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1469 * Returns zero on success, else negative errno.
1471 * MMC host drivers may use this to enable or disable a regulator using
1472 * a particular supply voltage. This would normally be called from the
1475 int mmc_regulator_set_ocr(struct mmc_host *mmc,
1476 struct regulator *supply,
1477 unsigned short vdd_bit)
1483 mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
1485 result = regulator_set_voltage(supply, min_uV, max_uV);
1486 if (result == 0 && !mmc->regulator_enabled) {
1487 result = regulator_enable(supply);
1489 mmc->regulator_enabled = true;
1491 } else if (mmc->regulator_enabled) {
1492 result = regulator_disable(supply);
1494 mmc->regulator_enabled = false;
1498 dev_err(mmc_dev(mmc),
1499 "could not set regulator OCR (%d)\n", result);
1502 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
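/*
 * Illustrative usage sketch (hypothetical host driver, not part of this
 * file): a set_ios() implementation typically forwards ios->vdd here,
 * with vdd_bit == 0 meaning "power off":
 *
 *	if (!IS_ERR(mmc->supply.vmmc))
 *		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 */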
1504 static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1505 int min_uV, int target_uV,
1509 * Check if supported first to avoid errors since we may try several
1510 * signal levels during power up and don't want to show errors.
1512 if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1515 return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1520 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
1522 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
1523 * That will match the behavior of old boards where VQMMC and VMMC were supplied
1524 * by the same supply. The Bus Operating conditions for 3.3V signaling in the
1525 * SD card spec also define VQMMC in terms of VMMC.
1526 * If this is not possible we'll try the full 2.7-3.6V of the spec.
1528 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
1529 * requested voltage. This is definitely a good idea for UHS where there's a
1530 * separate regulator on the card that's trying to make 1.8V and it's best if we match.
1533 * This function is expected to be used by a controller's
1534 * start_signal_voltage_switch() function.
1536 int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
1538 struct device *dev = mmc_dev(mmc);
1539 int ret, volt, min_uV, max_uV;
1541 /* If no vqmmc supply then we can't change the voltage */
1542 if (IS_ERR(mmc->supply.vqmmc))
1545 switch (ios->signal_voltage) {
1546 case MMC_SIGNAL_VOLTAGE_120:
1547 return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1548 1100000, 1200000, 1300000);
1549 case MMC_SIGNAL_VOLTAGE_180:
1550 return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1551 1700000, 1800000, 1950000);
1552 case MMC_SIGNAL_VOLTAGE_330:
1553 ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
1557 dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
1558 __func__, volt, max_uV);
1560 min_uV = max(volt - 300000, 2700000);
1561 max_uV = min(max_uV + 200000, 3600000);
1564 * Due to a limitation in the current implementation of
1565 * regulator_set_voltage_triplet(), which takes the lowest
1566 * possible voltage if below the target, search for a suitable
1567 * voltage in two steps and try to stay close to vmmc
1568 * with a 0.3V tolerance at first.
1570 if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1571 min_uV, volt, max_uV))
1574 return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1575 2700000, volt, 3600000);
1580 EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
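/*
 * Illustrative usage sketch (hypothetical host driver, not part of this
 * file): a start_signal_voltage_switch() handler backed by a vqmmc
 * regulator can simply delegate:
 *
 *	static int foo_start_signal_voltage_switch(struct mmc_host *mmc,
 *						   struct mmc_ios *ios)
 *	{
 *		return mmc_regulator_set_vqmmc(mmc, ios);
 *	}
 */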
1582 #endif /* CONFIG_REGULATOR */
1584 int mmc_regulator_get_supply(struct mmc_host *mmc)
1586 struct device *dev = mmc_dev(mmc);
1589 mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1590 mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1592 if (IS_ERR(mmc->supply.vmmc)) {
1593 if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1594 return -EPROBE_DEFER;
1595 dev_dbg(dev, "No vmmc regulator found\n");
1597 ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1599 mmc->ocr_avail = ret;
1601 dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1604 if (IS_ERR(mmc->supply.vqmmc)) {
1605 if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1606 return -EPROBE_DEFER;
1607 dev_dbg(dev, "No vqmmc regulator found\n");
1612 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
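/*
 * Illustrative usage sketch (hypothetical host driver probe, not part of
 * this file):
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret)
 *		return ret;	// only -EPROBE_DEFER in this version
 *	if (!mmc->ocr_avail)
 *		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; // board default
 */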
1615 * Mask off any voltages we don't support and select
1616 * the lowest voltage
1618 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1623 * Sanity check the voltages that the card claims to
1627 dev_warn(mmc_dev(host),
1628 "card claims to support voltages below defined range\n");
1632 ocr &= host->ocr_avail;
1634 dev_warn(mmc_dev(host), "no support for card's volts\n");
1638 if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1641 mmc_power_cycle(host, ocr);
1645 if (bit != host->ios.vdd)
1646 dev_warn(mmc_dev(host), "exceeding card's volts\n");
1652 int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1655 int old_signal_voltage = host->ios.signal_voltage;
1657 host->ios.signal_voltage = signal_voltage;
1658 if (host->ops->start_signal_voltage_switch)
1659 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1662 host->ios.signal_voltage = old_signal_voltage;
1668 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
1670 struct mmc_command cmd = {0};
1677 * Send CMD11 only if the request is to switch the card to
1680 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1681 return __mmc_set_signal_voltage(host, signal_voltage);
1684 * If we cannot switch voltages, return failure so the caller
1685 * can continue without UHS mode
1687 if (!host->ops->start_signal_voltage_switch)
1689 if (!host->ops->card_busy)
1690 pr_warn("%s: cannot verify signal voltage switch\n",
1691 mmc_hostname(host));
1693 cmd.opcode = SD_SWITCH_VOLTAGE;
1695 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1697 err = mmc_wait_for_cmd(host, &cmd, 0);
1701 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1705 * The card should drive cmd and dat[0:3] low immediately
1706 * after the response of cmd11, but wait 1 ms to be sure
1709 if (host->ops->card_busy && !host->ops->card_busy(host)) {
1714 * During a signal voltage level switch, the clock must be gated
1715 * for 5 ms according to the SD spec
1717 clock = host->ios.clock;
1718 host->ios.clock = 0;
1721 if (__mmc_set_signal_voltage(host, signal_voltage)) {
1723 * Voltages may not have been switched, but we've already
1724 * sent CMD11, so a power cycle is required anyway
1730 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1732 host->ios.clock = clock;
1735 /* Wait for at least 1 ms according to spec */
1739 * Failure to switch is indicated by the card holding
1742 if (host->ops->card_busy && host->ops->card_busy(host))
1747 pr_debug("%s: Signal voltage switch failed, "
1748 "power cycling card\n", mmc_hostname(host));
1749 mmc_power_cycle(host, ocr);
1756 * Select timing parameters for host.
1758 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1760 host->ios.timing = timing;
1765 * Select appropriate driver type for host.
1767 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1769 host->ios.drv_type = drv_type;
1773 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1774 int card_drv_type, int *drv_type)
1776 struct mmc_host *host = card->host;
1777 int host_drv_type = SD_DRIVER_TYPE_B;
1781 if (!host->ops->select_drive_strength)
1784 /* Use SD definition of driver strength for hosts */
1785 if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1786 host_drv_type |= SD_DRIVER_TYPE_A;
1788 if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1789 host_drv_type |= SD_DRIVER_TYPE_C;
1791 if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1792 host_drv_type |= SD_DRIVER_TYPE_D;
1795 * The drive strength that the hardware can support
1796 * depends on the board design. Pass the appropriate
1797 * information and let the hardware specific code
1798 * return what is possible given the options
1800 return host->ops->select_drive_strength(card, max_dtr,
1807 * Apply power to the MMC stack. This is a two-stage process.
1808 * First, we enable power to the card without the clock running.
1809 * We then wait a bit for the power to stabilise. Finally,
1810 * enable the bus drivers and clock to the card.
1812 * We must _NOT_ enable the clock prior to power stabilising.
1814 * If a host does all the power sequencing itself, ignore the
1815 * initial MMC_POWER_UP stage.
1817 void mmc_power_up(struct mmc_host *host, u32 ocr)
1819 if (host->ios.power_mode == MMC_POWER_ON)
1822 mmc_pwrseq_pre_power_on(host);
1824 host->ios.vdd = fls(ocr) - 1;
1825 host->ios.power_mode = MMC_POWER_UP;
1826 /* Set initial state and call mmc_set_ios */
1827 mmc_set_initial_state(host);
1829 /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1830 if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
1831 dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1832 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
1833 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1834 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
1835 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1838 * This delay should be sufficient to allow the power supply
1839 * to reach the minimum voltage.
1843 mmc_pwrseq_post_power_on(host);
1845 host->ios.clock = host->f_init;
1847 host->ios.power_mode = MMC_POWER_ON;
1851 * This delay must be at least 74 clock cycles, or 1 ms, or the
1852 * time required to reach a stable voltage.
1857 void mmc_power_off(struct mmc_host *host)
1859 if (host->ios.power_mode == MMC_POWER_OFF)
1862 mmc_pwrseq_power_off(host);
1864 host->ios.clock = 0;
1867 host->ios.power_mode = MMC_POWER_OFF;
1868 /* Set initial state and call mmc_set_ios */
1869 mmc_set_initial_state(host);
1872 * Some configurations, such as the 802.11 SDIO card in the OLPC
1873 * XO-1.5, require a short delay after poweroff before the card
1874 * can be successfully turned on again.
1879 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1881 mmc_power_off(host);
1882 /* Wait at least 1 ms according to SD spec */
1884 mmc_power_up(host, ocr);
1888 * Cleanup when the last reference to the bus operator is dropped.
1890 static void __mmc_release_bus(struct mmc_host *host)
1893 BUG_ON(host->bus_refs);
1894 BUG_ON(!host->bus_dead);
1896 host->bus_ops = NULL;
1900 * Increase reference count of bus operator
1902 static inline void mmc_bus_get(struct mmc_host *host)
1904 unsigned long flags;
1906 spin_lock_irqsave(&host->lock, flags);
1908 spin_unlock_irqrestore(&host->lock, flags);
1912 * Decrease reference count of bus operator and free it if
1913 * it is the last reference.
1915 static inline void mmc_bus_put(struct mmc_host *host)
1917 unsigned long flags;
1919 spin_lock_irqsave(&host->lock, flags);
1921 if ((host->bus_refs == 0) && host->bus_ops)
1922 __mmc_release_bus(host);
1923 spin_unlock_irqrestore(&host->lock, flags);
1927 * Assign an mmc bus handler to a host. Only one bus handler may control a
1928 * host at any given time.
1930 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1932 unsigned long flags;
1937 WARN_ON(!host->claimed);
1939 spin_lock_irqsave(&host->lock, flags);
1941 BUG_ON(host->bus_ops);
1942 BUG_ON(host->bus_refs);
1944 host->bus_ops = ops;
1948 spin_unlock_irqrestore(&host->lock, flags);
1952 * Remove the current bus handler from a host.
1954 void mmc_detach_bus(struct mmc_host *host)
1956 unsigned long flags;
1960 WARN_ON(!host->claimed);
1961 WARN_ON(!host->bus_ops);
1963 spin_lock_irqsave(&host->lock, flags);
1967 spin_unlock_irqrestore(&host->lock, flags);
1972 static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1975 #ifdef CONFIG_MMC_DEBUG
1976 unsigned long flags;
1977 spin_lock_irqsave(&host->lock, flags);
1978 WARN_ON(host->removed);
1979 spin_unlock_irqrestore(&host->lock, flags);
1983 * If the device is configured as a wakeup source, we prevent a new sleep for
1984 * 5 s to give user space time to consume the event.
1986 if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1987 device_can_wakeup(mmc_dev(host)))
1988 pm_wakeup_event(mmc_dev(host), 5000);
1990 host->detect_change = 1;
1991 mmc_schedule_delayed_work(&host->detect, delay);
1995 * mmc_detect_change - process change of state on a MMC socket
1996 * @host: host which changed state.
1997 * @delay: optional delay to wait before detection (jiffies)
1999 * MMC drivers should call this when they detect a card has been
2000 * inserted or removed. The MMC layer will confirm that any
2001 * present card is still functional, and initialize any newly
2004 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
2006 _mmc_detect_change(host, delay, true);
2008 EXPORT_SYMBOL(mmc_detect_change);
2010 void mmc_init_erase(struct mmc_card *card)
2014 if (is_power_of_2(card->erase_size))
2015 card->erase_shift = ffs(card->erase_size) - 1;
2017 card->erase_shift = 0;
2020 * It is possible to erase an arbitrarily large area of an SD or MMC
2021 * card. That is not desirable because it can take a long time
2022 * (minutes), potentially delaying more important I/O, and also because the
2023 * timeout calculations become increasingly over-estimated.
2024 * Consequently, 'pref_erase' is defined as a guide to limit erases
2025 * to that size and alignment.
2027 * For SD cards that define Allocation Unit size, limit erases to one
2028 * Allocation Unit at a time.
2029 * For MMC, have a stab at a good value; for modern cards it will
2030 * end up being 4MiB. Note that if the value is too small, it can end
2031 * up taking longer to erase. Also note, erase_size is already set to
2032 * High Capacity Erase Size if available when this function is called.
2034 if (mmc_card_sd(card) && card->ssr.au) {
2035 card->pref_erase = card->ssr.au;
2036 card->erase_shift = ffs(card->ssr.au) - 1;
2037 } else if (card->erase_size) {
2038 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
2040 card->pref_erase = 512 * 1024 / 512;
2042 card->pref_erase = 1024 * 1024 / 512;
2044 card->pref_erase = 2 * 1024 * 1024 / 512;
2046 card->pref_erase = 4 * 1024 * 1024 / 512;
2047 if (card->pref_erase < card->erase_size)
2048 card->pref_erase = card->erase_size;
2050 sz = card->pref_erase % card->erase_size;
2052 card->pref_erase += card->erase_size - sz;
2055 card->pref_erase = 0;
2058 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
2059 unsigned int arg, unsigned int qty)
2061 unsigned int erase_timeout;
2063 if (arg == MMC_DISCARD_ARG ||
2064 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
2065 erase_timeout = card->ext_csd.trim_timeout;
2066 } else if (card->ext_csd.erase_group_def & 1) {
2067 /* High Capacity Erase Group Size uses HC timeouts */
2068 if (arg == MMC_TRIM_ARG)
2069 erase_timeout = card->ext_csd.trim_timeout;
2071 erase_timeout = card->ext_csd.hc_erase_timeout;
2073 /* CSD Erase Group Size uses write timeout */
2074 unsigned int mult = (10 << card->csd.r2w_factor);
2075 unsigned int timeout_clks = card->csd.tacc_clks * mult;
2076 unsigned int timeout_us;
2078 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
2079 if (card->csd.tacc_ns < 1000000)
2080 timeout_us = (card->csd.tacc_ns * mult) / 1000;
2082 timeout_us = (card->csd.tacc_ns / 1000) * mult;
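		/*
		 * Worked example of the overflow guard above (illustrative):
		 * with tacc_ns = 80000000 and mult = 1280, the product
		 * 80000000 * 1280 = 1.024e11 does not fit in an unsigned int,
		 * so dividing first keeps it in range:
		 * (80000000 / 1000) * 1280 = 102400000 us.
		 */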
2085 * ios.clock is only a target. The real clock rate might be
2086 * less but not that much less, so fudge it by multiplying by 2.
2089 timeout_us += (timeout_clks * 1000) /
2090 (card->host->ios.clock / 1000);
2092 erase_timeout = timeout_us / 1000;
2095 * Theoretically, the calculation could underflow so round up
2096 * to 1ms in that case.
2102 /* Multiplier for secure operations */
2103 if (arg & MMC_SECURE_ARGS) {
2104 if (arg == MMC_SECURE_ERASE_ARG)
2105 erase_timeout *= card->ext_csd.sec_erase_mult;
2107 erase_timeout *= card->ext_csd.sec_trim_mult;
2110 erase_timeout *= qty;
2113 * Ensure at least a 1 second timeout for SPI as per
2114 * 'mmc_set_data_timeout()'
2116 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
2117 erase_timeout = 1000;
2119 return erase_timeout;
2122 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
2126 unsigned int erase_timeout;
2128 if (card->ssr.erase_timeout) {
2129 /* Erase timeout specified in SD Status Register (SSR) */
2130 erase_timeout = card->ssr.erase_timeout * qty +
2131 card->ssr.erase_offset;
2134 * Erase timeout not specified in SD Status Register (SSR) so
2135 * use 250ms per write block.
2137 erase_timeout = 250 * qty;
2140 /* Must not be less than 1 second */
2141 if (erase_timeout < 1000)
2142 erase_timeout = 1000;
2144 return erase_timeout;
2147 static unsigned int mmc_erase_timeout(struct mmc_card *card,
2151 if (mmc_card_sd(card))
2152 return mmc_sd_erase_timeout(card, arg, qty);
2154 return mmc_mmc_erase_timeout(card, arg, qty);
2157 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
2158 unsigned int to, unsigned int arg)
2160 struct mmc_command cmd = {0};
2161 unsigned int qty = 0, busy_timeout = 0;
2162 bool use_r1b_resp = false;
2163 unsigned long timeout;
2166 mmc_retune_hold(card->host);
2169 * qty is used to calculate the erase timeout which depends on how many
2170 * erase groups (or allocation units in SD terminology) are affected.
2171 * We count erasing part of an erase group as one erase group.
2172 * For SD, the allocation units are always a power of 2. For MMC, the
2173 * erase group size is almost certainly also a power of 2, but the JEDEC
2174 * standard does not seem to insist on that, so we fall back to
2175 * division in that case. SD may not specify an allocation unit size,
2176 * in which case the timeout is based on the number of write blocks.
2178 * Note that the timeout for secure trim 2 will only be correct if the
2179 * number of erase groups specified is the same as the total of all
2180 * preceding secure trim 1 commands. Since the power may have been
2181 * lost since the secure trim 1 commands occurred, it is generally
2182 * impossible to calculate the secure trim 2 timeout correctly.
2184 if (card->erase_shift)
2185 qty += ((to >> card->erase_shift) -
2186 (from >> card->erase_shift)) + 1;
2187 else if (mmc_card_sd(card))
2188 qty += to - from + 1;
2190 qty += ((to / card->erase_size) -
2191 (from / card->erase_size)) + 1;
2193 if (!mmc_card_blockaddr(card)) {
2198 if (mmc_card_sd(card))
2199 cmd.opcode = SD_ERASE_WR_BLK_START;
2201 cmd.opcode = MMC_ERASE_GROUP_START;
2203 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2204 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2206 pr_err("mmc_erase: group start error %d, "
2207 "status %#x\n", err, cmd.resp[0]);
2212 memset(&cmd, 0, sizeof(struct mmc_command));
2213 if (mmc_card_sd(card))
2214 cmd.opcode = SD_ERASE_WR_BLK_END;
2216 cmd.opcode = MMC_ERASE_GROUP_END;
2218 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2219 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2221 pr_err("mmc_erase: group end error %d, status %#x\n",
2227 memset(&cmd, 0, sizeof(struct mmc_command));
2228 cmd.opcode = MMC_ERASE;
2230 busy_timeout = mmc_erase_timeout(card, arg, qty);
2232 * If the host controller supports busy signalling and the timeout for
2233 * the erase operation does not exceed the max_busy_timeout, we should
2234 * use an R1B response. Otherwise we need to prevent the host from doing hw busy
2235 * detection, which is done by converting to an R1 response instead.
2237 if (card->host->max_busy_timeout &&
2238 busy_timeout > card->host->max_busy_timeout) {
2239 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2241 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2242 cmd.busy_timeout = busy_timeout;
2243 use_r1b_resp = true;
2246 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2248 pr_err("mmc_erase: erase error %d, status %#x\n",
2254 if (mmc_host_is_spi(card->host))
2258 * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling
2261 if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
2264 timeout = jiffies + msecs_to_jiffies(busy_timeout);
2266 memset(&cmd, 0, sizeof(struct mmc_command));
2267 cmd.opcode = MMC_SEND_STATUS;
2268 cmd.arg = card->rca << 16;
2269 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2270 /* Do not retry else we can't see errors */
2271 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2272 if (err || (cmd.resp[0] & 0xFDF92000)) {
2273 pr_err("error %d requesting status %#x\n",
2279 /* Timeout if the device never becomes ready for data and
2280 * never leaves the program state.
2282 if (time_after(jiffies, timeout)) {
2283 pr_err("%s: Card stuck in programming state! %s\n",
2284 mmc_hostname(card->host), __func__);
2289 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2290 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2292 mmc_retune_release(card->host);
2296 static unsigned int mmc_align_erase_size(struct mmc_card *card,
2301 unsigned int from_new = *from, nr_new = nr, rem;
2304 * When 'card->erase_size' is a power of 2, we can use round_up/down()
2305 * to align the erase size efficiently.
2307 if (is_power_of_2(card->erase_size)) {
2308 unsigned int temp = from_new;
2310 from_new = round_up(temp, card->erase_size);
2311 rem = from_new - temp;
2318 nr_new = round_down(nr_new, card->erase_size);
2320 rem = from_new % card->erase_size;
2322 rem = card->erase_size - rem;
2330 rem = nr_new % card->erase_size;
2338 *to = from_new + nr_new;
2345 * mmc_erase - erase sectors.
2346 * @card: card to erase
2347 * @from: first sector to erase
2348 * @nr: number of sectors to erase
2349 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2351 * Caller must claim host before calling this function.
2353 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2356 unsigned int rem, to = from + nr;
2359 if (!(card->host->caps & MMC_CAP_ERASE) ||
2360 !(card->csd.cmdclass & CCC_ERASE))
2363 if (!card->erase_size)
2366 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2369 if ((arg & MMC_SECURE_ARGS) &&
2370 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2373 if ((arg & MMC_TRIM_ARGS) &&
2374 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2377 if (arg == MMC_SECURE_ERASE_ARG) {
2378 if (from % card->erase_size || nr % card->erase_size)
2382 if (arg == MMC_ERASE_ARG)
2383 nr = mmc_align_erase_size(card, &from, &to, nr);
2391 /* 'from' and 'to' are inclusive */
2395 * Special case where only one erase-group fits in the timeout budget:
2396 * If the region crosses an erase-group boundary in this particular
2397 * case, we will be trimming more than one erase-group, which does not
2398 * fit in the timeout budget of the controller, so we need to split it
2399 * and call mmc_do_erase() twice if necessary. This special case is
2400 * identified by the card->eg_boundary flag.
2402 rem = card->erase_size - (from % card->erase_size);
2403 if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2404 err = mmc_do_erase(card, from, from + rem - 1, arg);
2406 if ((err) || (to <= from))
2410 return mmc_do_erase(card, from, to, arg);
2412 EXPORT_SYMBOL(mmc_erase);
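/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * discarding a 1 MiB region, preferring TRIM when the card supports it.
 * example_discard_region() is a made-up name; the
 * mmc_claim_host()/mmc_release_host() pairing follows the
 * "Caller must claim host" rule documented above.
 */
static int __maybe_unused example_discard_region(struct mmc_card *card,
						 unsigned int from)
{
	unsigned int arg = mmc_can_trim(card) ? MMC_TRIM_ARG : MMC_ERASE_ARG;
	int err;

	mmc_claim_host(card->host);
	err = mmc_erase(card, from, 2048, arg);	/* 2048 sectors = 1 MiB */
	mmc_release_host(card->host);

	return err;
}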
2414 int mmc_can_erase(struct mmc_card *card)
2416 if ((card->host->caps & MMC_CAP_ERASE) &&
2417 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2421 EXPORT_SYMBOL(mmc_can_erase);
2423 int mmc_can_trim(struct mmc_card *card)
2425 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2426 (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2430 EXPORT_SYMBOL(mmc_can_trim);
2432 int mmc_can_discard(struct mmc_card *card)
2435 * As there's no way to detect the discard support bit at v4.5,
2436 * use the s/w feature support field.
2438 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2442 EXPORT_SYMBOL(mmc_can_discard);
2444 int mmc_can_sanitize(struct mmc_card *card)
2446 if (!mmc_can_trim(card) && !mmc_can_erase(card))
2448 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2452 EXPORT_SYMBOL(mmc_can_sanitize);
2454 int mmc_can_secure_erase_trim(struct mmc_card *card)
2456 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2457 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2461 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2463 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2466 if (!card->erase_size)
2468 if (from % card->erase_size || nr % card->erase_size)
2472 EXPORT_SYMBOL(mmc_erase_group_aligned);
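/*
 * Illustrative sketch, not part of the original file: how a caller might
 * combine the capability helpers above before choosing an erase argument.
 * example_pick_erase_arg() is a made-up name; the real MMC block driver
 * performs similar checks before issuing a secure erase.
 */
static unsigned int __maybe_unused example_pick_erase_arg(struct mmc_card *card,
							   unsigned int from,
							   unsigned int nr)
{
	if (mmc_can_secure_erase_trim(card) &&
	    mmc_erase_group_aligned(card, from, nr))
		return MMC_SECURE_ERASE_ARG;
	if (mmc_can_trim(card))
		return MMC_TRIM_ARG;
	return MMC_ERASE_ARG;
}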
2474 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2477 struct mmc_host *host = card->host;
2478 unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
2479 unsigned int last_timeout = 0;
2480 unsigned int max_busy_timeout = host->max_busy_timeout ?
2481 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
2483 if (card->erase_shift) {
2484 max_qty = UINT_MAX >> card->erase_shift;
2485 min_qty = card->pref_erase >> card->erase_shift;
2486 } else if (mmc_card_sd(card)) {
2488 min_qty = card->pref_erase;
2490 max_qty = UINT_MAX / card->erase_size;
2491 min_qty = card->pref_erase / card->erase_size;
2495 * We should not use 'host->max_busy_timeout' as the only limitation
2496 * when deciding the max discard sectors. We should set a balanced value
2497 * to improve the erase speed while not creating an overly long timeout.
2500 * Here we set 'card->pref_erase' as the minimal discard sectors no
2501 * matter the size of 'host->max_busy_timeout', but if the
2502 * 'host->max_busy_timeout' is large enough for more discard sectors,
2503 * then we can continue to increase the max discard sectors until we
2504 * get a balance value. In cases when the 'host->max_busy_timeout'
2505 * isn't specified, use the default max erase timeout.
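* The loop below keeps doubling the step 'x' and stops growing the discard
* size once the accumulated quantity has passed 'min_qty' and the estimated
* erase timeout no longer fits in 'max_busy_timeout', or once the estimated
* timeout starts to decrease.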
2509 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2510 timeout = mmc_erase_timeout(card, arg, qty + x);
2512 if (qty + x > min_qty && timeout > max_busy_timeout)
2515 if (timeout < last_timeout)
2517 last_timeout = timeout;
2527 * When specifying a sector range to trim, chances are we might cross
2528 * an erase-group boundary even if the amount of sectors is less than
2529 * one erase-group.
2530 * If we can only fit one erase-group in the controller timeout budget,
2531 * we have to make sure that erase-group boundaries are not crossed by a
2532 * single trim operation. We flag that special case with "eg_boundary".
2533 * In all other cases we can just decrement qty and pretend that we
2534 * always touch (qty + 1) erase-groups as a simple optimization.
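* For example (illustrative numbers): if the budget fits three erase-groups,
* qty ends up as 3 and is decremented to 2 here, so a later request of two
* erase-groups worth of sectors stays within the budget even when it
* straddles a boundary and really touches three groups.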
2535 */
2536 if (qty == 1)
2537 card->eg_boundary = 1;
2538 else
2539 qty--;
2541 /* Convert qty to sectors */
2542 if (card->erase_shift)
2543 max_discard = qty << card->erase_shift;
2544 else if (mmc_card_sd(card))
2545 max_discard = qty + 1;
2547 max_discard = qty * card->erase_size;
2552 unsigned int mmc_calc_max_discard(struct mmc_card *card)
2554 struct mmc_host *host = card->host;
2555 unsigned int max_discard, max_trim;
2558 * Without erase_group_def set, MMC erase timeout depends on clock
2559 * frequency, which can change. In that case, the best choice is
2560 * just the preferred erase size.
2562 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2563 return card->pref_erase;
2565 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2566 if (mmc_can_trim(card)) {
2567 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2568 if (max_trim < max_discard)
2569 max_discard = max_trim;
2570 } else if (max_discard < card->erase_size) {
2573 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2574 mmc_hostname(host), max_discard, host->max_busy_timeout ?
2575 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2578 EXPORT_SYMBOL(mmc_calc_max_discard);
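/*
 * Illustrative sketch, not part of the original file: how a block-layer
 * consumer such as the MMC block driver might apply the computed limit to a
 * request queue. example_setup_discard() is a made-up name, and it assumes
 * <linux/blkdev.h> is available for blk_queue_max_discard_sectors().
 */
static void __maybe_unused example_setup_discard(struct mmc_card *card,
						 struct request_queue *q)
{
	unsigned int max_discard = mmc_calc_max_discard(card);

	if (max_discard)
		blk_queue_max_discard_sectors(q, max_discard);
}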
2580 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2582 struct mmc_command cmd = {0};
2584 if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2585 mmc_card_hs400(card) || mmc_card_hs400es(card))
2588 cmd.opcode = MMC_SET_BLOCKLEN;
2590 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2591 return mmc_wait_for_cmd(card->host, &cmd, 5);
2593 EXPORT_SYMBOL(mmc_set_blocklen);
2595 int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2598 struct mmc_command cmd = {0};
2600 cmd.opcode = MMC_SET_BLOCK_COUNT;
2601 cmd.arg = blockcount & 0x0000FFFF;
2604 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2605 return mmc_wait_for_cmd(card->host, &cmd, 5);
2607 EXPORT_SYMBOL(mmc_set_blockcount);
2609 static void mmc_hw_reset_for_init(struct mmc_host *host)
2611 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2613 host->ops->hw_reset(host);
2616 int mmc_hw_reset(struct mmc_host *host)
2624 if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2629 ret = host->bus_ops->reset(host);
2633 pr_warn("%s: tried to reset card, got error %d\n",
2634 mmc_hostname(host), ret);
2638 EXPORT_SYMBOL(mmc_hw_reset);
2640 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2642 host->f_init = freq;
2644 #ifdef CONFIG_MMC_DEBUG
2645 pr_info("%s: %s: trying to init card at %u Hz\n",
2646 mmc_hostname(host), __func__, host->f_init);
2648 mmc_power_up(host, host->ocr_avail);
2651 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2652 * do a hardware reset if possible.
2654 mmc_hw_reset_for_init(host);
2657 * sdio_reset sends CMD52 to reset the card. Since we do not know
2658 * whether the card is being re-initialized, just send it. CMD52
2659 * should be ignored by SD/eMMC cards.
2660 * Skip it if we already know that we do not support SDIO commands.
2662 if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2667 if (!(host->caps2 & MMC_CAP2_NO_SD))
2668 mmc_send_if_cond(host, host->ocr_avail);
2670 /* Order's important: probe SDIO, then SD, then MMC */
2671 if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2672 if (!mmc_attach_sdio(host))
2675 if (!(host->caps2 & MMC_CAP2_NO_SD))
2676 if (!mmc_attach_sd(host))
2679 if (!(host->caps2 & MMC_CAP2_NO_MMC))
2680 if (!mmc_attach_mmc(host))
2683 mmc_power_off(host);
2687 int _mmc_detect_card_removed(struct mmc_host *host)
2691 if (!host->card || mmc_card_removed(host->card))
2694 ret = host->bus_ops->alive(host);
2697 * Card detect status and alive check may be out of sync if card is
2698 * removed slowly, when card detect switch changes while card/slot
2699 * pads are still in contact in hardware (refer to "SD Card Mechanical
2700 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2701 * detect work 200ms later for this case.
2703 if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2704 mmc_detect_change(host, msecs_to_jiffies(200));
2705 pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2709 mmc_card_set_removed(host->card);
2710 pr_debug("%s: card remove detected\n", mmc_hostname(host));
2716 int mmc_detect_card_removed(struct mmc_host *host)
2718 struct mmc_card *card = host->card;
2721 WARN_ON(!host->claimed);
2726 if (!mmc_card_is_removable(host))
2729 ret = mmc_card_removed(card);
2731 * The card will be considered unchanged unless we have been asked to
2732 * detect a change or the host requires polling to provide card detection.
2734 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2737 host->detect_change = 0;
2739 ret = _mmc_detect_card_removed(host);
2740 if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2742 * Schedule a detect work as soon as possible to let a
2743 * rescan handle the card removal.
2745 cancel_delayed_work(&host->detect);
2746 _mmc_detect_change(host, 0, false);
2752 EXPORT_SYMBOL(mmc_detect_card_removed);
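/*
 * Illustrative sketch, not part of the original file: callers are expected
 * to hold a claimed host, as the WARN_ON(!host->claimed) above documents.
 * example_card_present() is a made-up helper showing that pairing.
 */
static int __maybe_unused example_card_present(struct mmc_host *host)
{
	int removed;

	mmc_claim_host(host);
	removed = mmc_detect_card_removed(host);
	mmc_release_host(host);

	return removed ? -ENOMEDIUM : 0;
}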
2754 void mmc_rescan(struct work_struct *work)
2756 struct mmc_host *host =
2757 container_of(work, struct mmc_host, detect.work);
2760 if (host->rescan_disable)
2763 /* If there is a non-removable card registered, only scan once */
2764 if (!mmc_card_is_removable(host) && host->rescan_entered)
2766 host->rescan_entered = 1;
2768 if (host->trigger_card_event && host->ops->card_event) {
2769 mmc_claim_host(host);
2770 host->ops->card_event(host);
2771 mmc_release_host(host);
2772 host->trigger_card_event = false;
2778 * if there is a _removable_ card registered, check whether it is
2779 * still present.
2781 if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2782 host->bus_ops->detect(host);
2784 host->detect_change = 0;
2787 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2788 * the card is no longer present.
2793 /* if there still is a card present, stop here */
2794 if (host->bus_ops != NULL) {
2800 * Only we can add a new handler, so it's safe to
2801 * release the lock here.
2805 mmc_claim_host(host);
2806 if (mmc_card_is_removable(host) && host->ops->get_cd &&
2807 host->ops->get_cd(host) == 0) {
2808 mmc_power_off(host);
2809 mmc_release_host(host);
2813 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2814 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2816 if (freqs[i] <= host->f_min)
2819 mmc_release_host(host);
2822 if (host->caps & MMC_CAP_NEEDS_POLL)
2823 mmc_schedule_delayed_work(&host->detect, HZ);
2826 void mmc_start_host(struct mmc_host *host)
2828 host->f_init = max(freqs[0], host->f_min);
2829 host->rescan_disable = 0;
2830 host->ios.power_mode = MMC_POWER_UNDEFINED;
2832 mmc_claim_host(host);
2833 if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
2834 mmc_power_off(host);
2836 mmc_power_up(host, host->ocr_avail);
2837 mmc_release_host(host);
2839 mmc_gpiod_request_cd_irq(host);
2840 _mmc_detect_change(host, 0, false);
2843 void mmc_stop_host(struct mmc_host *host)
2845 #ifdef CONFIG_MMC_DEBUG
2846 unsigned long flags;
2847 spin_lock_irqsave(&host->lock, flags);
2849 spin_unlock_irqrestore(&host->lock, flags);
2851 if (host->slot.cd_irq >= 0)
2852 disable_irq(host->slot.cd_irq);
2854 host->rescan_disable = 1;
2855 cancel_delayed_work_sync(&host->detect);
2857 /* clear pm flags now and let card drivers set them as needed */
2861 if (host->bus_ops && !host->bus_dead) {
2862 /* Calling bus_ops->remove() with a claimed host can deadlock */
2863 host->bus_ops->remove(host);
2864 mmc_claim_host(host);
2865 mmc_detach_bus(host);
2866 mmc_power_off(host);
2867 mmc_release_host(host);
2875 mmc_claim_host(host);
2876 mmc_power_off(host);
2877 mmc_release_host(host);
2880 int mmc_power_save_host(struct mmc_host *host)
2884 #ifdef CONFIG_MMC_DEBUG
2885 pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2890 if (!host->bus_ops || host->bus_dead) {
2895 if (host->bus_ops->power_save)
2896 ret = host->bus_ops->power_save(host);
2900 mmc_power_off(host);
2904 EXPORT_SYMBOL(mmc_power_save_host);
2906 int mmc_power_restore_host(struct mmc_host *host)
2910 #ifdef CONFIG_MMC_DEBUG
2911 pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2916 if (!host->bus_ops || host->bus_dead) {
2921 mmc_power_up(host, host->card->ocr);
2922 ret = host->bus_ops->power_restore(host);
2928 EXPORT_SYMBOL(mmc_power_restore_host);
2931 * Flush the cache to the non-volatile storage.
2933 int mmc_flush_cache(struct mmc_card *card)
2937 if (mmc_card_mmc(card) &&
2938 (card->ext_csd.cache_size > 0) &&
2939 (card->ext_csd.cache_ctrl & 1)) {
2940 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2941 EXT_CSD_FLUSH_CACHE, 1,
2942 MMC_CACHE_FLUSH_TIMEOUT_MS);
2944 pr_err("%s: cache flush error %d\n",
2945 mmc_hostname(card->host), err);
2950 EXPORT_SYMBOL(mmc_flush_cache);
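/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * flushing the card's cache before a power transition. It assumes the host
 * must be claimed around the CMD6 issued by mmc_flush_cache();
 * example_flush_for_suspend() is a made-up name.
 */
static int __maybe_unused example_flush_for_suspend(struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_flush_cache(card);
	mmc_release_host(card->host);

	return err;
}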
2952 #ifdef CONFIG_PM_SLEEP
2953 /* Do the card removal on suspend if the card is assumed removable.
2954 * Do that in a pm notifier while userspace isn't yet frozen, so we are still able to sync the card.
2957 static int mmc_pm_notify(struct notifier_block *notify_block,
2958 unsigned long mode, void *unused)
2960 struct mmc_host *host = container_of(
2961 notify_block, struct mmc_host, pm_notify);
2962 unsigned long flags;
2966 case PM_HIBERNATION_PREPARE:
2967 case PM_SUSPEND_PREPARE:
2968 case PM_RESTORE_PREPARE:
2969 spin_lock_irqsave(&host->lock, flags);
2970 host->rescan_disable = 1;
2971 spin_unlock_irqrestore(&host->lock, flags);
2972 cancel_delayed_work_sync(&host->detect);
2977 /* Validate prerequisites for suspend */
2978 if (host->bus_ops->pre_suspend)
2979 err = host->bus_ops->pre_suspend(host);
2983 if (!mmc_card_is_removable(host)) {
2984 dev_warn(mmc_dev(host),
2985 "pre_suspend failed for non-removable host: "
2987 /* Avoid removing non-removable hosts */
2991 /* Calling bus_ops->remove() with a claimed host can deadlock */
2992 host->bus_ops->remove(host);
2993 mmc_claim_host(host);
2994 mmc_detach_bus(host);
2995 mmc_power_off(host);
2996 mmc_release_host(host);
3000 case PM_POST_SUSPEND:
3001 case PM_POST_HIBERNATION:
3002 case PM_POST_RESTORE:
3004 spin_lock_irqsave(&host->lock, flags);
3005 host->rescan_disable = 0;
3006 spin_unlock_irqrestore(&host->lock, flags);
3007 _mmc_detect_change(host, 0, false);
3014 void mmc_register_pm_notifier(struct mmc_host *host)
3016 host->pm_notify.notifier_call = mmc_pm_notify;
3017 register_pm_notifier(&host->pm_notify);
3020 void mmc_unregister_pm_notifier(struct mmc_host *host)
3022 unregister_pm_notifier(&host->pm_notify);
3027 * mmc_init_context_info() - init synchronization context
3030 * Initialize the struct context_info needed to implement the asynchronous
3031 * request mechanism, used by the mmc core, host drivers and mmc requests.
3034 void mmc_init_context_info(struct mmc_host *host)
3036 spin_lock_init(&host->context_info.lock);
3037 host->context_info.is_new_req = false;
3038 host->context_info.is_done_rcv = false;
3039 host->context_info.is_waiting_last_req = false;
3040 init_waitqueue_head(&host->context_info.wait);
3043 static int __init mmc_init(void)
3047 ret = mmc_register_bus();
3051 ret = mmc_register_host_class();
3053 goto unregister_bus;
3055 ret = sdio_register_bus();
3057 goto unregister_host_class;
3061 unregister_host_class:
3062 mmc_unregister_host_class();
3063 unregister_bus:
3064 mmc_unregister_bus();
3068 static void __exit mmc_exit(void)
3070 sdio_unregister_bus();
3071 mmc_unregister_host_class();
3072 mmc_unregister_bus();
3075 subsys_initcall(mmc_init);
3076 module_exit(mmc_exit);
3078 MODULE_LICENSE("GPL");