/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
        pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
        pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
        SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

        SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
                   sdhci_readl(host, SDHCI_DMA_ADDRESS),
                   sdhci_readw(host, SDHCI_HOST_VERSION));
        SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
                   sdhci_readw(host, SDHCI_BLOCK_SIZE),
                   sdhci_readw(host, SDHCI_BLOCK_COUNT));
        SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
                   sdhci_readl(host, SDHCI_ARGUMENT),
                   sdhci_readw(host, SDHCI_TRANSFER_MODE));
        SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
                   sdhci_readl(host, SDHCI_PRESENT_STATE),
                   sdhci_readb(host, SDHCI_HOST_CONTROL));
        SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
                   sdhci_readb(host, SDHCI_POWER_CONTROL),
                   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
        SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
                   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
                   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
        SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
                   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
                   sdhci_readl(host, SDHCI_INT_STATUS));
        SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
                   sdhci_readl(host, SDHCI_INT_ENABLE),
                   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
        SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
                   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
                   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
        SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
                   sdhci_readl(host, SDHCI_CAPABILITIES),
                   sdhci_readl(host, SDHCI_CAPABILITIES_1));
        SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
                   sdhci_readw(host, SDHCI_COMMAND),
                   sdhci_readl(host, SDHCI_MAX_CURRENT));
        SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
                   sdhci_readl(host, SDHCI_RESPONSE),
                   sdhci_readl(host, SDHCI_RESPONSE + 4));
        SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
                   sdhci_readl(host, SDHCI_RESPONSE + 8),
                   sdhci_readl(host, SDHCI_RESPONSE + 12));
        SDHCI_DUMP("Host ctl2: 0x%08x\n",
                   sdhci_readw(host, SDHCI_HOST_CONTROL2));

        if (host->flags & SDHCI_USE_ADMA) {
                if (host->flags & SDHCI_USE_64_BIT_DMA) {
                        SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
                                   sdhci_readl(host, SDHCI_ADMA_ERROR),
                                   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
                                   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
                } else {
                        SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
                                   sdhci_readl(host, SDHCI_ADMA_ERROR),
                                   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
                }
        }

        SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
        return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
        u32 present;

        if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
            !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
                return;

        if (enable) {
                present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
                                      SDHCI_CARD_PRESENT;

                host->ier |= present ? SDHCI_INT_CARD_REMOVE :
                                       SDHCI_INT_CARD_INSERT;
        } else {
                host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
        }

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
        sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
        sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
        if (host->bus_on)
                return;
        host->bus_on = true;
        pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
        if (!host->bus_on)
                return;
        host->bus_on = false;
        pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
        ktime_t timeout;

        sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

        if (mask & SDHCI_RESET_ALL) {
                host->clock = 0;
                /* Reset-all turns off SD Bus Power */
                if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
                        sdhci_runtime_pm_bus_off(host);
        }

        /* Wait max 100 ms */
        timeout = ktime_add_ms(ktime_get(), 100);

        /* hw clears the bit when it's done */
        while (1) {
                bool timedout = ktime_after(ktime_get(), timeout);

                if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
                        break;
                if (timedout) {
                        pr_err("%s: Reset 0x%x never completed.\n",
                                mmc_hostname(host->mmc), (int)mask);
                        sdhci_dumpregs(host);
                        return;
                }
                udelay(10);
        }
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
        if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
                struct mmc_host *mmc = host->mmc;

                if (!mmc->ops->get_cd(mmc))
                        return;
        }

        host->ops->reset(host, mask);

        if (mask & SDHCI_RESET_ALL) {
                if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                        if (host->ops->enable_dma)
                                host->ops->enable_dma(host);
                }

                /* Resetting the controller clears many register settings */
                host->preset_enabled = false;
        }
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
        host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
                    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
                    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
                    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
                    SDHCI_INT_RESPONSE;

        if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
            host->tuning_mode == SDHCI_TUNING_MODE_3)
                host->ier |= SDHCI_INT_RETUNE;

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
        struct mmc_host *mmc = host->mmc;

        if (soft)
                sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
        else
                sdhci_do_reset(host, SDHCI_RESET_ALL);

        sdhci_set_default_irqs(host);

        host->cqe_on = false;

        if (soft) {
                /* force clock reconfiguration */
                host->clock = 0;
                mmc->ops->set_ios(mmc, &mmc->ios);
        }
}

static void sdhci_reinit(struct sdhci_host *host)
{
        sdhci_init(host, 0);
        sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
        u8 ctrl;

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
        ctrl |= SDHCI_CTRL_LED;
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
        u8 ctrl;

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
        ctrl &= ~SDHCI_CTRL_LED;
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
                              enum led_brightness brightness)
{
        struct sdhci_host *host = container_of(led, struct sdhci_host, led);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        if (host->runtime_suspended)
                goto out;

        if (brightness == LED_OFF)
                __sdhci_led_deactivate(host);
        else
                __sdhci_led_activate(host);
out:
        spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;

        snprintf(host->led_name, sizeof(host->led_name),
                 "%s::", mmc_hostname(mmc));

        host->led.name = host->led_name;
        host->led.brightness = LED_OFF;
        host->led.default_trigger = mmc_hostname(mmc);
        host->led.brightness_set = sdhci_led_control;

        return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
        led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
        return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
        __sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
        __sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
        unsigned long flags;
        size_t blksize, len, chunk;
        u32 uninitialized_var(scratch);
        u8 *buf;

        DBG("PIO reading\n");

        blksize = host->data->blksz;
        chunk = 0;

        local_irq_save(flags);

        while (blksize) {
                BUG_ON(!sg_miter_next(&host->sg_miter));

                len = min(host->sg_miter.length, blksize);

                blksize -= len;
                host->sg_miter.consumed = len;

                buf = host->sg_miter.addr;

                while (len) {
                        if (chunk == 0) {
                                scratch = sdhci_readl(host, SDHCI_BUFFER);
                                chunk = 4;
                        }

                        *buf = scratch & 0xFF;

                        buf++;
                        scratch >>= 8;
                        chunk--;
                        len--;
                }
        }

        sg_miter_stop(&host->sg_miter);

        local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
        unsigned long flags;
        size_t blksize, len, chunk;
        u32 scratch;
        u8 *buf;

        DBG("PIO writing\n");

        blksize = host->data->blksz;
        chunk = 0;
        scratch = 0;

        local_irq_save(flags);

        while (blksize) {
                BUG_ON(!sg_miter_next(&host->sg_miter));

                len = min(host->sg_miter.length, blksize);

                blksize -= len;
                host->sg_miter.consumed = len;

                buf = host->sg_miter.addr;

                while (len) {
                        scratch |= (u32)*buf << (chunk * 8);

                        buf++;
                        chunk++;
                        len--;

                        if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
                                sdhci_writel(host, scratch, SDHCI_BUFFER);
                                chunk = 0;
                                scratch = 0;
                        }
                }
        }

        sg_miter_stop(&host->sg_miter);

        local_irq_restore(flags);
}
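
/*
 * Illustrative note on the byte order used by the two PIO helpers
 * above: bytes are packed into and unpacked from the 32-bit scratch
 * word LSB-first, so writing the memory bytes { 0xAA, 0xBB, 0xCC,
 * 0xDD } produces scratch == 0xDDCCBBAA before the single
 * sdhci_writel(), and a read returning 0xDDCCBBAA unpacks to the same
 * byte sequence.
 */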

static void sdhci_transfer_pio(struct sdhci_host *host)
{
        u32 mask;

        if (host->blocks == 0)
                return;

        if (host->data->flags & MMC_DATA_READ)
                mask = SDHCI_DATA_AVAILABLE;
        else
                mask = SDHCI_SPACE_AVAILABLE;

        /*
         * Some controllers (JMicron JMB38x) mess up the buffer bits
         * for transfers < 4 bytes. As long as it is just one block,
         * we can ignore the bits.
         */
        if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
                (host->data->blocks == 1))
                mask = ~0;

        while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
                if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
                        udelay(100);

                if (host->data->flags & MMC_DATA_READ)
                        sdhci_read_block_pio(host);
                else
                        sdhci_write_block_pio(host);

                host->blocks--;
                if (host->blocks == 0)
                        break;
        }

        DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
                                  struct mmc_data *data, int cookie)
{
        int sg_count;

        /*
         * If the data buffers are already mapped, return the previous
         * dma_map_sg() result.
         */
        if (data->host_cookie == COOKIE_PRE_MAPPED)
                return data->sg_count;

        /* Bounce write requests to the bounce buffer */
        if (host->bounce_buffer) {
                unsigned int length = data->blksz * data->blocks;

                if (length > host->bounce_buffer_size) {
                        pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
                               mmc_hostname(host->mmc), length,
                               host->bounce_buffer_size);
                        return -EIO;
                }
                if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
                        /* Copy the data to the bounce buffer */
                        sg_copy_to_buffer(data->sg, data->sg_len,
                                          host->bounce_buffer,
                                          length);
                }
                /* Switch ownership to the DMA */
                dma_sync_single_for_device(host->mmc->parent,
                                           host->bounce_addr,
                                           host->bounce_buffer_size,
                                           mmc_get_dma_dir(data));
                /* Just a dummy value */
                sg_count = 1;
        } else {
                /* Just access the data directly from memory */
                sg_count = dma_map_sg(mmc_dev(host->mmc),
                                      data->sg, data->sg_len,
                                      mmc_get_dma_dir(data));
        }

        if (sg_count == 0)
                return -ENOSPC;

        data->sg_count = sg_count;
        data->host_cookie = cookie;

        return sg_count;
}
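
/*
 * A sketch of the intended calling pattern (assuming the mmc core's
 * ->pre_req()/->post_req() hooks, which are outside this excerpt): the
 * core may map the buffers ahead of time with COOKIE_PRE_MAPPED, in
 * which case the early return above skips the dma_map_sg() when the
 * request is actually issued, and the buffers stay mapped until
 * ->post_req() unmaps them.
 */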

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
        local_irq_save(*flags);
        return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
        kunmap_atomic(buffer);
        local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
                                  dma_addr_t addr, int len, unsigned cmd)
{
        struct sdhci_adma2_64_desc *dma_desc = desc;

        /* 32-bit and 64-bit descriptors have these members in same position */
        dma_desc->cmd = cpu_to_le16(cmd);
        dma_desc->len = cpu_to_le16(len);
        dma_desc->addr_lo = cpu_to_le32((u32)addr);

        if (host->flags & SDHCI_USE_64_BIT_DMA)
                dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}
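
/*
 * Illustrative descriptor contents, assuming the ADMA2 attribute
 * values defined in sdhci.h (ADMA2_TRAN_VALID == 0x21): a 512-byte
 * chunk at DMA address 0x12345678 would be written as
 *
 *     cmd     = cpu_to_le16(0x0021)       (TRAN, valid)
 *     len     = cpu_to_le16(0x0200)       (512 bytes)
 *     addr_lo = cpu_to_le32(0x12345678)
 *
 * with addr_hi filled in only when SDHCI_USE_64_BIT_DMA is set.
 */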

static void sdhci_adma_mark_end(void *desc)
{
        struct sdhci_adma2_64_desc *dma_desc = desc;

        /* 32-bit and 64-bit descriptors have 'cmd' in same position */
        dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
        struct mmc_data *data, int sg_count)
{
        struct scatterlist *sg;
        unsigned long flags;
        dma_addr_t addr, align_addr;
        void *desc, *align;
        char *buffer;
        int len, offset, i;

        /*
         * The spec does not specify endianness of descriptor table.
         * We currently guess that it is LE.
         */

        host->sg_count = sg_count;

        desc = host->adma_table;
        align = host->align_buffer;

        align_addr = host->align_addr;

        for_each_sg(data->sg, sg, host->sg_count, i) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);

                /*
                 * The SDHCI specification states that ADMA addresses must
                 * be 32-bit aligned. If they aren't, then we use a bounce
                 * buffer for the (up to three) bytes that screw up the
                 * alignment.
                 */
                offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
                         SDHCI_ADMA2_MASK;
                if (offset) {
                        if (data->flags & MMC_DATA_WRITE) {
                                buffer = sdhci_kmap_atomic(sg, &flags);
                                memcpy(align, buffer, offset);
                                sdhci_kunmap_atomic(buffer, &flags);
                        }

                        /* tran, valid */
                        sdhci_adma_write_desc(host, desc, align_addr, offset,
                                              ADMA2_TRAN_VALID);

                        BUG_ON(offset > 65536);

                        align += SDHCI_ADMA2_ALIGN;
                        align_addr += SDHCI_ADMA2_ALIGN;

                        desc += host->desc_sz;

                        addr += offset;
                        len -= offset;
                }

                BUG_ON(len > 65536);

                if (len) {
                        /* tran, valid */
                        sdhci_adma_write_desc(host, desc, addr, len,
                                              ADMA2_TRAN_VALID);
                        desc += host->desc_sz;
                }

                /*
                 * If this triggers then we have a calculation bug
                 * somewhere. :/
                 */
                WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
        }

        if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
                /* Mark the last descriptor as the terminating descriptor */
                if (desc != host->adma_table) {
                        desc -= host->desc_sz;
                        sdhci_adma_mark_end(desc);
                }
        } else {
                /* Add a terminating entry - nop, end, valid */
                sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
        }
}
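
/*
 * Worked example of the alignment fix-up above (illustrative values):
 * a 512-byte segment at DMA address 0x1002 gives offset = 2, so the
 * first two bytes are routed through the align buffer in one
 * descriptor and the remaining 510 bytes are described directly at
 * address 0x1004 in a second descriptor.
 */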

static void sdhci_adma_table_post(struct sdhci_host *host,
        struct mmc_data *data)
{
        struct scatterlist *sg;
        int i, size;
        void *align;
        char *buffer;
        unsigned long flags;

        if (data->flags & MMC_DATA_READ) {
                bool has_unaligned = false;

                /* Do a quick scan of the SG list for any unaligned mappings */
                for_each_sg(data->sg, sg, host->sg_count, i)
                        if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
                                has_unaligned = true;
                                break;
                        }

                if (has_unaligned) {
                        dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
                                            data->sg_len, DMA_FROM_DEVICE);

                        align = host->align_buffer;

                        for_each_sg(data->sg, sg, host->sg_count, i) {
                                if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
                                        size = SDHCI_ADMA2_ALIGN -
                                               (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

                                        buffer = sdhci_kmap_atomic(sg, &flags);
                                        memcpy(buffer, align, size);
                                        sdhci_kunmap_atomic(buffer, &flags);

                                        align += SDHCI_ADMA2_ALIGN;
                                }
                        }
                }
        }
}

static u32 sdhci_sdma_address(struct sdhci_host *host)
{
        if (host->bounce_buffer)
                return host->bounce_addr;
        else
                return sg_dma_address(host->data->sg);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
                                         struct mmc_command *cmd,
                                         struct mmc_data *data)
{
        unsigned int target_timeout;

        /* timeout in us */
        if (!data) {
                target_timeout = cmd->busy_timeout * 1000;
        } else {
                target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
                if (host->clock && data->timeout_clks) {
                        unsigned long long val;

                        /*
                         * data->timeout_clks is in units of clock cycles.
                         * host->clock is in Hz.  target_timeout is in us.
                         * Hence, us = 1000000 * cycles / Hz.  Round up.
                         */
                        val = 1000000ULL * data->timeout_clks;
                        if (do_div(val, host->clock))
                                target_timeout++;
                        target_timeout += val;
                }
        }

        return target_timeout;
}
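
/*
 * Worked example (illustrative values): data->timeout_ns = 100000000
 * (100 ms) and data->timeout_clks = 1000 at host->clock = 50 MHz give
 * target_timeout = 100000 us + (1000000 * 1000) / 50000000 us
 * = 100000 + 20 = 100020 us.
 */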

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
                                  struct mmc_command *cmd)
{
        struct mmc_data *data = cmd->data;
        struct mmc_host *mmc = host->mmc;
        struct mmc_ios *ios = &mmc->ios;
        unsigned char bus_width = 1 << ios->bus_width;
        unsigned int blksz;
        unsigned int freq;
        u64 target_timeout;
        u64 transfer_time;

        target_timeout = sdhci_target_timeout(host, cmd, data);
        target_timeout *= NSEC_PER_USEC;

        if (data) {
                blksz = data->blksz;
                freq = host->mmc->actual_clock ? : host->clock;
                transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
                do_div(transfer_time, freq);
                /* multiply by '2' to account for any unknowns */
                transfer_time = transfer_time * 2;
                /* calculate timeout for the entire data */
                host->data_timeout = data->blocks * target_timeout +
                                     transfer_time;
        } else {
                host->data_timeout = target_timeout;
        }

        if (host->data_timeout)
                host->data_timeout += MMC_CMD_TRANSFER_TIME;
}
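
/*
 * Worked example (illustrative values): a 512-byte block on a 4-bit
 * bus at 200 MHz gives transfer_time = 512 * 10^9 * 2 / 200000000
 * = 5120 ns, doubled to 10240 ns; the software timeout then becomes
 * blocks * target_timeout plus that margin, plus MMC_CMD_TRANSFER_TIME.
 */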

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
                             bool *too_big)
{
        u8 count;
        struct mmc_data *data = cmd->data;
        unsigned target_timeout, current_timeout;

        *too_big = true;

        /*
         * If the host controller provides us with an incorrect timeout
         * value, just skip the check and use 0xE.  The hardware may take
         * longer to time out, but that's much better than having a too-short
         * timeout value.
         */
        if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
                return 0xE;

        /* Unspecified timeout, assume max */
        if (!data && !cmd->busy_timeout)
                return 0xE;

        /* timeout in us */
        target_timeout = sdhci_target_timeout(host, cmd, data);

        /*
         * Figure out needed cycles.
         * We do this in steps in order to fit inside a 32 bit int.
         * The first step is the minimum timeout, which will have a
         * minimum resolution of 6 bits:
         * (1) 2^13*1000 > 2^22,
         * (2) host->timeout_clk < 2^16
         *     =>
         *     (1) / (2) > 2^6
         */
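        /*
         * Worked example (illustrative values): with timeout_clk =
         * 50000 kHz the minimum timeout is 2^13 * 1000 / 50000 ~= 163 us.
         * Reaching a 500 ms target needs 163 us * 2^12 ~= 668 ms, so the
         * loop below stops at count = 12.
         */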
        count = 0;
        current_timeout = (1 << 13) * 1000 / host->timeout_clk;
        while (current_timeout < target_timeout) {
                count++;
                current_timeout <<= 1;
                if (count >= 0xF)
                        break;
        }

        if (count >= 0xF) {
                if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
                        DBG("Too large timeout 0x%x requested for CMD%d!\n",
                            count, cmd->opcode);
                count = 0xE;
        } else {
                *too_big = false;
        }

        return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
        u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
        u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

        if (host->flags & SDHCI_REQ_USE_DMA)
                host->ier = (host->ier & ~pio_irqs) | dma_irqs;
        else
                host->ier = (host->ier & ~dma_irqs) | pio_irqs;

        if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
                host->ier |= SDHCI_INT_AUTO_CMD_ERR;
        else
                host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
        if (enable)
                host->ier |= SDHCI_INT_DATA_TIMEOUT;
        else
                host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
        u8 count;

        if (host->ops->set_timeout) {
                host->ops->set_timeout(host, cmd);
        } else {
                bool too_big = false;

                count = sdhci_calc_timeout(host, cmd, &too_big);

                if (too_big &&
                    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
                        sdhci_calc_sw_timeout(host, cmd);
                        sdhci_set_data_timeout_irq(host, false);
                } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
                        sdhci_set_data_timeout_irq(host, true);
                }

                sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
        }
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
        u8 ctrl;
        struct mmc_data *data = cmd->data;

        host->data_timeout = 0;

        if (sdhci_data_line_cmd(cmd))
                sdhci_set_timeout(host, cmd);

        if (!data)
                return;

        WARN_ON(host->data);

        /* Sanity checks */
        BUG_ON(data->blksz * data->blocks > 524288);
        BUG_ON(data->blksz > host->mmc->max_blk_size);
        BUG_ON(data->blocks > 65535);

        host->data = data;
        host->data_early = 0;
        host->data->bytes_xfered = 0;

        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                struct scatterlist *sg;
                unsigned int length_mask, offset_mask;
                int i;

                host->flags |= SDHCI_REQ_USE_DMA;

                /*
                 * FIXME: This doesn't account for merging when mapping the
                 * scatterlist.
                 *
                 * The assumption here being that alignment and lengths are
                 * the same after DMA mapping to device address space.
                 */
                length_mask = 0;
                offset_mask = 0;
                if (host->flags & SDHCI_USE_ADMA) {
                        if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
                                length_mask = 3;
                                /*
                                 * As we use up to 3 byte chunks to work
                                 * around alignment problems, we need to
                                 * check the offset as well.
                                 */
                                offset_mask = 3;
                        }
                } else {
                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
                                length_mask = 3;
                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
                                offset_mask = 3;
                }

                if (unlikely(length_mask | offset_mask)) {
                        for_each_sg(data->sg, sg, data->sg_len, i) {
                                if (sg->length & length_mask) {
                                        DBG("Reverting to PIO because of transfer size (%d)\n",
                                            sg->length);
                                        host->flags &= ~SDHCI_REQ_USE_DMA;
                                        break;
                                }
                                if (sg->offset & offset_mask) {
                                        DBG("Reverting to PIO because of bad alignment\n");
                                        host->flags &= ~SDHCI_REQ_USE_DMA;
                                        break;
                                }
                        }
                }
        }

        if (host->flags & SDHCI_REQ_USE_DMA) {
                int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

                if (sg_cnt <= 0) {
                        /*
                         * This only happens when someone fed
                         * us an invalid request.
                         */
                        WARN_ON(1);
                        host->flags &= ~SDHCI_REQ_USE_DMA;
                } else if (host->flags & SDHCI_USE_ADMA) {
                        sdhci_adma_table_pre(host, data, sg_cnt);

                        sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
                        if (host->flags & SDHCI_USE_64_BIT_DMA)
                                sdhci_writel(host,
                                             (u64)host->adma_addr >> 32,
                                             SDHCI_ADMA_ADDRESS_HI);
                } else {
                        WARN_ON(sg_cnt != 1);
                        sdhci_writel(host, sdhci_sdma_address(host),
                                     SDHCI_DMA_ADDRESS);
                }
        }

        /*
         * Always adjust the DMA selection as some controllers
         * (e.g. JMicron) can't do PIO properly when the selection
         * is ADMA.
         */
        if (host->version >= SDHCI_SPEC_200) {
                ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
                ctrl &= ~SDHCI_CTRL_DMA_MASK;
                if ((host->flags & SDHCI_REQ_USE_DMA) &&
                        (host->flags & SDHCI_USE_ADMA)) {
                        if (host->flags & SDHCI_USE_64_BIT_DMA)
                                ctrl |= SDHCI_CTRL_ADMA64;
                        else
                                ctrl |= SDHCI_CTRL_ADMA32;
                } else {
                        ctrl |= SDHCI_CTRL_SDMA;
                }
                sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
        }

        if (!(host->flags & SDHCI_REQ_USE_DMA)) {
                int flags;

                flags = SG_MITER_ATOMIC;
                if (host->data->flags & MMC_DATA_READ)
                        flags |= SG_MITER_TO_SG;
                else
                        flags |= SG_MITER_FROM_SG;
                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
                host->blocks = data->blocks;
        }

        sdhci_set_transfer_irqs(host);

        /* Set the DMA boundary value and block size */
        sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
                     SDHCI_BLOCK_SIZE);
        sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
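
/*
 * Illustrative register encoding, assuming the SDHCI_MAKE_BLKSZ()
 * layout in sdhci.h (SDMA boundary in bits 14:12, block size in bits
 * 11:0): a 512-byte block size with the default 512K boundary
 * (host->sdma_boundary == 0x7) is written as 0x7200.
 */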

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
                                    struct mmc_request *mrq)
{
        return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
               !mrq->cap_cmd_during_tfr;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
        struct mmc_command *cmd)
{
        u16 mode = 0;
        struct mmc_data *data = cmd->data;

        if (data == NULL) {
                if (host->quirks2 &
                        SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
                        /* must not clear SDHCI_TRANSFER_MODE when tuning */
                        if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
                                sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
                } else {
                        /* clear Auto CMD settings for no data CMDs */
                        mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
                        sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
                                SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
                }
                return;
        }

        WARN_ON(!host->data);

        if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
                mode = SDHCI_TRNS_BLK_CNT_EN;

        if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
                mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
                /*
                 * If we are sending CMD23, CMD12 never gets sent
                 * on successful completion (so no Auto-CMD12).
                 */
                if (sdhci_auto_cmd12(host, cmd->mrq) &&
                    (cmd->opcode != SD_IO_RW_EXTENDED))
                        mode |= SDHCI_TRNS_AUTO_CMD12;
                else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
                        mode |= SDHCI_TRNS_AUTO_CMD23;
                        sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
                }
        }

        if (data->flags & MMC_DATA_READ)
                mode |= SDHCI_TRNS_READ;
        if (host->flags & SDHCI_REQ_USE_DMA)
                mode |= SDHCI_TRNS_DMA;

        sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
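
/*
 * Illustrative mode word: a multi-block DMA read using Auto-CMD23 ends
 * up with SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI |
 * SDHCI_TRNS_AUTO_CMD23 | SDHCI_TRNS_READ | SDHCI_TRNS_DMA, i.e. block
 * count enable, multi-block, Auto-CMD23, read direction and DMA all
 * selected in one 16-bit write.
 */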

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
        return (!(host->flags & SDHCI_DEVICE_DEAD) &&
                ((mrq->cmd && mrq->cmd->error) ||
                 (mrq->sbc && mrq->sbc->error) ||
                 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
                 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
        int i;

        for (i = 0; i < SDHCI_MAX_MRQS; i++) {
                if (host->mrqs_done[i] == mrq) {
                        WARN_ON(1);
                        return;
                }
        }

        for (i = 0; i < SDHCI_MAX_MRQS; i++) {
                if (!host->mrqs_done[i]) {
                        host->mrqs_done[i] = mrq;
                        break;
                }
        }

        WARN_ON(i >= SDHCI_MAX_MRQS);

        tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
        if (host->cmd && host->cmd->mrq == mrq)
                host->cmd = NULL;

        if (host->data_cmd && host->data_cmd->mrq == mrq)
                host->data_cmd = NULL;

        if (host->data && host->data->mrq == mrq)
                host->data = NULL;

        if (sdhci_needs_reset(host, mrq))
                host->pending_reset = true;

        __sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
        struct mmc_command *data_cmd = host->data_cmd;
        struct mmc_data *data = host->data;

        host->data = NULL;
        host->data_cmd = NULL;

        /*
         * The controller needs a reset of internal state machines upon error
         * conditions.
         */
        if (data->error) {
                if (!host->cmd || host->cmd == data_cmd)
                        sdhci_do_reset(host, SDHCI_RESET_CMD);
                sdhci_do_reset(host, SDHCI_RESET_DATA);
        }

        if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
            (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
                sdhci_adma_table_post(host, data);

        /*
         * The specification states that the block count register must
         * be updated, but it does not specify at what point in the
         * data flow. That makes the register entirely useless to read
         * back so we have to assume that nothing made it to the card
         * in the event of an error.
         */
        if (data->error)
                data->bytes_xfered = 0;
        else
                data->bytes_xfered = data->blksz * data->blocks;

        /*
         * Need to send CMD12 if -
         * a) open-ended multiblock transfer (no CMD23)
         * b) error in multiblock transfer
         */
        if (data->stop &&
            (data->error ||
             !data->mrq->sbc)) {
                /*
                 * 'cap_cmd_during_tfr' request must not use the command line
                 * after mmc_command_done() has been called. It is upper layer's
                 * responsibility to send the stop command if required.
                 */
                if (data->mrq->cap_cmd_during_tfr) {
                        sdhci_finish_mrq(host, data->mrq);
                } else {
                        /* Avoid triggering warning in sdhci_send_command() */
                        host->cmd = NULL;
                        sdhci_send_command(host, data->stop);
                }
        } else {
                sdhci_finish_mrq(host, data->mrq);
        }
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
                            unsigned long timeout)
{
        if (sdhci_data_line_cmd(mrq->cmd))
                mod_timer(&host->data_timer, timeout);
        else
                mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
        if (sdhci_data_line_cmd(mrq->cmd))
                del_timer(&host->data_timer);
        else
                del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
        int flags;
        u32 mask;
        unsigned long timeout;

        WARN_ON(host->cmd);

        /* Initially, a command has no error */
        cmd->error = 0;

        if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
            cmd->opcode == MMC_STOP_TRANSMISSION)
                cmd->flags |= MMC_RSP_BUSY;

        /* Wait max 10 ms */
        timeout = 10;

        mask = SDHCI_CMD_INHIBIT;
        if (sdhci_data_line_cmd(cmd))
                mask |= SDHCI_DATA_INHIBIT;

        /*
         * We shouldn't wait for data inhibit for stop commands, even
         * though they might use busy signaling.
         */
        if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
                mask &= ~SDHCI_DATA_INHIBIT;

        while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
                if (timeout == 0) {
                        pr_err("%s: Controller never released inhibit bit(s).\n",
                               mmc_hostname(host->mmc));
                        sdhci_dumpregs(host);
                        cmd->error = -EIO;
                        sdhci_finish_mrq(host, cmd->mrq);
                        return;
                }
                timeout--;
                mdelay(1);
        }

        host->cmd = cmd;
        if (sdhci_data_line_cmd(cmd)) {
                WARN_ON(host->data_cmd);
                host->data_cmd = cmd;
        }

        sdhci_prepare_data(host, cmd);

        sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

        sdhci_set_transfer_mode(host, cmd);

        if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
                pr_err("%s: Unsupported response type!\n",
                        mmc_hostname(host->mmc));
                cmd->error = -EINVAL;
                sdhci_finish_mrq(host, cmd->mrq);
                return;
        }

        if (!(cmd->flags & MMC_RSP_PRESENT))
                flags = SDHCI_CMD_RESP_NONE;
        else if (cmd->flags & MMC_RSP_136)
                flags = SDHCI_CMD_RESP_LONG;
        else if (cmd->flags & MMC_RSP_BUSY)
                flags = SDHCI_CMD_RESP_SHORT_BUSY;
        else
                flags = SDHCI_CMD_RESP_SHORT;

        if (cmd->flags & MMC_RSP_CRC)
                flags |= SDHCI_CMD_CRC;
        if (cmd->flags & MMC_RSP_OPCODE)
                flags |= SDHCI_CMD_INDEX;

        /* CMD19 is special in that the Data Present Select should be set */
        if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
            cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
                flags |= SDHCI_CMD_DATA;

        timeout = jiffies;
        if (host->data_timeout)
                timeout += nsecs_to_jiffies(host->data_timeout);
        else if (!cmd->data && cmd->busy_timeout > 9000)
                timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
        else
                timeout += 10 * HZ;
        sdhci_mod_timer(host, cmd->mrq, timeout);

        sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
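
/*
 * Illustrative command register encoding, assuming the SDHCI_CMD_*
 * values from sdhci.h: a single-block read (CMD17) with an R1 response
 * uses flags = SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_INDEX |
 * SDHCI_CMD_DATA, so SDHCI_MAKE_CMD(17, flags) writes
 * (17 << 8) | 0x3a = 0x113a to SDHCI_COMMAND.
 */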

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
        int i, reg;

        for (i = 0; i < 4; i++) {
                reg = SDHCI_RESPONSE + (3 - i) * 4;
                cmd->resp[i] = sdhci_readl(host, reg);
        }

        if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
                return;

        /* CRC is stripped so we need to do some shifting */
        for (i = 0; i < 4; i++) {
                cmd->resp[i] <<= 8;
                if (i != 3)
                        cmd->resp[i] |= cmd->resp[i + 1] >> 24;
        }
}
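
/*
 * Illustrative example of the shift above: with the CRC byte stripped
 * by the controller, a raw resp[0] of 0x00aabbcc next to a raw resp[1]
 * of 0xddeeff11 becomes resp[0] = 0xaabbccdd once the word is shifted
 * left by 8 and topped up with the high byte of the following word.
 */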

static void sdhci_finish_command(struct sdhci_host *host)
{
        struct mmc_command *cmd = host->cmd;

        host->cmd = NULL;

        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        sdhci_read_rsp_136(host, cmd);
                } else {
                        cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
                }
        }

        if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
                mmc_command_done(host->mmc, cmd->mrq);

        /*
         * The host can send an interrupt when the busy state has
         * ended, allowing us to wait without wasting CPU cycles.
         * The busy signal uses DAT0 so this is similar to waiting
         * for data to complete.
         *
         * Note: The 1.0 specification is a bit ambiguous about this
         *       feature so there might be some problems with older
         *       controllers.
         */
        if (cmd->flags & MMC_RSP_BUSY) {
                if (cmd->data) {
                        DBG("Cannot wait for busy signal when also doing a data transfer");
                } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
                           cmd == host->data_cmd) {
                        /* Command complete before busy is ended */
                        return;
                }
        }

        /* Finished CMD23, now send actual command. */
        if (cmd == cmd->mrq->sbc) {
                sdhci_send_command(host, cmd->mrq->cmd);
        } else {

                /* Processed actual command. */
                if (host->data && host->data_early)
                        sdhci_finish_data(host);

                if (!cmd->data)
                        sdhci_finish_mrq(host, cmd->mrq);
        }
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
        u16 preset = 0;

        switch (host->timing) {
        case MMC_TIMING_MMC_HS:
        case MMC_TIMING_SD_HS:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
                break;
        case MMC_TIMING_UHS_SDR12:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
                break;
        case MMC_TIMING_UHS_SDR25:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
                break;
        case MMC_TIMING_UHS_SDR50:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
                break;
        case MMC_TIMING_UHS_SDR104:
        case MMC_TIMING_MMC_HS200:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
                break;
        case MMC_TIMING_UHS_DDR50:
        case MMC_TIMING_MMC_DDR52:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
                break;
        case MMC_TIMING_MMC_HS400:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
                break;
        default:
                pr_warn("%s: Invalid UHS-I mode selected\n",
                        mmc_hostname(host->mmc));
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
                break;
        }
        return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
                   unsigned int *actual_clock)
{
        int div = 0; /* Initialized for compiler warning */
        int real_div = div, clk_mul = 1;
        u16 clk = 0;
        bool switch_base_clk = false;

        if (host->version >= SDHCI_SPEC_300) {
                if (host->preset_enabled) {
                        u16 pre_val;

                        clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
                        pre_val = sdhci_get_preset_value(host);
                        div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
                                >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
                        if (host->clk_mul &&
                                (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
                                clk = SDHCI_PROG_CLOCK_MODE;
                                real_div = div + 1;
                                clk_mul = host->clk_mul;
                        } else {
                                real_div = max_t(int, 1, div << 1);
                        }
                        goto clock_set;
                }

                /*
                 * Check if the Host Controller supports Programmable Clock
                 * Mode.
                 */
                if (host->clk_mul) {
                        for (div = 1; div <= 1024; div++) {
                                if ((host->max_clk * host->clk_mul / div)
                                        <= clock)
                                        break;
                        }
                        if ((host->max_clk * host->clk_mul / div) <= clock) {
                                /*
                                 * Set Programmable Clock Mode in the Clock
                                 * Control register.
                                 */
                                clk = SDHCI_PROG_CLOCK_MODE;
                                real_div = div;
                                clk_mul = host->clk_mul;
                                div--;
                        } else {
                                /*
                                 * Divisor can be too small to reach clock
                                 * speed requirement. Then use the base clock.
                                 */
                                switch_base_clk = true;
                        }
                }

                if (!host->clk_mul || switch_base_clk) {
                        /* Version 3.00 divisors must be a multiple of 2. */
                        if (host->max_clk <= clock)
                                div = 1;
                        else {
                                for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
                                     div += 2) {
                                        if ((host->max_clk / div) <= clock)
                                                break;
                                }
                        }
                        real_div = div;
                        div >>= 1;
                        if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
                                && !div && host->max_clk <= 25000000)
                                div = 1;
                }
        } else {
                /* Version 2.00 divisors must be a power of 2. */
                for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
                        if ((host->max_clk / div) <= clock)
                                break;
                }
                real_div = div;
                div >>= 1;
        }

clock_set:
        if (real_div)
                *actual_clock = (host->max_clk * clk_mul) / real_div;
        clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
        clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
                << SDHCI_DIVIDER_HI_SHIFT;

        return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1499
1500 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1501 {
1502         ktime_t timeout;
1503
1504         clk |= SDHCI_CLOCK_INT_EN;
1505         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1506
1507         /* Wait max 20 ms */
1508         timeout = ktime_add_ms(ktime_get(), 20);
1509         while (1) {
1510                 bool timedout = ktime_after(ktime_get(), timeout);
1511
1512                 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1513                 if (clk & SDHCI_CLOCK_INT_STABLE)
1514                         break;
1515                 if (timedout) {
1516                         pr_err("%s: Internal clock never stabilised.\n",
1517                                mmc_hostname(host->mmc));
1518                         sdhci_dumpregs(host);
1519                         return;
1520                 }
1521                 udelay(10);
1522         }
1523
1524         clk |= SDHCI_CLOCK_CARD_EN;
1525         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1526 }
1527 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1528
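/*
 * Standard clock-setting sequence: gate the clock by clearing the Clock
 * Control register, then let sdhci_calc_clk() pick the divider and
 * sdhci_enable_clk() wait for the internal clock to stabilise before
 * the card clock is switched on.
 */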
1529 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1530 {
1531         u16 clk;
1532
1533         host->mmc->actual_clock = 0;
1534
1535         sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1536
1537         if (clock == 0)
1538                 return;
1539
1540         clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1541         sdhci_enable_clk(host, clk);
1542 }
1543 EXPORT_SYMBOL_GPL(sdhci_set_clock);
1544
1545 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1546                                 unsigned short vdd)
1547 {
1548         struct mmc_host *mmc = host->mmc;
1549
1550         mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1551
1552         if (mode != MMC_POWER_OFF)
1553                 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1554         else
1555                 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1556 }
1557
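/*
 * Note that vdd is a bit number in the MMC OCR bit field, so (1 << vdd)
 * below recovers the voltage-range mask: e.g. vdd == 21 corresponds to
 * MMC_VDD_33_34 and selects SDHCI_POWER_330.
 */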
1558 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1559                            unsigned short vdd)
1560 {
1561         u8 pwr = 0;
1562
1563         if (mode != MMC_POWER_OFF) {
1564                 switch (1 << vdd) {
1565                 case MMC_VDD_165_195:
1566                 /*
1567                  * Without a regulator, SDHCI does not support 2.0v
1568                  * so we only get here if the driver deliberately
1569                  * added the 2.0v range to ocr_avail. Map it to 1.8v
1570                  * for the purpose of turning on the power.
1571                  */
1572                 case MMC_VDD_20_21:
1573                         pwr = SDHCI_POWER_180;
1574                         break;
1575                 case MMC_VDD_29_30:
1576                 case MMC_VDD_30_31:
1577                         pwr = SDHCI_POWER_300;
1578                         break;
1579                 case MMC_VDD_32_33:
1580                 case MMC_VDD_33_34:
1581                         pwr = SDHCI_POWER_330;
1582                         break;
1583                 default:
1584                         WARN(1, "%s: Invalid vdd %#x\n",
1585                              mmc_hostname(host->mmc), vdd);
1586                         break;
1587                 }
1588         }
1589
1590         if (host->pwr == pwr)
1591                 return;
1592
1593         host->pwr = pwr;
1594
1595         if (pwr == 0) {
1596                 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1597                 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1598                         sdhci_runtime_pm_bus_off(host);
1599         } else {
1600                 /*
1601                  * Spec says that we should clear the power reg before setting
1602                  * a new value. Some controllers don't seem to like this though.
1603                  */
1604                 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1605                         sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1606
1607                 /*
1608                  * At least the Marvell CaFe chip gets confused if we set the
1609                  * voltage and turn on power at the same time, so set the
1610                  * voltage first.
1611                  */
1612                 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1613                         sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1614
1615                 pwr |= SDHCI_POWER_ON;
1616
1617                 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1618
1619                 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1620                         sdhci_runtime_pm_bus_on(host);
1621
1622                 /*
1623                  * Some controllers need an extra 10ms delay before
1624                  * they can apply the clock after applying power.
1625                  */
1626                 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1627                         mdelay(10);
1628         }
1629 }
1630 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
1631
1632 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1633                      unsigned short vdd)
1634 {
1635         if (IS_ERR(host->mmc->supply.vmmc))
1636                 sdhci_set_power_noreg(host, mode, vdd);
1637         else
1638                 sdhci_set_power_reg(host, mode, vdd);
1639 }
1640 EXPORT_SYMBOL_GPL(sdhci_set_power);
1641
1642 /*****************************************************************************\
1643  *                                                                           *
1644  * MMC callbacks                                                             *
1645  *                                                                           *
1646 \*****************************************************************************/
1647
1648 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1649 {
1650         struct sdhci_host *host;
1651         int present;
1652         unsigned long flags;
1653
1654         host = mmc_priv(mmc);
1655
1656         /* First, check card presence */
1657         present = mmc->ops->get_cd(mmc);
1658
1659         spin_lock_irqsave(&host->lock, flags);
1660
1661         sdhci_led_activate(host);
1662
1663         /*
1664          * If Auto-CMD12 is enabled, drop the explicit STOP command for
1665          * requests that do not use SET_BLOCK_COUNT.
1666          */
1667         if (sdhci_auto_cmd12(host, mrq)) {
1668                 if (mrq->stop) {
1669                         mrq->data->stop = NULL;
1670                         mrq->stop = NULL;
1671                 }
1672         }
1673
1674         if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1675                 mrq->cmd->error = -ENOMEDIUM;
1676                 sdhci_finish_mrq(host, mrq);
1677         } else {
1678                 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1679                         sdhci_send_command(host, mrq->sbc);
1680                 else
1681                         sdhci_send_command(host, mrq->cmd);
1682         }
1683
1684         mmiowb();
1685         spin_unlock_irqrestore(&host->lock, flags);
1686 }
1687
1688 void sdhci_set_bus_width(struct sdhci_host *host, int width)
1689 {
1690         u8 ctrl;
1691
1692         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1693         if (width == MMC_BUS_WIDTH_8) {
1694                 ctrl &= ~SDHCI_CTRL_4BITBUS;
1695                 ctrl |= SDHCI_CTRL_8BITBUS;
1696         } else {
1697                 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
1698                         ctrl &= ~SDHCI_CTRL_8BITBUS;
1699                 if (width == MMC_BUS_WIDTH_4)
1700                         ctrl |= SDHCI_CTRL_4BITBUS;
1701                 else
1702                         ctrl &= ~SDHCI_CTRL_4BITBUS;
1703         }
1704         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1705 }
1706 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1707
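/*
 * Map the MMC core timing modes onto the UHS Mode Select field of Host
 * Control 2. Note that HS200 reuses the SDR104 encoding, and HS400 has
 * no standard encoding (the value written below is non-standard).
 */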
1708 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1709 {
1710         u16 ctrl_2;
1711
1712         ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1713         /* Select Bus Speed Mode for host */
1714         ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1715         if ((timing == MMC_TIMING_MMC_HS200) ||
1716             (timing == MMC_TIMING_UHS_SDR104))
1717                 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1718         else if (timing == MMC_TIMING_UHS_SDR12)
1719                 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1720         else if (timing == MMC_TIMING_UHS_SDR25)
1721                 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1722         else if (timing == MMC_TIMING_UHS_SDR50)
1723                 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1724         else if ((timing == MMC_TIMING_UHS_DDR50) ||
1725                  (timing == MMC_TIMING_MMC_DDR52))
1726                 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1727         else if (timing == MMC_TIMING_MMC_HS400)
1728                 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1729         sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1730 }
1731 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1732
1733 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1734 {
1735         struct sdhci_host *host = mmc_priv(mmc);
1736         u8 ctrl;
1737
1738         if (ios->power_mode == MMC_POWER_UNDEFINED)
1739                 return;
1740
1741         if (host->flags & SDHCI_DEVICE_DEAD) {
1742                 if (!IS_ERR(mmc->supply.vmmc) &&
1743                     ios->power_mode == MMC_POWER_OFF)
1744                         mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1745                 return;
1746         }
1747
1748         /*
1749          * Reset the chip on each power off.
1750          * This should clear out any weird state.
1751          */
1752         if (ios->power_mode == MMC_POWER_OFF) {
1753                 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1754                 sdhci_reinit(host);
1755         }
1756
1757         if (host->version >= SDHCI_SPEC_300 &&
1758                 (ios->power_mode == MMC_POWER_UP) &&
1759                 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1760                 sdhci_enable_preset_value(host, false);
1761
1762         if (!ios->clock || ios->clock != host->clock) {
1763                 host->ops->set_clock(host, ios->clock);
1764                 host->clock = ios->clock;
1765
1766                 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
1767                     host->clock) {
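                        /*
                         * timeout_clk is in kHz; e.g. a 50 MHz SDCLK with the
                         * default maximum count of 1 << 27 cycles gives a
                         * max_busy_timeout of roughly 2684 ms.
                         */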
1768                         host->timeout_clk = host->mmc->actual_clock ?
1769                                                 host->mmc->actual_clock / 1000 :
1770                                                 host->clock / 1000;
1771                         host->mmc->max_busy_timeout =
1772                                 host->ops->get_max_timeout_count ?
1773                                 host->ops->get_max_timeout_count(host) :
1774                                 1 << 27;
1775                         host->mmc->max_busy_timeout /= host->timeout_clk;
1776                 }
1777         }
1778
1779         if (host->ops->set_power)
1780                 host->ops->set_power(host, ios->power_mode, ios->vdd);
1781         else
1782                 sdhci_set_power(host, ios->power_mode, ios->vdd);
1783
1784         if (host->ops->platform_send_init_74_clocks)
1785                 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1786
1787         host->ops->set_bus_width(host, ios->bus_width);
1788
1789         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1790
1791         if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
1792                 if (ios->timing == MMC_TIMING_SD_HS ||
1793                      ios->timing == MMC_TIMING_MMC_HS ||
1794                      ios->timing == MMC_TIMING_MMC_HS400 ||
1795                      ios->timing == MMC_TIMING_MMC_HS200 ||
1796                      ios->timing == MMC_TIMING_MMC_DDR52 ||
1797                      ios->timing == MMC_TIMING_UHS_SDR50 ||
1798                      ios->timing == MMC_TIMING_UHS_SDR104 ||
1799                      ios->timing == MMC_TIMING_UHS_DDR50 ||
1800                      ios->timing == MMC_TIMING_UHS_SDR25)
1801                         ctrl |= SDHCI_CTRL_HISPD;
1802                 else
1803                         ctrl &= ~SDHCI_CTRL_HISPD;
1804         }
1805
1806         if (host->version >= SDHCI_SPEC_300) {
1807                 u16 clk, ctrl_2;
1808
1809                 if (!host->preset_enabled) {
1810                         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1811                         /*
1812                          * We only need to set Driver Strength if the
1813                          * preset value enable is not set.
1814                          */
1815                         ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1816                         ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1817                         if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1818                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1819                         else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
1820                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1821                         else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1822                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1823                         else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
1824                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
1825                         else {
1826                                 pr_warn("%s: invalid driver type, default to driver type B\n",
1827                                         mmc_hostname(mmc));
1828                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1829                         }
1830
1831                         sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1832                 } else {
1833                         /*
1834                          * According to SDHC Spec v3.00, if the Preset Value
1835                          * Enable in the Host Control 2 register is set, we
1836                          * need to reset SD Clock Enable before changing High
1837                          * Speed Enable to avoid generating clock glitches.
1838                          */
1839
1840                         /* Reset SD Clock Enable */
1841                         clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1842                         clk &= ~SDHCI_CLOCK_CARD_EN;
1843                         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1844
1845                         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1846
1847                         /* Re-enable SD Clock */
1848                         host->ops->set_clock(host, host->clock);
1849                 }
1850
1851                 /* Reset SD Clock Enable */
1852                 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1853                 clk &= ~SDHCI_CLOCK_CARD_EN;
1854                 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1855
1856                 host->ops->set_uhs_signaling(host, ios->timing);
1857                 host->timing = ios->timing;
1858
1859                 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1860                                 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
1861                                  (ios->timing == MMC_TIMING_UHS_SDR25) ||
1862                                  (ios->timing == MMC_TIMING_UHS_SDR50) ||
1863                                  (ios->timing == MMC_TIMING_UHS_SDR104) ||
1864                                  (ios->timing == MMC_TIMING_UHS_DDR50) ||
1865                                  (ios->timing == MMC_TIMING_MMC_DDR52))) {
1866                         u16 preset;
1867
1868                         sdhci_enable_preset_value(host, true);
1869                         preset = sdhci_get_preset_value(host);
1870                         ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
1871                                 >> SDHCI_PRESET_DRV_SHIFT;
1872                 }
1873
1874                 /* Re-enable SD Clock */
1875                 host->ops->set_clock(host, host->clock);
1876         } else
1877                 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1878
1879         /*
1880          * Some (ENE) controllers go apeshit on some ios operations,
1881          * signalling timeout and CRC errors even on CMD0. Resetting
1882          * it on each ios seems to solve the problem.
1883          */
1884         if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1885                 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1886
1887         mmiowb();
1888 }
1889 EXPORT_SYMBOL_GPL(sdhci_set_ios);
1890
1891 static int sdhci_get_cd(struct mmc_host *mmc)
1892 {
1893         struct sdhci_host *host = mmc_priv(mmc);
1894         int gpio_cd = mmc_gpio_get_cd(mmc);
1895
1896         if (host->flags & SDHCI_DEVICE_DEAD)
1897                 return 0;
1898
1899         /* If nonremovable, assume that the card is always present. */
1900         if (!mmc_card_is_removable(host->mmc))
1901                 return 1;
1902
1903         /*
1904          * Try slot GPIO detect; if defined, it takes precedence
1905          * over the built-in controller functionality.
1906          */
1907         if (gpio_cd >= 0)
1908                 return !!gpio_cd;
1909
1910         /* If polling, assume that the card is always present. */
1911         if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1912                 return 1;
1913
1914         /* Host native card detect */
1915         return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1916 }
1917
1918 static int sdhci_check_ro(struct sdhci_host *host)
1919 {
1920         unsigned long flags;
1921         int is_readonly;
1922
1923         spin_lock_irqsave(&host->lock, flags);
1924
1925         if (host->flags & SDHCI_DEVICE_DEAD)
1926                 is_readonly = 0;
1927         else if (host->ops->get_ro)
1928                 is_readonly = host->ops->get_ro(host);
1929         else
1930                 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1931                                 & SDHCI_WRITE_PROTECT);
1932
1933         spin_unlock_irqrestore(&host->lock, flags);
1934
1935         /* This quirk needs to be replaced by a callback function later */
1936         return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1937                 !is_readonly : is_readonly;
1938 }
1939
1940 #define SAMPLE_COUNT    5
1941
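/*
 * With SDHCI_QUIRK_UNSTABLE_RO_DETECT the write-protect line is sampled
 * SAMPLE_COUNT times, 30 ms apart, and a simple majority of the samples
 * decides the result.
 */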
1942 static int sdhci_get_ro(struct mmc_host *mmc)
1943 {
1944         struct sdhci_host *host = mmc_priv(mmc);
1945         int i, ro_count;
1946
1947         if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1948                 return sdhci_check_ro(host);
1949
1950         ro_count = 0;
1951         for (i = 0; i < SAMPLE_COUNT; i++) {
1952                 if (sdhci_check_ro(host)) {
1953                         if (++ro_count > SAMPLE_COUNT / 2)
1954                                 return 1;
1955                 }
1956                 msleep(30);
1957         }
1958         return 0;
1959 }
1960
1961 static void sdhci_hw_reset(struct mmc_host *mmc)
1962 {
1963         struct sdhci_host *host = mmc_priv(mmc);
1964
1965         if (host->ops && host->ops->hw_reset)
1966                 host->ops->hw_reset(host);
1967 }
1968
1969 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1970 {
1971         if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1972                 if (enable)
1973                         host->ier |= SDHCI_INT_CARD_INT;
1974                 else
1975                         host->ier &= ~SDHCI_INT_CARD_INT;
1976
1977                 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1978                 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1979                 mmiowb();
1980         }
1981 }
1982
1983 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1984 {
1985         struct sdhci_host *host = mmc_priv(mmc);
1986         unsigned long flags;
1987
1988         if (enable)
1989                 pm_runtime_get_noresume(host->mmc->parent);
1990
1991         spin_lock_irqsave(&host->lock, flags);
1992         if (enable)
1993                 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
1994         else
1995                 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
1996
1997         sdhci_enable_sdio_irq_nolock(host, enable);
1998         spin_unlock_irqrestore(&host->lock, flags);
1999
2000         if (!enable)
2001                 pm_runtime_put_noidle(host->mmc->parent);
2002 }
2003 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2004
2005 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2006                                       struct mmc_ios *ios)
2007 {
2008         struct sdhci_host *host = mmc_priv(mmc);
2009         u16 ctrl;
2010         int ret;
2011
2012         /*
2013          * Signal Voltage Switching is only applicable for Host Controllers
2014          * v3.00 and above.
2015          */
2016         if (host->version < SDHCI_SPEC_300)
2017                 return 0;
2018
2019         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2020
2021         switch (ios->signal_voltage) {
2022         case MMC_SIGNAL_VOLTAGE_330:
2023                 if (!(host->flags & SDHCI_SIGNALING_330))
2024                         return -EINVAL;
2025                 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2026                 ctrl &= ~SDHCI_CTRL_VDD_180;
2027                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2028
2029                 if (!IS_ERR(mmc->supply.vqmmc)) {
2030                         ret = mmc_regulator_set_vqmmc(mmc, ios);
2031                         if (ret) {
2032                                 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2033                                         mmc_hostname(mmc));
2034                                 return -EIO;
2035                         }
2036                 }
2037                 /* Wait for 5ms */
2038                 usleep_range(5000, 5500);
2039
2040                 /* 3.3V regulator output should be stable within 5 ms */
2041                 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2042                 if (!(ctrl & SDHCI_CTRL_VDD_180))
2043                         return 0;
2044
2045                 pr_warn("%s: 3.3V regulator output did not become stable\n",
2046                         mmc_hostname(mmc));
2047
2048                 return -EAGAIN;
2049         case MMC_SIGNAL_VOLTAGE_180:
2050                 if (!(host->flags & SDHCI_SIGNALING_180))
2051                         return -EINVAL;
2052                 if (!IS_ERR(mmc->supply.vqmmc)) {
2053                         ret = mmc_regulator_set_vqmmc(mmc, ios);
2054                         if (ret) {
2055                                 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2056                                         mmc_hostname(mmc));
2057                                 return -EIO;
2058                         }
2059                 }
2060
2061                 /*
2062                  * Enable 1.8V Signal Enable in the Host Control2
2063                  * register
2064                  */
2065                 ctrl |= SDHCI_CTRL_VDD_180;
2066                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2067
2068                 /* Some controllers need to do more when switching */
2069                 if (host->ops->voltage_switch)
2070                         host->ops->voltage_switch(host);
2071
2072                 /* 1.8V regulator output should be stable within 5 ms */
2073                 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2074                 if (ctrl & SDHCI_CTRL_VDD_180)
2075                         return 0;
2076
2077                 pr_warn("%s: 1.8V regulator output did not become stable\n",
2078                         mmc_hostname(mmc));
2079
2080                 return -EAGAIN;
2081         case MMC_SIGNAL_VOLTAGE_120:
2082                 if (!(host->flags & SDHCI_SIGNALING_120))
2083                         return -EINVAL;
2084                 if (!IS_ERR(mmc->supply.vqmmc)) {
2085                         ret = mmc_regulator_set_vqmmc(mmc, ios);
2086                         if (ret) {
2087                                 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2088                                         mmc_hostname(mmc));
2089                                 return -EIO;
2090                         }
2091                 }
2092                 return 0;
2093         default:
2094                 /* No signal voltage switch required */
2095                 return 0;
2096         }
2097 }
2098 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2099
2100 static int sdhci_card_busy(struct mmc_host *mmc)
2101 {
2102         struct sdhci_host *host = mmc_priv(mmc);
2103         u32 present_state;
2104
2105         /* The card signals busy by holding DAT[0] low, so check whether DAT[0] is 0 */
2106         present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2107
2108         return !(present_state & SDHCI_DATA_0_LVL_MASK);
2109 }
2110
2111 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2112 {
2113         struct sdhci_host *host = mmc_priv(mmc);
2114         unsigned long flags;
2115
2116         spin_lock_irqsave(&host->lock, flags);
2117         host->flags |= SDHCI_HS400_TUNING;
2118         spin_unlock_irqrestore(&host->lock, flags);
2119
2120         return 0;
2121 }
2122
2123 void sdhci_start_tuning(struct sdhci_host *host)
2124 {
2125         u16 ctrl;
2126
2127         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2128         ctrl |= SDHCI_CTRL_EXEC_TUNING;
2129         if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2130                 ctrl |= SDHCI_CTRL_TUNED_CLK;
2131         sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2132
2133         /*
2134          * As per the Host Controller spec v3.00, tuning command
2135          * generates Buffer Read Ready interrupt, so enable that.
2136          *
2137          * Note: The spec clearly says that when tuning sequence
2138          * is being performed, the controller does not generate
2139          * interrupts other than Buffer Read Ready interrupt. But
2140          * to make sure we don't hit a controller bug, we _only_
2141          * enable Buffer Read Ready interrupt here.
2142          */
2143         sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2144         sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2145 }
2146 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2147
2148 void sdhci_end_tuning(struct sdhci_host *host)
2149 {
2150         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2151         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2152 }
2153 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2154
2155 void sdhci_reset_tuning(struct sdhci_host *host)
2156 {
2157         u16 ctrl;
2158
2159         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2160         ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2161         ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2162         sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2163 }
2164 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2165
2166 static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2167 {
2168         sdhci_reset_tuning(host);
2169
2170         sdhci_do_reset(host, SDHCI_RESET_CMD);
2171         sdhci_do_reset(host, SDHCI_RESET_DATA);
2172
2173         sdhci_end_tuning(host);
2174
2175         mmc_abort_tuning(host->mmc, opcode);
2176 }
2177
2178 /*
2179  * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2180  * tuning command does not have a data payload (or rather the hardware does it
2181  * automatically), so mmc_send_tuning() would return -EIO. Also, the tuning
2182  * command's interrupt setup differs from other commands and there is no
2183  * timeout interrupt, so special handling is needed.
2184  */
2185 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2186 {
2187         struct mmc_host *mmc = host->mmc;
2188         struct mmc_command cmd = {};
2189         struct mmc_request mrq = {};
2190         unsigned long flags;
2191         u32 b = host->sdma_boundary;
2192
2193         spin_lock_irqsave(&host->lock, flags);
2194
2195         cmd.opcode = opcode;
2196         cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2197         cmd.mrq = &mrq;
2198
2199         mrq.cmd = &cmd;
2200         /*
2201          * In response to CMD19, the card sends a 64-byte tuning block,
2202          * so set the block size to 64. For CMD21 (HS200) on an 8-bit
2203          * bus the tuning block is 128 bytes, so 128 is used instead.
2204          */
2205         if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2206             mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2207                 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2208         else
2209                 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
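        /*
         * Note: SDHCI_MAKE_BLKSZ() packs the SDMA buffer boundary
         * (bits 14:12) together with the transfer block size (bits 11:0)
         * into the Block Size register, e.g. boundary 7 with a 64-byte
         * block gives 0x7040.
         */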
2210
2211         /*
2212          * The tuning block is sent by the card to the host controller.
2213          * So we set the TRNS_READ bit in the Transfer Mode register.
2214          * This also takes care of setting DMA Enable and Multi Block
2215          * Select in the same register to 0.
2216          */
2217         sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2218
2219         sdhci_send_command(host, &cmd);
2220
2221         host->cmd = NULL;
2222
2223         sdhci_del_timer(host, &mrq);
2224
2225         host->tuning_done = 0;
2226
2227         mmiowb();
2228         spin_unlock_irqrestore(&host->lock, flags);
2229
2230         /* Wait for Buffer Read Ready interrupt */
2231         wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2232                            msecs_to_jiffies(50));
2233
2234 }
2235 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2236
2237 static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2238 {
2239         int i;
2240
2241         /*
2242          * Issue the tuning command repeatedly until the Execute Tuning bit is
2243          * cleared, or at most MAX_TUNING_LOOP (40) times.
2244          */
2245         for (i = 0; i < MAX_TUNING_LOOP; i++) {
2246                 u16 ctrl;
2247
2248                 sdhci_send_tuning(host, opcode);
2249
2250                 if (!host->tuning_done) {
2251                         pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2252                                  mmc_hostname(host->mmc));
2253                         sdhci_abort_tuning(host, opcode);
2254                         return;
2255                 }
2256
2257                 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2258                 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2259                         if (ctrl & SDHCI_CTRL_TUNED_CLK)
2260                                 return; /* Success! */
2261                         break;
2262                 }
2263
2264                 /* Spec does not require a delay between tuning cycles */
2265                 if (host->tuning_delay > 0)
2266                         mdelay(host->tuning_delay);
2267         }
2268
2269         pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2270                 mmc_hostname(host->mmc));
2271         sdhci_reset_tuning(host);
2272 }
2273
2274 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2275 {
2276         struct sdhci_host *host = mmc_priv(mmc);
2277         int err = 0;
2278         unsigned int tuning_count = 0;
2279         bool hs400_tuning;
2280
2281         hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2282
2283         if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2284                 tuning_count = host->tuning_count;
2285
2286         /*
2287          * The Host Controller needs tuning for the SDR104 and DDR50
2288          * modes, and for SDR50 mode when Use Tuning for SDR50 is set in
2289          * the Capabilities register.
2290          * If the Host Controller supports the HS200 mode then the
2291          * tuning function has to be executed.
2292          */
2293         switch (host->timing) {
2294         /* HS400 tuning is done in HS200 mode */
2295         case MMC_TIMING_MMC_HS400:
2296                 err = -EINVAL;
2297                 goto out;
2298
2299         case MMC_TIMING_MMC_HS200:
2300                 /*
2301                  * Periodic re-tuning for HS400 is not expected to be needed, so
2302                  * disable it here.
2303                  */
2304                 if (hs400_tuning)
2305                         tuning_count = 0;
2306                 break;
2307
2308         case MMC_TIMING_UHS_SDR104:
2309         case MMC_TIMING_UHS_DDR50:
2310                 break;
2311
2312         case MMC_TIMING_UHS_SDR50:
2313                 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2314                         break;
2315                 /* FALLTHROUGH */
2316
2317         default:
2318                 goto out;
2319         }
2320
2321         if (host->ops->platform_execute_tuning) {
2322                 err = host->ops->platform_execute_tuning(host, opcode);
2323                 goto out;
2324         }
2325
2326         host->mmc->retune_period = tuning_count;
2327
2328         if (host->tuning_delay < 0)
2329                 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2330
2331         sdhci_start_tuning(host);
2332
2333         __sdhci_execute_tuning(host, opcode);
2334
2335         sdhci_end_tuning(host);
2336 out:
2337         host->flags &= ~SDHCI_HS400_TUNING;
2338
2339         return err;
2340 }
2341 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2342
2343 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2344 {
2345         /* Host Controller v3.00 defines preset value registers */
2346         if (host->version < SDHCI_SPEC_300)
2347                 return;
2348
2349         /*
2350          * Only change the Preset Value Enable state when it differs from
2351          * the requested state; otherwise there is nothing to do.
2352          */
2353         if (host->preset_enabled != enable) {
2354                 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2355
2356                 if (enable)
2357                         ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2358                 else
2359                         ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2360
2361                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2362
2363                 if (enable)
2364                         host->flags |= SDHCI_PV_ENABLED;
2365                 else
2366                         host->flags &= ~SDHCI_PV_ENABLED;
2367
2368                 host->preset_enabled = enable;
2369         }
2370 }
2371
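/*
 * host_cookie tracks the DMA mapping state of a request's data:
 * sdhci_pre_req() may map it ahead of time (COOKIE_PRE_MAPPED), the
 * request path maps it on demand (COOKIE_MAPPED), and sdhci_post_req()
 * returns it to COOKIE_UNMAPPED.
 */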
2372 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2373                                 int err)
2374 {
2375         struct sdhci_host *host = mmc_priv(mmc);
2376         struct mmc_data *data = mrq->data;
2377
2378         if (data->host_cookie != COOKIE_UNMAPPED)
2379                 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2380                              mmc_get_dma_dir(data));
2381
2382         data->host_cookie = COOKIE_UNMAPPED;
2383 }
2384
2385 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2386 {
2387         struct sdhci_host *host = mmc_priv(mmc);
2388
2389         mrq->data->host_cookie = COOKIE_UNMAPPED;
2390
2391         /*
2392          * No pre-mapping in the pre hook if we're using the bounce buffer;
2393          * that would require two bounce buffers, since one buffer may be
2394          * in flight when this is called.
2395          */
2396         if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2397                 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2398 }
2399
2400 static inline bool sdhci_has_requests(struct sdhci_host *host)
2401 {
2402         return host->cmd || host->data_cmd;
2403 }
2404
2405 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2406 {
2407         if (host->data_cmd) {
2408                 host->data_cmd->error = err;
2409                 sdhci_finish_mrq(host, host->data_cmd->mrq);
2410         }
2411
2412         if (host->cmd) {
2413                 host->cmd->error = err;
2414                 sdhci_finish_mrq(host, host->cmd->mrq);
2415         }
2416 }
2417
2418 static void sdhci_card_event(struct mmc_host *mmc)
2419 {
2420         struct sdhci_host *host = mmc_priv(mmc);
2421         unsigned long flags;
2422         int present;
2423
2424         /* First check if the client has provided its own card event handler */
2425         if (host->ops->card_event)
2426                 host->ops->card_event(host);
2427
2428         present = mmc->ops->get_cd(mmc);
2429
2430         spin_lock_irqsave(&host->lock, flags);
2431
2432         /* Check sdhci_has_requests() first in case we are runtime suspended */
2433         if (sdhci_has_requests(host) && !present) {
2434                 pr_err("%s: Card removed during transfer!\n",
2435                         mmc_hostname(host->mmc));
2436                 pr_err("%s: Resetting controller.\n",
2437                         mmc_hostname(host->mmc));
2438
2439                 sdhci_do_reset(host, SDHCI_RESET_CMD);
2440                 sdhci_do_reset(host, SDHCI_RESET_DATA);
2441
2442                 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2443         }
2444
2445         spin_unlock_irqrestore(&host->lock, flags);
2446 }
2447
2448 static const struct mmc_host_ops sdhci_ops = {
2449         .request        = sdhci_request,
2450         .post_req       = sdhci_post_req,
2451         .pre_req        = sdhci_pre_req,
2452         .set_ios        = sdhci_set_ios,
2453         .get_cd         = sdhci_get_cd,
2454         .get_ro         = sdhci_get_ro,
2455         .hw_reset       = sdhci_hw_reset,
2456         .enable_sdio_irq = sdhci_enable_sdio_irq,
2457         .start_signal_voltage_switch    = sdhci_start_signal_voltage_switch,
2458         .prepare_hs400_tuning           = sdhci_prepare_hs400_tuning,
2459         .execute_tuning                 = sdhci_execute_tuning,
2460         .card_event                     = sdhci_card_event,
2461         .card_busy      = sdhci_card_busy,
2462 };
2463
2464 /*****************************************************************************\
2465  *                                                                           *
2466  * Tasklets                                                                  *
2467  *                                                                           *
2468 \*****************************************************************************/
2469
2470 static bool sdhci_request_done(struct sdhci_host *host)
2471 {
2472         unsigned long flags;
2473         struct mmc_request *mrq;
2474         int i;
2475
2476         spin_lock_irqsave(&host->lock, flags);
2477
2478         for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2479                 mrq = host->mrqs_done[i];
2480                 if (mrq)
2481                         break;
2482         }
2483
2484         if (!mrq) {
2485                 spin_unlock_irqrestore(&host->lock, flags);
2486                 return true;
2487         }
2488
2489         sdhci_del_timer(host, mrq);
2490
2491         /*
2492          * Always unmap the data buffers if they were mapped by
2493          * sdhci_prepare_data() whenever we finish with a request.
2494          * This avoids leaking DMA mappings on error.
2495          */
2496         if (host->flags & SDHCI_REQ_USE_DMA) {
2497                 struct mmc_data *data = mrq->data;
2498
2499                 if (data && data->host_cookie == COOKIE_MAPPED) {
2500                         if (host->bounce_buffer) {
2501                                 /*
2502                                  * On reads, copy the bounced data into the
2503                                  * sglist
2504                                  */
2505                                 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2506                                         unsigned int length = data->bytes_xfered;
2507
2508                                         if (length > host->bounce_buffer_size) {
2509                                                 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2510                                                        mmc_hostname(host->mmc),
2511                                                        host->bounce_buffer_size,
2512                                                        data->bytes_xfered);
2513                                                 /* Cap it down and continue */
2514                                                 length = host->bounce_buffer_size;
2515                                         }
2516                                         dma_sync_single_for_cpu(
2517                                                 host->mmc->parent,
2518                                                 host->bounce_addr,
2519                                                 host->bounce_buffer_size,
2520                                                 DMA_FROM_DEVICE);
2521                                         sg_copy_from_buffer(data->sg,
2522                                                 data->sg_len,
2523                                                 host->bounce_buffer,
2524                                                 length);
2525                                 } else {
2526                                         /* No copying, just switch ownership */
2527                                         dma_sync_single_for_cpu(
2528                                                 host->mmc->parent,
2529                                                 host->bounce_addr,
2530                                                 host->bounce_buffer_size,
2531                                                 mmc_get_dma_dir(data));
2532                                 }
2533                         } else {
2534                                 /* Unmap the raw data */
2535                                 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2536                                              data->sg_len,
2537                                              mmc_get_dma_dir(data));
2538                         }
2539                         data->host_cookie = COOKIE_UNMAPPED;
2540                 }
2541         }
2542
2543         /*
2544          * The controller needs a reset of internal state machines
2545          * upon error conditions.
2546          */
2547         if (sdhci_needs_reset(host, mrq)) {
2548                 /*
2549                  * Do not finish until command and data lines are available for
2550                  * reset. Note there can only be one other mrq, so it cannot
2551                  * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2552                  * would both be null.
2553                  */
2554                 if (host->cmd || host->data_cmd) {
2555                         spin_unlock_irqrestore(&host->lock, flags);
2556                         return true;
2557                 }
2558
2559                 /* Some controllers need this kick or reset won't work here */
2560                 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2561                         /* This is to force an update */
2562                         host->ops->set_clock(host, host->clock);
2563
2564                 /* Spec says we should do both at the same time, but Ricoh
2565                    controllers do not like that. */
2566                 sdhci_do_reset(host, SDHCI_RESET_CMD);
2567                 sdhci_do_reset(host, SDHCI_RESET_DATA);
2568
2569                 host->pending_reset = false;
2570         }
2571
2572         if (!sdhci_has_requests(host))
2573                 sdhci_led_deactivate(host);
2574
2575         host->mrqs_done[i] = NULL;
2576
2577         mmiowb();
2578         spin_unlock_irqrestore(&host->lock, flags);
2579
2580         mmc_request_done(host->mmc, mrq);
2581
2582         return false;
2583 }
2584
2585 static void sdhci_tasklet_finish(unsigned long param)
2586 {
2587         struct sdhci_host *host = (struct sdhci_host *)param;
2588
2589         while (!sdhci_request_done(host))
2590                 ;
2591 }
2592
2593 static void sdhci_timeout_timer(struct timer_list *t)
2594 {
2595         struct sdhci_host *host;
2596         unsigned long flags;
2597
2598         host = from_timer(host, t, timer);
2599
2600         spin_lock_irqsave(&host->lock, flags);
2601
2602         if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2603                 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2604                        mmc_hostname(host->mmc));
2605                 sdhci_dumpregs(host);
2606
2607                 host->cmd->error = -ETIMEDOUT;
2608                 sdhci_finish_mrq(host, host->cmd->mrq);
2609         }
2610
2611         mmiowb();
2612         spin_unlock_irqrestore(&host->lock, flags);
2613 }
2614
2615 static void sdhci_timeout_data_timer(struct timer_list *t)
2616 {
2617         struct sdhci_host *host;
2618         unsigned long flags;
2619
2620         host = from_timer(host, t, data_timer);
2621
2622         spin_lock_irqsave(&host->lock, flags);
2623
2624         if (host->data || host->data_cmd ||
2625             (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2626                 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2627                        mmc_hostname(host->mmc));
2628                 sdhci_dumpregs(host);
2629
2630                 if (host->data) {
2631                         host->data->error = -ETIMEDOUT;
2632                         sdhci_finish_data(host);
2633                 } else if (host->data_cmd) {
2634                         host->data_cmd->error = -ETIMEDOUT;
2635                         sdhci_finish_mrq(host, host->data_cmd->mrq);
2636                 } else {
2637                         host->cmd->error = -ETIMEDOUT;
2638                         sdhci_finish_mrq(host, host->cmd->mrq);
2639                 }
2640         }
2641
2642         mmiowb();
2643         spin_unlock_irqrestore(&host->lock, flags);
2644 }
2645
2646 /*****************************************************************************\
2647  *                                                                           *
2648  * Interrupt handling                                                        *
2649  *                                                                           *
2650 \*****************************************************************************/
2651
2652 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
2653 {
2654         /* Handle auto-CMD12 error */
2655         if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
2656                 struct mmc_request *mrq = host->data_cmd->mrq;
2657                 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2658                 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2659                                    SDHCI_INT_DATA_TIMEOUT :
2660                                    SDHCI_INT_DATA_CRC;
2661
2662                 /* Treat auto-CMD12 error the same as data error */
2663                 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
2664                         *intmask_p |= data_err_bit;
2665                         return;
2666                 }
2667         }
2668
2669         if (!host->cmd) {
2670                 /*
2671                  * SDHCI recovers from errors by resetting the cmd and data
2672                  * circuits.  Until that is done, there very well might be more
2673                  * interrupts, so ignore them in that case.
2674                  */
2675                 if (host->pending_reset)
2676                         return;
2677                 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2678                        mmc_hostname(host->mmc), (unsigned)intmask);
2679                 sdhci_dumpregs(host);
2680                 return;
2681         }
2682
2683         if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2684                        SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2685                 if (intmask & SDHCI_INT_TIMEOUT)
2686                         host->cmd->error = -ETIMEDOUT;
2687                 else
2688                         host->cmd->error = -EILSEQ;
2689
2690                 /* Treat data command CRC error the same as data CRC error */
2691                 if (host->cmd->data &&
2692                     (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2693                      SDHCI_INT_CRC) {
2694                         host->cmd = NULL;
2695                         *intmask_p |= SDHCI_INT_DATA_CRC;
2696                         return;
2697                 }
2698
2699                 sdhci_finish_mrq(host, host->cmd->mrq);
2700                 return;
2701         }
2702
2703         /* Handle auto-CMD23 error */
2704         if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
2705                 struct mmc_request *mrq = host->cmd->mrq;
2706                 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2707                 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2708                           -ETIMEDOUT :
2709                           -EILSEQ;
2710
2711                 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
2712                         mrq->sbc->error = err;
2713                         sdhci_finish_mrq(host, mrq);
2714                         return;
2715                 }
2716         }
2717
2718         if (intmask & SDHCI_INT_RESPONSE)
2719                 sdhci_finish_command(host);
2720 }
2721
2722 static void sdhci_adma_show_error(struct sdhci_host *host)
2723 {
2724         void *desc = host->adma_table;
2725         dma_addr_t dma = host->adma_addr;
2726
2727         sdhci_dumpregs(host);
2728
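        /*
         * Walk the ADMA2 descriptor table, dumping each entry; the walk
         * stops at the first descriptor with the ADMA2_END attribute set.
         */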
2729         while (true) {
2730                 struct sdhci_adma2_64_desc *dma_desc = desc;
2731
2732                 if (host->flags & SDHCI_USE_64_BIT_DMA)
2733                         SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2734                             (unsigned long long)dma,
2735                             le32_to_cpu(dma_desc->addr_hi),
2736                             le32_to_cpu(dma_desc->addr_lo),
2737                             le16_to_cpu(dma_desc->len),
2738                             le16_to_cpu(dma_desc->cmd));
2739                 else
2740                         SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2741                             (unsigned long long)dma,
2742                             le32_to_cpu(dma_desc->addr_lo),
2743                             le16_to_cpu(dma_desc->len),
2744                             le16_to_cpu(dma_desc->cmd));
2745
2746                 desc += host->desc_sz;
2747                 dma += host->desc_sz;
2748
2749                 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2750                         break;
2751         }
2752 }
2753
2754 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2755 {
2756         u32 command;
2757
2758         /* CMD19 generates _only_ Buffer Read Ready interrupt */
2759         if (intmask & SDHCI_INT_DATA_AVAIL) {
2760                 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2761                 if (command == MMC_SEND_TUNING_BLOCK ||
2762                     command == MMC_SEND_TUNING_BLOCK_HS200) {
2763                         host->tuning_done = 1;
2764                         wake_up(&host->buf_ready_int);
2765                         return;
2766                 }
2767         }
2768
2769         if (!host->data) {
2770                 struct mmc_command *data_cmd = host->data_cmd;
2771
2772                 /*
2773                  * The "data complete" interrupt is also used to
2774                  * indicate that a busy state has ended. See comment
2775                  * above in sdhci_cmd_irq().
2776                  */
2777                 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2778                         if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2779                                 host->data_cmd = NULL;
2780                                 data_cmd->error = -ETIMEDOUT;
2781                                 sdhci_finish_mrq(host, data_cmd->mrq);
2782                                 return;
2783                         }
2784                         if (intmask & SDHCI_INT_DATA_END) {
2785                                 host->data_cmd = NULL;
2786                                 /*
2787                                  * Some cards handle busy-end interrupt
2788                                  * before the command has completed, so make
2789                                  * sure we do things in the proper order.
2790                                  */
2791                                 if (host->cmd == data_cmd)
2792                                         return;
2793
2794                                 sdhci_finish_mrq(host, data_cmd->mrq);
2795                                 return;
2796                         }
2797                 }
2798
2799                 /*
2800                  * SDHCI recovers from errors by resetting the cmd and data
2801                  * circuits. Until that is done, there very well might be more
2802                  * interrupts, so ignore them in that case.
2803                  */
2804                 if (host->pending_reset)
2805                         return;
2806
2807                 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2808                        mmc_hostname(host->mmc), (unsigned)intmask);
2809                 sdhci_dumpregs(host);
2810
2811                 return;
2812         }
2813
2814         if (intmask & SDHCI_INT_DATA_TIMEOUT)
2815                 host->data->error = -ETIMEDOUT;
2816         else if (intmask & SDHCI_INT_DATA_END_BIT)
2817                 host->data->error = -EILSEQ;
2818         else if ((intmask & SDHCI_INT_DATA_CRC) &&
2819                 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2820                         != MMC_BUS_TEST_R)
2821                 host->data->error = -EILSEQ;
2822         else if (intmask & SDHCI_INT_ADMA_ERROR) {
2823                 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
2824                        intmask);
2825                 sdhci_adma_show_error(host);
2826                 host->data->error = -EIO;
2827                 if (host->ops->adma_workaround)
2828                         host->ops->adma_workaround(host, intmask);
2829         }
2830
2831         if (host->data->error)
2832                 sdhci_finish_data(host);
2833         else {
2834                 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2835                         sdhci_transfer_pio(host);
2836
2837                 /*
2838                  * We currently don't do anything fancy with DMA
2839                  * boundaries, but as we can't disable the feature
2840                  * we need to at least restart the transfer.
2841                  *
2842                  * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2843                  * should return a valid address to continue from, but as
2844                  * some controllers are faulty, don't trust them.
2845                  */
2846                 if (intmask & SDHCI_INT_DMA_END) {
2847                         u32 dmastart, dmanow;
2848
2849                         dmastart = sdhci_sdma_address(host);
2850                         dmanow = dmastart + host->data->bytes_xfered;
2851                         /*
2852                          * Force update to the next DMA block boundary.
2853                          */
2854                         dmanow = (dmanow &
2855                                 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2856                                 SDHCI_DEFAULT_BOUNDARY_SIZE;
2857                         host->data->bytes_xfered = dmanow - dmastart;
2858                         DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2859                             dmastart, host->data->bytes_xfered, dmanow);
2860                         sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2861                 }
2862
2863                 if (intmask & SDHCI_INT_DATA_END) {
2864                         if (host->cmd == host->data_cmd) {
2865                                 /*
2866                                  * Data managed to finish before the
2867                                  * command completed. Make sure we do
2868                                  * things in the proper order.
2869                                  */
2870                                 host->data_early = 1;
2871                         } else {
2872                                 sdhci_finish_data(host);
2873                         }
2874                 }
2875         }
2876 }
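
/*
 * Illustrative sketch (not compiled): how the SDMA restart address in
 * sdhci_data_irq() above is derived.  Assuming the default 512 KiB
 * boundary, a transfer that started at 0x10070000 and has moved
 * 0x9000 bytes resumes at the next boundary:
 *
 *   (0x10079000 & ~(SZ_512K - 1)) + SZ_512K == 0x10080000
 */
#if 0
static u32 sdhci_next_sdma_boundary(u32 dmastart, u32 bytes_xfered)
{
        u32 dmanow = dmastart + bytes_xfered;

        /* Round up to the next DMA block boundary. */
        return (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
               SDHCI_DEFAULT_BOUNDARY_SIZE;
}
#endif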
2877
2878 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2879 {
2880         irqreturn_t result = IRQ_NONE;
2881         struct sdhci_host *host = dev_id;
2882         u32 intmask, mask, unexpected = 0;
2883         int max_loops = 16;
2884
2885         spin_lock(&host->lock);
2886
2887         if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2888                 spin_unlock(&host->lock);
2889                 return IRQ_NONE;
2890         }
2891
2892         intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2893         if (!intmask || intmask == 0xffffffff) {
2894                 result = IRQ_NONE;
2895                 goto out;
2896         }
2897
2898         do {
2899                 DBG("IRQ status 0x%08x\n", intmask);
2900
2901                 if (host->ops->irq) {
2902                         intmask = host->ops->irq(host, intmask);
2903                         if (!intmask)
2904                                 goto cont;
2905                 }
2906
2907                 /* Clear selected interrupts. */
2908                 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2909                                   SDHCI_INT_BUS_POWER);
2910                 sdhci_writel(host, mask, SDHCI_INT_STATUS);
2911
2912                 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2913                         u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2914                                       SDHCI_CARD_PRESENT;
2915
2916                         /*
2917                          * There is an observation on i.MX eSDHC: the INSERT
2918                          * bit will be set again immediately when it gets
2919                          * cleared, if a card is inserted.  We have to mask
2920                          * the irq to prevent an interrupt storm which would
2921                          * freeze the system.  And the REMOVE bit suffers
2922                          * from the same situation.
2923                          *
2924                          * More testing is needed here to ensure it works
2925                          * for other platforms though.
2926                          */
2927                         host->ier &= ~(SDHCI_INT_CARD_INSERT |
2928                                        SDHCI_INT_CARD_REMOVE);
2929                         host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2930                                                SDHCI_INT_CARD_INSERT;
2931                         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2932                         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2933
2934                         sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2935                                      SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2936
2937                         host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2938                                                        SDHCI_INT_CARD_REMOVE);
2939                         result = IRQ_WAKE_THREAD;
2940                 }
2941
2942                 if (intmask & SDHCI_INT_CMD_MASK)
2943                         sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
2944
2945                 if (intmask & SDHCI_INT_DATA_MASK)
2946                         sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2947
2948                 if (intmask & SDHCI_INT_BUS_POWER)
2949                         pr_err("%s: Card is consuming too much power!\n",
2950                                 mmc_hostname(host->mmc));
2951
2952                 if (intmask & SDHCI_INT_RETUNE)
2953                         mmc_retune_needed(host->mmc);
2954
2955                 if ((intmask & SDHCI_INT_CARD_INT) &&
2956                     (host->ier & SDHCI_INT_CARD_INT)) {
2957                         sdhci_enable_sdio_irq_nolock(host, false);
2958                         host->thread_isr |= SDHCI_INT_CARD_INT;
2959                         result = IRQ_WAKE_THREAD;
2960                 }
2961
2962                 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2963                              SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2964                              SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2965                              SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
2966
2967                 if (intmask) {
2968                         unexpected |= intmask;
2969                         sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2970                 }
2971 cont:
2972                 if (result == IRQ_NONE)
2973                         result = IRQ_HANDLED;
2974
2975                 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2976         } while (intmask && --max_loops);
2977 out:
2978         spin_unlock(&host->lock);
2979
2980         if (unexpected) {
2981                 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2982                            mmc_hostname(host->mmc), unexpected);
2983                 sdhci_dumpregs(host);
2984         }
2985
2986         return result;
2987 }
2988
2989 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2990 {
2991         struct sdhci_host *host = dev_id;
2992         unsigned long flags;
2993         u32 isr;
2994
2995         spin_lock_irqsave(&host->lock, flags);
2996         isr = host->thread_isr;
2997         host->thread_isr = 0;
2998         spin_unlock_irqrestore(&host->lock, flags);
2999
3000         if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3001                 struct mmc_host *mmc = host->mmc;
3002
3003                 mmc->ops->card_event(mmc);
3004                 mmc_detect_change(mmc, msecs_to_jiffies(200));
3005         }
3006
3007         if (isr & SDHCI_INT_CARD_INT) {
3008                 sdio_run_irqs(host->mmc);
3009
3010                 spin_lock_irqsave(&host->lock, flags);
3011                 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3012                         sdhci_enable_sdio_irq_nolock(host, true);
3013                 spin_unlock_irqrestore(&host->lock, flags);
3014         }
3015
3016         return isr ? IRQ_HANDLED : IRQ_NONE;
3017 }
3018
3019 /*****************************************************************************\
3020  *                                                                           *
3021  * Suspend/resume                                                            *
3022  *                                                                           *
3023 \*****************************************************************************/
3024
3025 #ifdef CONFIG_PM
3026
3027 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3028 {
3029         return mmc_card_is_removable(host->mmc) &&
3030                !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3031                !mmc_can_gpio_cd(host->mmc);
3032 }
3033
3034 /*
3035  * To enable wakeup events, the corresponding events have to be enabled in
3036  * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3037  * Table' in the SD Host Controller Standard Specification.
3038  * It is useless to restore SDHCI_INT_ENABLE state in
3039  * sdhci_disable_irq_wakeups() since it will be set by
3040  * sdhci_enable_card_detection() or sdhci_init().
3041  */
3042 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3043 {
3044         u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3045                   SDHCI_WAKE_ON_INT;
3046         u32 irq_val = 0;
3047         u8 wake_val = 0;
3048         u8 val;
3049
3050         if (sdhci_cd_irq_can_wakeup(host)) {
3051                 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3052                 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3053         }
3054
3055         if (mmc_card_wake_sdio_irq(host->mmc)) {
3056                 wake_val |= SDHCI_WAKE_ON_INT;
3057                 irq_val |= SDHCI_INT_CARD_INT;
3058         }
3059
3060         if (!irq_val)
3061                 return false;
3062
3063         val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3064         val &= ~mask;
3065         val |= wake_val;
3066         sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3067
3068         sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3069
3070         host->irq_wake_enabled = !enable_irq_wake(host->irq);
3071
3072         return host->irq_wake_enabled;
3073 }
3074
3075 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3076 {
3077         u8 val;
3078         u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3079                         | SDHCI_WAKE_ON_INT;
3080
3081         val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3082         val &= ~mask;
3083         sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3084
3085         disable_irq_wake(host->irq);
3086
3087         host->irq_wake_enabled = false;
3088 }
3089
3090 int sdhci_suspend_host(struct sdhci_host *host)
3091 {
3092         sdhci_disable_card_detection(host);
3093
3094         mmc_retune_timer_stop(host->mmc);
3095
3096         if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3097             !sdhci_enable_irq_wakeups(host)) {
3098                 host->ier = 0;
3099                 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3100                 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3101                 free_irq(host->irq, host);
3102         }
3103
3104         return 0;
3105 }
3106
3107 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3108
3109 int sdhci_resume_host(struct sdhci_host *host)
3110 {
3111         struct mmc_host *mmc = host->mmc;
3112         int ret = 0;
3113
3114         if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3115                 if (host->ops->enable_dma)
3116                         host->ops->enable_dma(host);
3117         }
3118
3119         if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3120             (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3121                 /* Card keeps power but host controller does not */
3122                 sdhci_init(host, 0);
3123                 host->pwr = 0;
3124                 host->clock = 0;
3125                 mmc->ops->set_ios(mmc, &mmc->ios);
3126         } else {
3127                 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3128                 mmiowb();
3129         }
3130
3131         if (host->irq_wake_enabled) {
3132                 sdhci_disable_irq_wakeups(host);
3133         } else {
3134                 ret = request_threaded_irq(host->irq, sdhci_irq,
3135                                            sdhci_thread_irq, IRQF_SHARED,
3136                                            mmc_hostname(host->mmc), host);
3137                 if (ret)
3138                         return ret;
3139         }
3140
3141         sdhci_enable_card_detection(host);
3142
3143         return ret;
3144 }
3145
3146 EXPORT_SYMBOL_GPL(sdhci_resume_host);
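
/*
 * Illustrative sketch (not compiled): a hypothetical platform glue
 * driver would wrap the system sleep helpers above like this.  The
 * "my_sdhci" names are assumptions for the example only; see the
 * runtime PM sketch further below for how these hooks are tied into
 * a dev_pm_ops.
 */
#if 0
static int my_sdhci_suspend(struct device *dev)
{
        struct sdhci_host *host = dev_get_drvdata(dev);

        return sdhci_suspend_host(host);
}

static int my_sdhci_resume(struct device *dev)
{
        struct sdhci_host *host = dev_get_drvdata(dev);

        return sdhci_resume_host(host);
}
#endif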
3147
3148 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3149 {
3150         unsigned long flags;
3151
3152         mmc_retune_timer_stop(host->mmc);
3153
3154         spin_lock_irqsave(&host->lock, flags);
3155         host->ier &= SDHCI_INT_CARD_INT;
3156         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3157         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3158         spin_unlock_irqrestore(&host->lock, flags);
3159
3160         synchronize_hardirq(host->irq);
3161
3162         spin_lock_irqsave(&host->lock, flags);
3163         host->runtime_suspended = true;
3164         spin_unlock_irqrestore(&host->lock, flags);
3165
3166         return 0;
3167 }
3168 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3169
3170 int sdhci_runtime_resume_host(struct sdhci_host *host)
3171 {
3172         struct mmc_host *mmc = host->mmc;
3173         unsigned long flags;
3174         int host_flags = host->flags;
3175
3176         if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3177                 if (host->ops->enable_dma)
3178                         host->ops->enable_dma(host);
3179         }
3180
3181         sdhci_init(host, 0);
3182
3183         if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3184             mmc->ios.power_mode != MMC_POWER_OFF) {
3185                 /* Force clock and power re-program */
3186                 host->pwr = 0;
3187                 host->clock = 0;
3188                 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3189                 mmc->ops->set_ios(mmc, &mmc->ios);
3190
3191                 if ((host_flags & SDHCI_PV_ENABLED) &&
3192                     !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3193                         spin_lock_irqsave(&host->lock, flags);
3194                         sdhci_enable_preset_value(host, true);
3195                         spin_unlock_irqrestore(&host->lock, flags);
3196                 }
3197
3198                 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3199                     mmc->ops->hs400_enhanced_strobe)
3200                         mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3201         }
3202
3203         spin_lock_irqsave(&host->lock, flags);
3204
3205         host->runtime_suspended = false;
3206
3207         /* Enable SDIO IRQ */
3208         if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3209                 sdhci_enable_sdio_irq_nolock(host, true);
3210
3211         /* Enable Card Detection */
3212         sdhci_enable_card_detection(host);
3213
3214         spin_unlock_irqrestore(&host->lock, flags);
3215
3216         return 0;
3217 }
3218 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
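
/*
 * Illustrative sketch (not compiled): a glue driver's runtime PM hooks
 * usually wrap the two helpers above, gating controller clocks in
 * between (clock handling omitted here for brevity).  The dev_pm_ops
 * below reuses the "my_sdhci" system sleep hooks sketched earlier;
 * all names are assumptions for the example.
 */
#if 0
static int my_sdhci_runtime_suspend(struct device *dev)
{
        struct sdhci_host *host = dev_get_drvdata(dev);

        return sdhci_runtime_suspend_host(host);
}

static int my_sdhci_runtime_resume(struct device *dev)
{
        struct sdhci_host *host = dev_get_drvdata(dev);

        return sdhci_runtime_resume_host(host);
}

static const struct dev_pm_ops my_sdhci_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(my_sdhci_suspend, my_sdhci_resume)
        SET_RUNTIME_PM_OPS(my_sdhci_runtime_suspend,
                           my_sdhci_runtime_resume, NULL)
};
#endif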
3219
3220 #endif /* CONFIG_PM */
3221
3222 /*****************************************************************************\
3223  *                                                                           *
3224  * Command Queue Engine (CQE) helpers                                        *
3225  *                                                                           *
3226 \*****************************************************************************/
3227
3228 void sdhci_cqe_enable(struct mmc_host *mmc)
3229 {
3230         struct sdhci_host *host = mmc_priv(mmc);
3231         unsigned long flags;
3232         u8 ctrl;
3233
3234         spin_lock_irqsave(&host->lock, flags);
3235
3236         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3237         ctrl &= ~SDHCI_CTRL_DMA_MASK;
3238         if (host->flags & SDHCI_USE_64_BIT_DMA)
3239                 ctrl |= SDHCI_CTRL_ADMA64;
3240         else
3241                 ctrl |= SDHCI_CTRL_ADMA32;
3242         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3243
3244         sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3245                      SDHCI_BLOCK_SIZE);
3246
3247         /* Set maximum timeout */
3248         sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
3249
3250         host->ier = host->cqe_ier;
3251
3252         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3253         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3254
3255         host->cqe_on = true;
3256
3257         pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3258                  mmc_hostname(mmc), host->ier,
3259                  sdhci_readl(host, SDHCI_INT_STATUS));
3260
3261         mmiowb();
3262         spin_unlock_irqrestore(&host->lock, flags);
3263 }
3264 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3265
3266 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3267 {
3268         struct sdhci_host *host = mmc_priv(mmc);
3269         unsigned long flags;
3270
3271         spin_lock_irqsave(&host->lock, flags);
3272
3273         sdhci_set_default_irqs(host);
3274
3275         host->cqe_on = false;
3276
3277         if (recovery) {
3278                 sdhci_do_reset(host, SDHCI_RESET_CMD);
3279                 sdhci_do_reset(host, SDHCI_RESET_DATA);
3280         }
3281
3282         pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3283                  mmc_hostname(mmc), host->ier,
3284                  sdhci_readl(host, SDHCI_INT_STATUS));
3285
3286         mmiowb();
3287         spin_unlock_irqrestore(&host->lock, flags);
3288 }
3289 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3290
3291 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3292                    int *data_error)
3293 {
3294         u32 mask;
3295
3296         if (!host->cqe_on)
3297                 return false;
3298
3299         if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3300                 *cmd_error = -EILSEQ;
3301         else if (intmask & SDHCI_INT_TIMEOUT)
3302                 *cmd_error = -ETIMEDOUT;
3303         else
3304                 *cmd_error = 0;
3305
3306         if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3307                 *data_error = -EILSEQ;
3308         else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3309                 *data_error = -ETIMEDOUT;
3310         else if (intmask & SDHCI_INT_ADMA_ERROR)
3311                 *data_error = -EIO;
3312         else
3313                 *data_error = 0;
3314
3315         /* Clear selected interrupts. */
3316         mask = intmask & host->cqe_ier;
3317         sdhci_writel(host, mask, SDHCI_INT_STATUS);
3318
3319         if (intmask & SDHCI_INT_BUS_POWER)
3320                 pr_err("%s: Card is consuming too much power!\n",
3321                        mmc_hostname(host->mmc));
3322
3323         intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3324         if (intmask) {
3325                 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3326                 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3327                        mmc_hostname(host->mmc), intmask);
3328                 sdhci_dumpregs(host);
3329         }
3330
3331         return true;
3332 }
3333 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
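
/*
 * Illustrative sketch (not compiled): a CQE-capable glue driver's
 * ->irq() op can let sdhci_cqe_irq() above decode the interrupt and,
 * when the CQE is active, forward the decoded errors to the CQHCI
 * layer.  The cqhci_irq() signature is assumed from
 * drivers/mmc/host/cqhci.h; "my_sdhci_cqe_irq" is a made-up name.
 */
#if 0
static u32 my_sdhci_cqe_irq(struct sdhci_host *host, u32 intmask)
{
        int cmd_error = 0;
        int data_error = 0;

        /* Not in CQE mode: hand the mask back for normal handling. */
        if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
                return intmask;

        cqhci_irq(host->mmc, intmask, cmd_error, data_error);

        return 0;       /* fully handled */
}
#endif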
3334
3335 /*****************************************************************************\
3336  *                                                                           *
3337  * Device allocation/registration                                            *
3338  *                                                                           *
3339 \*****************************************************************************/
3340
3341 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3342         size_t priv_size)
3343 {
3344         struct mmc_host *mmc;
3345         struct sdhci_host *host;
3346
3347         WARN_ON(dev == NULL);
3348
3349         mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3350         if (!mmc)
3351                 return ERR_PTR(-ENOMEM);
3352
3353         host = mmc_priv(mmc);
3354         host->mmc = mmc;
3355         host->mmc_host_ops = sdhci_ops;
3356         mmc->ops = &host->mmc_host_ops;
3357
3358         host->flags = SDHCI_SIGNALING_330;
3359
3360         host->cqe_ier     = SDHCI_CQE_INT_MASK;
3361         host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3362
3363         host->tuning_delay = -1;
3364
3365         host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3366
3367         return host;
3368 }
3369
3370 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
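
/*
 * Illustrative sketch (not compiled): a glue driver reserves its
 * private state behind struct sdhci_host at allocation time and gets
 * at it with sdhci_priv() (from sdhci.h).  "my_sdhci_priv" and the
 * platform_device wiring are assumptions for the example; real probe
 * code must also set up host->ioaddr, host->irq etc. before adding
 * the host.
 */
#if 0
struct my_sdhci_priv {
        struct clk *clk;
};

static int my_sdhci_probe(struct platform_device *pdev)
{
        struct my_sdhci_priv *priv;
        struct sdhci_host *host;
        int ret;

        host = sdhci_alloc_host(&pdev->dev, sizeof(*priv));
        if (IS_ERR(host))
                return PTR_ERR(host);

        priv = sdhci_priv(host);
        priv->clk = NULL;       /* controller-specific setup goes here */
        platform_set_drvdata(pdev, host);

        ret = sdhci_add_host(host);
        if (ret)
                sdhci_free_host(host);

        return ret;
}
#endif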
3371
3372 static int sdhci_set_dma_mask(struct sdhci_host *host)
3373 {
3374         struct mmc_host *mmc = host->mmc;
3375         struct device *dev = mmc_dev(mmc);
3376         int ret = -EINVAL;
3377
3378         if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3379                 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3380
3381         /* Try 64-bit mask if hardware is capable of it */
3382         if (host->flags & SDHCI_USE_64_BIT_DMA) {
3383                 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3384                 if (ret) {
3385                         pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3386                                 mmc_hostname(mmc));
3387                         host->flags &= ~SDHCI_USE_64_BIT_DMA;
3388                 }
3389         }
3390
3391         /* 32-bit mask as default & fallback */
3392         if (ret) {
3393                 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3394                 if (ret)
3395                         pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3396                                 mmc_hostname(mmc));
3397         }
3398
3399         return ret;
3400 }
3401
3402 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3403 {
3404         u16 v;
3405         u64 dt_caps_mask = 0;
3406         u64 dt_caps = 0;
3407
3408         if (host->read_caps)
3409                 return;
3410
3411         host->read_caps = true;
3412
3413         if (debug_quirks)
3414                 host->quirks = debug_quirks;
3415
3416         if (debug_quirks2)
3417                 host->quirks2 = debug_quirks2;
3418
3419         sdhci_do_reset(host, SDHCI_RESET_ALL);
3420
3421         of_property_read_u64(mmc_dev(host->mmc)->of_node,
3422                              "sdhci-caps-mask", &dt_caps_mask);
3423         of_property_read_u64(mmc_dev(host->mmc)->of_node,
3424                              "sdhci-caps", &dt_caps);
3425
3426         v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3427         host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3428
3429         if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3430                 return;
3431
3432         if (caps) {
3433                 host->caps = *caps;
3434         } else {
3435                 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3436                 host->caps &= ~lower_32_bits(dt_caps_mask);
3437                 host->caps |= lower_32_bits(dt_caps);
3438         }
3439
3440         if (host->version < SDHCI_SPEC_300)
3441                 return;
3442
3443         if (caps1) {
3444                 host->caps1 = *caps1;
3445         } else {
3446                 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3447                 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3448                 host->caps1 |= upper_32_bits(dt_caps);
3449         }
3450 }
3451 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
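
/*
 * Illustrative sketch (not compiled): a driver for a controller with
 * unreliable capability registers can feed fixed values to
 * __sdhci_read_caps() instead of letting the core read the hardware.
 * The capability bits chosen below are made up for the example.
 */
#if 0
static void my_sdhci_read_caps(struct sdhci_host *host)
{
        u32 caps  = SDHCI_CAN_DO_SDMA | SDHCI_CAN_VDD_330;
        u32 caps1 = 0;

        __sdhci_read_caps(host, NULL, &caps, &caps1);
}
#endif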
3452
3453 static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3454 {
3455         struct mmc_host *mmc = host->mmc;
3456         unsigned int max_blocks;
3457         unsigned int bounce_size;
3458         int ret;
3459
3460         /*
3461          * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3462          * has diminishing returns; this is probably because SD/MMC
3463          * cards are usually optimized to handle requests of this size.
3464          */
3465         bounce_size = SZ_64K;
3466         /*
3467          * Shrink the bounce buffer to the maximum request size if that
3468          * is smaller; the bounce buffer size in turn caps the maximum
3469          * request size below.
3470          */
3471         if (mmc->max_req_size < bounce_size)
3472                 bounce_size = mmc->max_req_size;
3473         max_blocks = bounce_size / 512;
3474
3475         /*
3476          * When we support just one segment, we can get significant
3477          * speedups with the help of a bounce buffer that groups
3478          * scattered reads/writes together.
3479          */
3480         host->bounce_buffer = devm_kmalloc(mmc->parent,
3481                                            bounce_size,
3482                                            GFP_KERNEL);
3483         if (!host->bounce_buffer) {
3484                 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3485                        mmc_hostname(mmc),
3486                        bounce_size);
3487                 /*
3488                  * Exiting with zero here makes sure we proceed with
3489                  * mmc->max_segs == 1.
3490                  */
3491                 return 0;
3492         }
3493
3494         host->bounce_addr = dma_map_single(mmc->parent,
3495                                            host->bounce_buffer,
3496                                            bounce_size,
3497                                            DMA_BIDIRECTIONAL);
3498         ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3499         if (ret)
3500                 /* Again fall back to max_segs == 1 */
3501                 return 0;
3502         host->bounce_buffer_size = bounce_size;
3503
3504         /* Lie about this since we're bouncing */
3505         mmc->max_segs = max_blocks;
3506         mmc->max_seg_size = bounce_size;
3507         mmc->max_req_size = bounce_size;
3508
3509         pr_info("%s: bounce up to %u segments into one, max segment size %u bytes\n",
3510                 mmc_hostname(mmc), max_blocks, bounce_size);
3511
3512         return 0;
3513 }
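
/*
 * Worked example for sdhci_allocate_bounce_buffer() above: with the
 * default 64 KiB cap and 512-byte blocks, the bounce buffer holds
 * 65536 / 512 = 128 blocks, so the core advertises up to 128
 * "segments" that are really one bounced region.
 */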
3514
3515 int sdhci_setup_host(struct sdhci_host *host)
3516 {
3517         struct mmc_host *mmc;
3518         u32 max_current_caps;
3519         unsigned int ocr_avail;
3520         unsigned int override_timeout_clk;
3521         u32 max_clk;
3522         int ret;
3523
3524         WARN_ON(host == NULL);
3525         if (host == NULL)
3526                 return -EINVAL;
3527
3528         mmc = host->mmc;
3529
3530         /*
3531          * If there are external regulators, get them. Note this must be done
3532          * early before resetting the host and reading the capabilities so that
3533          * the host can take the appropriate action if regulators are not
3534          * available.
3535          */
3536         ret = mmc_regulator_get_supply(mmc);
3537         if (ret)
3538                 return ret;
3539
3540         DBG("Version:   0x%08x | Present:  0x%08x\n",
3541             sdhci_readw(host, SDHCI_HOST_VERSION),
3542             sdhci_readl(host, SDHCI_PRESENT_STATE));
3543         DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
3544             sdhci_readl(host, SDHCI_CAPABILITIES),
3545             sdhci_readl(host, SDHCI_CAPABILITIES_1));
3546
3547         sdhci_read_caps(host);
3548
3549         override_timeout_clk = host->timeout_clk;
3550
3551         if (host->version > SDHCI_SPEC_300) {
3552                 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3553                        mmc_hostname(mmc), host->version);
3554         }
3555
3556         if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
3557                 mmc->caps2 &= ~MMC_CAP2_CQE;
3558
3559         if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3560                 host->flags |= SDHCI_USE_SDMA;
3561         else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3562                 DBG("Controller doesn't have SDMA capability\n");
3563         else
3564                 host->flags |= SDHCI_USE_SDMA;
3565
3566         if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3567                 (host->flags & SDHCI_USE_SDMA)) {
3568                 DBG("Disabling DMA as it is marked broken\n");
3569                 host->flags &= ~SDHCI_USE_SDMA;
3570         }
3571
3572         if ((host->version >= SDHCI_SPEC_200) &&
3573                 (host->caps & SDHCI_CAN_DO_ADMA2))
3574                 host->flags |= SDHCI_USE_ADMA;
3575
3576         if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3577                 (host->flags & SDHCI_USE_ADMA)) {
3578                 DBG("Disabling ADMA as it is marked broken\n");
3579                 host->flags &= ~SDHCI_USE_ADMA;
3580         }
3581
3582         /*
3583          * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3584          * and *must* do 64-bit DMA.  A driver has the opportunity to change
3585          * that during the first call to ->enable_dma().  Similarly
3586          * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3587          * implement.
3588          */
3589         if (host->caps & SDHCI_CAN_64BIT)
3590                 host->flags |= SDHCI_USE_64_BIT_DMA;
3591
3592         if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3593                 ret = sdhci_set_dma_mask(host);
3594
3595                 if (!ret && host->ops->enable_dma)
3596                         ret = host->ops->enable_dma(host);
3597
3598                 if (ret) {
3599                         pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3600                                 mmc_hostname(mmc));
3601                         host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3602
3603                         ret = 0;
3604                 }
3605         }
3606
3607         /* SDMA does not support 64-bit DMA */
3608         if (host->flags & SDHCI_USE_64_BIT_DMA)
3609                 host->flags &= ~SDHCI_USE_SDMA;
3610
3611         if (host->flags & SDHCI_USE_ADMA) {
3612                 dma_addr_t dma;
3613                 void *buf;
3614
3615                 /*
3616                  * The DMA descriptor table size is calculated as the maximum
3617                  * number of segments times 2, to allow for an alignment
3618                  * descriptor for each segment, plus 1 for a nop end descriptor,
3619                  * all multiplied by the descriptor size.
3620                  */
3621                 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3622                         host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3623                                               SDHCI_ADMA2_64_DESC_SZ;
3624                         host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3625                 } else {
3626                         host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3627                                               SDHCI_ADMA2_32_DESC_SZ;
3628                         host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3629                 }
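                /*
                 * Worked example (assuming SDHCI_MAX_SEGS is 128): the
                 * table above holds 128 * 2 + 1 = 257 descriptors of
                 * host->desc_sz bytes each.
                 */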
3630
3631                 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3632                 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3633                                          host->adma_table_sz, &dma, GFP_KERNEL);
3634                 if (!buf) {
3635                         pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3636                                 mmc_hostname(mmc));
3637                         host->flags &= ~SDHCI_USE_ADMA;
3638                 } else if ((dma + host->align_buffer_sz) &
3639                            (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3640                         pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3641                                 mmc_hostname(mmc));
3642                         host->flags &= ~SDHCI_USE_ADMA;
3643                         dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3644                                           host->adma_table_sz, buf, dma);
3645                 } else {
3646                         host->align_buffer = buf;
3647                         host->align_addr = dma;
3648
3649                         host->adma_table = buf + host->align_buffer_sz;
3650                         host->adma_addr = dma + host->align_buffer_sz;
3651                 }
3652         }
3653
3654         /*
3655          * If we use DMA, then it's up to the caller to set the DMA
3656          * mask, but PIO does not need the hw shim so we set a new
3657          * mask here in that case.
3658          */
3659         if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3660                 host->dma_mask = DMA_BIT_MASK(64);
3661                 mmc_dev(mmc)->dma_mask = &host->dma_mask;
3662         }
3663
3664         if (host->version >= SDHCI_SPEC_300)
3665                 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3666                         >> SDHCI_CLOCK_BASE_SHIFT;
3667         else
3668                 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3669                         >> SDHCI_CLOCK_BASE_SHIFT;
3670
3671         host->max_clk *= 1000000;
3672         if (host->max_clk == 0 || host->quirks &
3673                         SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3674                 if (!host->ops->get_max_clock) {
3675                         pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3676                                mmc_hostname(mmc));
3677                         ret = -ENODEV;
3678                         goto undma;
3679                 }
3680                 host->max_clk = host->ops->get_max_clock(host);
3681         }
3682
3683         /*
3684          * For Host Controller v3.00, find out whether the clock
3685          * multiplier is supported.
3686          */
3687         host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3688                         SDHCI_CLOCK_MUL_SHIFT;
3689
3690         /*
3691          * If the value in Clock Multiplier is 0, programmable
3692          * clock mode is not supported; otherwise the actual clock
3693          * multiplier is one more than the value of Clock Multiplier
3694          * in the Capabilities Register.
3695          */
3696         if (host->clk_mul)
3697                 host->clk_mul += 1;
3698
3699         /*
3700          * Set host parameters.
3701          */
3702         max_clk = host->max_clk;
3703
3704         if (host->ops->get_min_clock)
3705                 mmc->f_min = host->ops->get_min_clock(host);
3706         else if (host->version >= SDHCI_SPEC_300) {
3707                 if (host->clk_mul)
3708                         max_clk = host->max_clk * host->clk_mul;
3709                 /*
3710                  * Divided Clock Mode minimum clock rate is always less than
3711                  * Programmable Clock Mode minimum clock rate.
3712                  */
3713                 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3714         } else
3715                 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3716
3717         if (!mmc->f_max || mmc->f_max > max_clk)
3718                 mmc->f_max = max_clk;
3719
3720         if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3721                 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3722                                         SDHCI_TIMEOUT_CLK_SHIFT;
3723
3724                 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3725                         host->timeout_clk *= 1000;
3726
3727                 if (host->timeout_clk == 0) {
3728                         if (!host->ops->get_timeout_clock) {
3729                                 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3730                                         mmc_hostname(mmc));
3731                                 ret = -ENODEV;
3732                                 goto undma;
3733                         }
3734
3735                         host->timeout_clk =
3736                                 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3737                                              1000);
3738                 }
3739
3740                 if (override_timeout_clk)
3741                         host->timeout_clk = override_timeout_clk;
3742
3743                 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3744                         host->ops->get_max_timeout_count(host) : 1 << 27;
3745                 mmc->max_busy_timeout /= host->timeout_clk;
3746         }
3747
3748         if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
3749             !host->ops->get_max_timeout_count)
3750                 mmc->max_busy_timeout = 0;
3751
3752         mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3753         mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3754
3755         if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3756                 host->flags |= SDHCI_AUTO_CMD12;
3757
3758         /* Auto-CMD23 stuff only works in ADMA or PIO. */
3759         if ((host->version >= SDHCI_SPEC_300) &&
3760             ((host->flags & SDHCI_USE_ADMA) ||
3761              !(host->flags & SDHCI_USE_SDMA)) &&
3762              !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3763                 host->flags |= SDHCI_AUTO_CMD23;
3764                 DBG("Auto-CMD23 available\n");
3765         } else {
3766                 DBG("Auto-CMD23 unavailable\n");
3767         }
3768
3769         /*
3770          * A controller may support 8-bit width, but the board itself
3771          * might not have the pins brought out.  Boards that support
3772          * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3773          * their platform code before calling sdhci_add_host(), and we
3774          * won't assume 8-bit width for hosts without that CAP.
3775          */
3776         if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3777                 mmc->caps |= MMC_CAP_4_BIT_DATA;
3778
3779         if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3780                 mmc->caps &= ~MMC_CAP_CMD23;
3781
3782         if (host->caps & SDHCI_CAN_DO_HISPD)
3783                 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3784
3785         if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3786             mmc_card_is_removable(mmc) &&
3787             mmc_gpio_get_cd(host->mmc) < 0)
3788                 mmc->caps |= MMC_CAP_NEEDS_POLL;
3789
3790         if (!IS_ERR(mmc->supply.vqmmc)) {
3791                 ret = regulator_enable(mmc->supply.vqmmc);
3792
3793                 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
3794                 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3795                                                     1950000))
3796                         host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3797                                          SDHCI_SUPPORT_SDR50 |
3798                                          SDHCI_SUPPORT_DDR50);
3799
3800                 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
3801                 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
3802                                                     3600000))
3803                         host->flags &= ~SDHCI_SIGNALING_330;
3804
3805                 if (ret) {
3806                         pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3807                                 mmc_hostname(mmc), ret);
3808                         mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3809                 }
3810         }
3811
3812         if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3813                 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3814                                  SDHCI_SUPPORT_DDR50);
3815                 /*
3816                  * The SDHCI controller in a SoC might support HS200/HS400
3817                  * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
3818                  * but if the board is modeled such that the IO lines are not
3819                  * connected to 1.8v then HS200/HS400 cannot be supported.
3820                  * Disable HS200/HS400 if the board does not have 1.8v connected
3821                  * to the IO lines. (Applicable for other modes in 1.8v)
3822                  */
3823                 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
3824                 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
3825         }
3826
3827         /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3828         if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3829                            SDHCI_SUPPORT_DDR50))
3830                 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3831
3832         /* SDR104 support also implies SDR50 support */
3833         if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3834                 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3835                 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3836                  * field can be promoted to support HS200.
3837                  */
3838                 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3839                         mmc->caps2 |= MMC_CAP2_HS200;
3840         } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3841                 mmc->caps |= MMC_CAP_UHS_SDR50;
3842         }
3843
3844         if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3845             (host->caps1 & SDHCI_SUPPORT_HS400))
3846                 mmc->caps2 |= MMC_CAP2_HS400;
3847
3848         if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3849             (IS_ERR(mmc->supply.vqmmc) ||
3850              !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3851                                              1300000)))
3852                 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3853
3854         if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3855             !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3856                 mmc->caps |= MMC_CAP_UHS_DDR50;
3857
3858         /* Does the host need tuning for SDR50? */
3859         if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3860                 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3861
3862         /* Driver Type(s) (A, C, D) supported by the host */
3863         if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3864                 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3865         if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3866                 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3867         if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3868                 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3869
3870         /* Initial value for re-tuning timer count */
3871         host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3872                              SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3873
3874         /*
3875          * If the Re-tuning Timer is not disabled, the actual value of
3876          * the re-tuning timer will be 2 ^ (n - 1) seconds.
3877          */
3878         if (host->tuning_count)
3879                 host->tuning_count = 1 << (host->tuning_count - 1);
3880
3881         /* Re-tuning mode supported by the Host Controller */
3882         host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3883                              SDHCI_RETUNING_MODE_SHIFT;
3884
3885         ocr_avail = 0;
3886
3887         /*
3888          * According to SD Host Controller spec v3.00, if the Host System
3889          * can afford more than 150mA, Host Driver should set XPC to 1. Also
3890          * the value is meaningful only if Voltage Support in the Capabilities
3891          * register is set. The actual current value is 4 times the register
3892          * value.
3893          */
3894         max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3895         if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3896                 int curr = regulator_get_current_limit(mmc->supply.vmmc);
3897                 if (curr > 0) {
3898
3899                         /* convert to SDHCI_MAX_CURRENT format */
3900                         curr = curr/1000;  /* convert to mA */
3901                         curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3902
3903                         curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3904                         max_current_caps =
3905                                 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3906                                 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3907                                 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
3908                 }
3909         }
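
        /*
         * Worked example: a regulator limit of 800000 uA is 800 mA, i.e.
         * a register value of 800 / 4 = 200; the decoding below turns it
         * back into 200 * 4 = 800 mA for each supported voltage range.
         */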
3910
3911         if (host->caps & SDHCI_CAN_VDD_330) {
3912                 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3913
3914                 mmc->max_current_330 = ((max_current_caps &
3915                                    SDHCI_MAX_CURRENT_330_MASK) >>
3916                                    SDHCI_MAX_CURRENT_330_SHIFT) *
3917                                    SDHCI_MAX_CURRENT_MULTIPLIER;
3918         }
3919         if (host->caps & SDHCI_CAN_VDD_300) {
3920                 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3921
3922                 mmc->max_current_300 = ((max_current_caps &
3923                                    SDHCI_MAX_CURRENT_300_MASK) >>
3924                                    SDHCI_MAX_CURRENT_300_SHIFT) *
3925                                    SDHCI_MAX_CURRENT_MULTIPLIER;
3926         }
3927         if (host->caps & SDHCI_CAN_VDD_180) {
3928                 ocr_avail |= MMC_VDD_165_195;
3929
3930                 mmc->max_current_180 = ((max_current_caps &
3931                                    SDHCI_MAX_CURRENT_180_MASK) >>
3932                                    SDHCI_MAX_CURRENT_180_SHIFT) *
3933                                    SDHCI_MAX_CURRENT_MULTIPLIER;
3934         }
3935
3936         /* If OCR set by host, use it instead. */
3937         if (host->ocr_mask)
3938                 ocr_avail = host->ocr_mask;
3939
3940         /* If OCR set by external regulators, give it highest prio. */
3941         if (mmc->ocr_avail)
3942                 ocr_avail = mmc->ocr_avail;
3943
3944         mmc->ocr_avail = ocr_avail;
3945         mmc->ocr_avail_sdio = ocr_avail;
3946         if (host->ocr_avail_sdio)
3947                 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3948         mmc->ocr_avail_sd = ocr_avail;
3949         if (host->ocr_avail_sd)
3950                 mmc->ocr_avail_sd &= host->ocr_avail_sd;
3951         else /* normal SD controllers don't support 1.8V */
3952                 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3953         mmc->ocr_avail_mmc = ocr_avail;
3954         if (host->ocr_avail_mmc)
3955                 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3956
3957         if (mmc->ocr_avail == 0) {
3958                 pr_err("%s: Hardware doesn't report any supported voltages.\n",
3959                        mmc_hostname(mmc));
3960                 ret = -ENODEV;
3961                 goto unreg;
3962         }
3963
3964         if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
3965                           MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
3966                           MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
3967             (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
3968                 host->flags |= SDHCI_SIGNALING_180;
3969
3970         if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
3971                 host->flags |= SDHCI_SIGNALING_120;
3972
3973         spin_lock_init(&host->lock);
3974
3975         /*
3976          * Maximum number of sectors in one transfer. Limited by SDMA boundary
3977          * size (512KiB). Note some tuning modes impose a 4MiB limit, but
3978          * 512KiB is the smaller limit anyway.
3979          */
3980         mmc->max_req_size = 524288;
3981
3982         /*
3983          * Maximum number of segments. Depends on whether the hardware
3984          * can do scatter/gather or not.
3985          */
3986         if (host->flags & SDHCI_USE_ADMA) {
3987                 mmc->max_segs = SDHCI_MAX_SEGS;
3988         } else if (host->flags & SDHCI_USE_SDMA) {
3989                 mmc->max_segs = 1;
3990                 if (swiotlb_max_segment()) {
3991                         unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
3992                                                 IO_TLB_SEGSIZE;
3993                         mmc->max_req_size = min(mmc->max_req_size,
3994                                                 max_req_size);
3995                 }
3996         } else { /* PIO */
3997                 mmc->max_segs = SDHCI_MAX_SEGS;
3998         }
3999
4000         /*
4001          * Maximum segment size. Could be one segment with the maximum number
4002          * of bytes. When doing hardware scatter/gather, each entry cannot
4003          * be larger than 64 KiB though.
4004          */
4005         if (host->flags & SDHCI_USE_ADMA) {
4006                 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
4007                         mmc->max_seg_size = 65535;
4008                 else
4009                         mmc->max_seg_size = 65536;
4010         } else {
4011                 mmc->max_seg_size = mmc->max_req_size;
4012         }
4013
4014         /*
4015          * Maximum block size. This varies from controller to controller and
4016          * is specified in the capabilities register.
4017          */
4018         if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4019                 mmc->max_blk_size = 2;
4020         } else {
4021                 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4022                                 SDHCI_MAX_BLOCK_SHIFT;
4023                 if (mmc->max_blk_size >= 3) {
4024                         pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4025                                 mmc_hostname(mmc));
4026                         mmc->max_blk_size = 0;
4027                 }
4028         }
4029
4030         mmc->max_blk_size = 512 << mmc->max_blk_size;
4031
4032         /*
4033          * Maximum block count.
4034          */
4035         mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4036
4037         if (mmc->max_segs == 1) {
4038                 /* This may alter mmc->*_blk_* parameters */
4039                 ret = sdhci_allocate_bounce_buffer(host);
4040                 if (ret)
4041                         return ret;
4042         }
4043
4044         return 0;
4045
4046 unreg:
4047         if (!IS_ERR(mmc->supply.vqmmc))
4048                 regulator_disable(mmc->supply.vqmmc);
4049 undma:
4050         if (host->align_buffer)
4051                 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4052                                   host->adma_table_sz, host->align_buffer,
4053                                   host->align_addr);
4054         host->adma_table = NULL;
4055         host->align_buffer = NULL;
4056
4057         return ret;
4058 }
4059 EXPORT_SYMBOL_GPL(sdhci_setup_host);
4060
4061 void sdhci_cleanup_host(struct sdhci_host *host)
4062 {
4063         struct mmc_host *mmc = host->mmc;
4064
4065         if (!IS_ERR(mmc->supply.vqmmc))
4066                 regulator_disable(mmc->supply.vqmmc);
4067
4068         if (host->align_buffer)
4069                 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4070                                   host->adma_table_sz, host->align_buffer,
4071                                   host->align_addr);
4072         host->adma_table = NULL;
4073         host->align_buffer = NULL;
4074 }
4075 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4076
4077 int __sdhci_add_host(struct sdhci_host *host)
4078 {
4079         struct mmc_host *mmc = host->mmc;
4080         int ret;
4081
4082         /*
4083          * Init the finish tasklet.
4084          */
4085         tasklet_init(&host->finish_tasklet,
4086                 sdhci_tasklet_finish, (unsigned long)host);
4087
4088         timer_setup(&host->timer, sdhci_timeout_timer, 0);
4089         timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4090
4091         init_waitqueue_head(&host->buf_ready_int);
4092
4093         sdhci_init(host, 0);
4094
4095         ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4096                                    IRQF_SHARED, mmc_hostname(mmc), host);
4097         if (ret) {
4098                 pr_err("%s: Failed to request IRQ %d: %d\n",
4099                        mmc_hostname(mmc), host->irq, ret);
4100                 goto untasklet;
4101         }
4102
4103         ret = sdhci_led_register(host);
4104         if (ret) {
4105                 pr_err("%s: Failed to register LED device: %d\n",
4106                        mmc_hostname(mmc), ret);
4107                 goto unirq;
4108         }
4109
4110         mmiowb();
4111
4112         ret = mmc_add_host(mmc);
4113         if (ret)
4114                 goto unled;
4115
4116         pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4117                 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4118                 (host->flags & SDHCI_USE_ADMA) ?
4119                 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4120                 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4121
4122         sdhci_enable_card_detection(host);
4123
4124         return 0;
4125
4126 unled:
4127         sdhci_led_unregister(host);
4128 unirq:
4129         sdhci_do_reset(host, SDHCI_RESET_ALL);
4130         sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4131         sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4132         free_irq(host->irq, host);
4133 untasklet:
4134         tasklet_kill(&host->finish_tasklet);
4135
4136         return ret;
4137 }
4138 EXPORT_SYMBOL_GPL(__sdhci_add_host);
4139
4140 int sdhci_add_host(struct sdhci_host *host)
4141 {
4142         int ret;
4143
4144         ret = sdhci_setup_host(host);
4145         if (ret)
4146                 return ret;
4147
4148         ret = __sdhci_add_host(host);
4149         if (ret)
4150                 goto cleanup;
4151
4152         return 0;
4153
4154 cleanup:
4155         sdhci_cleanup_host(host);
4156
4157         return ret;
4158 }
4159 EXPORT_SYMBOL_GPL(sdhci_add_host);
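
/*
 * Illustrative sketch (not compiled): removal mirrors the registration
 * sequence above.  The "dead" argument to sdhci_remove_host() should be
 * non-zero when the controller is already inaccessible (e.g. surprise
 * removal), so that the core skips the controller reset.  The
 * platform_device wiring is an assumption for the example.
 */
#if 0
static int my_sdhci_remove(struct platform_device *pdev)
{
        struct sdhci_host *host = platform_get_drvdata(pdev);

        sdhci_remove_host(host, 0);
        sdhci_free_host(host);

        return 0;
}
#endif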
4160
4161 void sdhci_remove_host(struct sdhci_host *host, int dead)
4162 {
4163         struct mmc_host *mmc = host->mmc;
4164         unsigned long flags;
4165
4166         if (dead) {
4167                 spin_lock_irqsave(&host->lock, flags);
4168
4169                 host->flags |= SDHCI_DEVICE_DEAD;
4170
4171                 if (sdhci_has_requests(host)) {
4172                         pr_err("%s: Controller removed during transfer!\n",
4173                                 mmc_hostname(mmc));
4174                         sdhci_error_out_mrqs(host, -ENOMEDIUM);
4175                 }
4176
4177                 spin_unlock_irqrestore(&host->lock, flags);
4178         }
4179
4180         sdhci_disable_card_detection(host);
4181
4182         mmc_remove_host(mmc);
4183
4184         sdhci_led_unregister(host);
4185
4186         if (!dead)
4187                 sdhci_do_reset(host, SDHCI_RESET_ALL);
4188
4189         sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4190         sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4191         free_irq(host->irq, host);
4192
4193         del_timer_sync(&host->timer);
4194         del_timer_sync(&host->data_timer);
4195
4196         tasklet_kill(&host->finish_tasklet);
4197
4198         if (!IS_ERR(mmc->supply.vqmmc))
4199                 regulator_disable(mmc->supply.vqmmc);
4200
4201         if (host->align_buffer)
4202                 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4203                                   host->adma_table_sz, host->align_buffer,
4204                                   host->align_addr);
4205
4206         host->adma_table = NULL;
4207         host->align_buffer = NULL;
4208 }
4209
4210 EXPORT_SYMBOL_GPL(sdhci_remove_host);
4211
4212 void sdhci_free_host(struct sdhci_host *host)
4213 {
4214         mmc_free_host(host->mmc);
4215 }
4216
4217 EXPORT_SYMBOL_GPL(sdhci_free_host);
4218
4219 /*****************************************************************************\
4220  *                                                                           *
4221  * Driver init/exit                                                          *
4222  *                                                                           *
4223 \*****************************************************************************/
4224
4225 static int __init sdhci_drv_init(void)
4226 {
4227         pr_info(DRIVER_NAME
4228                 ": Secure Digital Host Controller Interface driver\n");
4229         pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4230
4231         return 0;
4232 }
4233
4234 static void __exit sdhci_drv_exit(void)
4235 {
4236 }
4237
4238 module_init(sdhci_drv_init);
4239 module_exit(sdhci_drv_exit);
4240
4241 module_param(debug_quirks, uint, 0444);
4242 module_param(debug_quirks2, uint, 0444);
4243
4244 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4245 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4246 MODULE_LICENSE("GPL");
4247
4248 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4249 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");