// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020 NVIDIA CORPORATION.

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#include <linux/acpi.h>
#include <linux/property.h>

#define QSPI_COMMAND1                           0x000
#define QSPI_BIT_LENGTH(x)                      (((x) & 0x1f) << 0)
#define QSPI_PACKED                             BIT(5)
#define QSPI_INTERFACE_WIDTH_MASK               (0x03 << 7)
#define QSPI_INTERFACE_WIDTH(x)                 (((x) & 0x03) << 7)
#define QSPI_INTERFACE_WIDTH_SINGLE             QSPI_INTERFACE_WIDTH(0)
#define QSPI_INTERFACE_WIDTH_DUAL               QSPI_INTERFACE_WIDTH(1)
#define QSPI_INTERFACE_WIDTH_QUAD               QSPI_INTERFACE_WIDTH(2)
#define QSPI_SDR_DDR_SEL                        BIT(9)
#define QSPI_TX_EN                              BIT(11)
#define QSPI_RX_EN                              BIT(12)
#define QSPI_CS_SW_VAL                          BIT(20)
#define QSPI_CS_SW_HW                           BIT(21)

#define QSPI_CS_POL_INACTIVE(n)                 (1 << (22 + (n)))
#define QSPI_CS_POL_INACTIVE_MASK               (0xF << 22)
#define QSPI_CS_SEL_0                           (0 << 26)
#define QSPI_CS_SEL_1                           (1 << 26)
#define QSPI_CS_SEL_2                           (2 << 26)
#define QSPI_CS_SEL_3                           (3 << 26)
#define QSPI_CS_SEL_MASK                        (3 << 26)
#define QSPI_CS_SEL(x)                          (((x) & 0x3) << 26)

#define QSPI_CONTROL_MODE_0                     (0 << 28)
#define QSPI_CONTROL_MODE_3                     (3 << 28)
#define QSPI_CONTROL_MODE_MASK                  (3 << 28)
#define QSPI_M_S                                BIT(30)
#define QSPI_PIO                                BIT(31)

#define QSPI_COMMAND2                           0x004
#define QSPI_TX_TAP_DELAY(x)                    (((x) & 0x3f) << 10)
#define QSPI_RX_TAP_DELAY(x)                    (((x) & 0xff) << 0)

#define QSPI_CS_TIMING1                         0x008
#define QSPI_SETUP_HOLD(setup, hold)            (((setup) << 4) | (hold))

#define QSPI_CS_TIMING2                         0x00c
#define CYCLES_BETWEEN_PACKETS_0(x)             (((x) & 0x1f) << 0)
#define CS_ACTIVE_BETWEEN_PACKETS_0             BIT(5)

#define QSPI_TRANS_STATUS                       0x010
#define QSPI_BLK_CNT(val)                       (((val) >> 0) & 0xffff)
#define QSPI_RDY                                BIT(30)

#define QSPI_FIFO_STATUS                        0x014
#define QSPI_RX_FIFO_EMPTY                      BIT(0)
#define QSPI_RX_FIFO_FULL                       BIT(1)
#define QSPI_TX_FIFO_EMPTY                      BIT(2)
#define QSPI_TX_FIFO_FULL                       BIT(3)
#define QSPI_RX_FIFO_UNF                        BIT(4)
#define QSPI_RX_FIFO_OVF                        BIT(5)
#define QSPI_TX_FIFO_UNF                        BIT(6)
#define QSPI_TX_FIFO_OVF                        BIT(7)
#define QSPI_ERR                                BIT(8)
#define QSPI_TX_FIFO_FLUSH                      BIT(14)
#define QSPI_RX_FIFO_FLUSH                      BIT(15)
#define QSPI_TX_FIFO_EMPTY_COUNT(val)           (((val) >> 16) & 0x7f)
#define QSPI_RX_FIFO_FULL_COUNT(val)            (((val) >> 23) & 0x7f)

#define QSPI_FIFO_ERROR                         (QSPI_RX_FIFO_UNF | \
                                                 QSPI_RX_FIFO_OVF | \
                                                 QSPI_TX_FIFO_UNF | \
                                                 QSPI_TX_FIFO_OVF)
#define QSPI_FIFO_EMPTY                         (QSPI_RX_FIFO_EMPTY | \
                                                 QSPI_TX_FIFO_EMPTY)

#define QSPI_TX_DATA                            0x018
#define QSPI_RX_DATA                            0x01c

#define QSPI_DMA_CTL                            0x020
#define QSPI_TX_TRIG(n)                         (((n) & 0x3) << 15)
#define QSPI_TX_TRIG_1                          QSPI_TX_TRIG(0)
#define QSPI_TX_TRIG_4                          QSPI_TX_TRIG(1)
#define QSPI_TX_TRIG_8                          QSPI_TX_TRIG(2)
#define QSPI_TX_TRIG_16                         QSPI_TX_TRIG(3)

#define QSPI_RX_TRIG(n)                         (((n) & 0x3) << 19)
#define QSPI_RX_TRIG_1                          QSPI_RX_TRIG(0)
#define QSPI_RX_TRIG_4                          QSPI_RX_TRIG(1)
#define QSPI_RX_TRIG_8                          QSPI_RX_TRIG(2)
#define QSPI_RX_TRIG_16                         QSPI_RX_TRIG(3)

#define QSPI_DMA_EN                             BIT(31)

#define QSPI_DMA_BLK                            0x024
#define QSPI_DMA_BLK_SET(x)                     (((x) & 0xffff) << 0)

#define QSPI_TX_FIFO                            0x108
#define QSPI_RX_FIFO                            0x188

#define QSPI_FIFO_DEPTH                         64

#define QSPI_INTR_MASK                          0x18c
#define QSPI_INTR_RX_FIFO_UNF_MASK              BIT(25)
#define QSPI_INTR_RX_FIFO_OVF_MASK              BIT(26)
#define QSPI_INTR_TX_FIFO_UNF_MASK              BIT(27)
#define QSPI_INTR_TX_FIFO_OVF_MASK              BIT(28)
#define QSPI_INTR_RDY_MASK                      BIT(29)
#define QSPI_INTR_RX_TX_FIFO_ERR                (QSPI_INTR_RX_FIFO_UNF_MASK | \
                                                 QSPI_INTR_RX_FIFO_OVF_MASK | \
                                                 QSPI_INTR_TX_FIFO_UNF_MASK | \
                                                 QSPI_INTR_TX_FIFO_OVF_MASK)

#define QSPI_MISC_REG                           0x194
#define QSPI_NUM_DUMMY_CYCLE(x)                 (((x) & 0xff) << 0)
#define QSPI_DUMMY_CYCLES_MAX                   0xff

#define QSPI_CMB_SEQ_CMD                        0x19c
#define QSPI_COMMAND_VALUE_SET(x)               (((x) & 0xFF) << 0)

#define QSPI_CMB_SEQ_CMD_CFG                    0x1a0
#define QSPI_COMMAND_X1_X2_X4(x)                (((x) & 0x3) << 13)
#define QSPI_COMMAND_X1_X2_X4_MASK              (0x03 << 13)
#define QSPI_COMMAND_SDR_DDR                    BIT(12)
#define QSPI_COMMAND_SIZE_SET(x)                (((x) & 0xFF) << 0)

#define QSPI_GLOBAL_CONFIG                      0x1a4
#define QSPI_CMB_SEQ_EN                         BIT(0)
#define QSPI_TPM_WAIT_POLL_EN                   BIT(1)

#define QSPI_CMB_SEQ_ADDR                       0x1a8
#define QSPI_ADDRESS_VALUE_SET(x)               (((x) & 0xFFFF) << 0)

#define QSPI_CMB_SEQ_ADDR_CFG                   0x1ac
#define QSPI_ADDRESS_X1_X2_X4(x)                (((x) & 0x3) << 13)
#define QSPI_ADDRESS_X1_X2_X4_MASK              (0x03 << 13)
#define QSPI_ADDRESS_SDR_DDR                    BIT(12)
#define QSPI_ADDRESS_SIZE_SET(x)                (((x) & 0xFF) << 0)

#define DATA_DIR_TX                             BIT(0)
#define DATA_DIR_RX                             BIT(1)

#define QSPI_DMA_TIMEOUT                        (msecs_to_jiffies(1000))
#define DEFAULT_QSPI_DMA_BUF_LEN                (64 * 1024)
#define CMD_TRANSFER                            0
#define ADDR_TRANSFER                           1
#define DATA_TRANSFER                           2

struct tegra_qspi_soc_data {
        bool has_dma;
        bool cmb_xfer_capable;
        bool supports_tpm;
        unsigned int cs_count;
};

struct tegra_qspi_client_data {
        u32 tx_clk_tap_delay;
        u32 rx_clk_tap_delay;
};

struct tegra_qspi {
        struct device                           *dev;
        struct spi_controller                   *host;
        /* lock to protect data accessed by irq */
        spinlock_t                              lock;

        struct clk                              *clk;
        void __iomem                            *base;
        phys_addr_t                             phys;
        unsigned int                            irq;

        u32                                     cur_speed;
        unsigned int                            cur_pos;
        unsigned int                            words_per_32bit;
        unsigned int                            bytes_per_word;
        unsigned int                            curr_dma_words;
        unsigned int                            cur_direction;

        unsigned int                            cur_rx_pos;
        unsigned int                            cur_tx_pos;

        unsigned int                            dma_buf_size;
        unsigned int                            max_buf_size;
        bool                                    is_curr_dma_xfer;

        struct completion                       rx_dma_complete;
        struct completion                       tx_dma_complete;

        u32                                     tx_status;
        u32                                     rx_status;
        u32                                     status_reg;
        bool                                    is_packed;
        bool                                    use_dma;

        u32                                     command1_reg;
        u32                                     dma_control_reg;
        u32                                     def_command1_reg;
        u32                                     def_command2_reg;
        u32                                     spi_cs_timing1;
        u32                                     spi_cs_timing2;
        u8                                      dummy_cycles;

        struct completion                       xfer_completion;
        struct spi_transfer                     *curr_xfer;

        struct dma_chan                         *rx_dma_chan;
        u32                                     *rx_dma_buf;
        dma_addr_t                              rx_dma_phys;
        struct dma_async_tx_descriptor          *rx_dma_desc;

        struct dma_chan                         *tx_dma_chan;
        u32                                     *tx_dma_buf;
        dma_addr_t                              tx_dma_phys;
        struct dma_async_tx_descriptor          *tx_dma_desc;
        const struct tegra_qspi_soc_data        *soc_data;
};

static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
{
        return readl(tqspi->base + offset);
}

static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
{
        writel(value, tqspi->base + offset);

        /* read back register to make sure that register writes completed */
        if (offset != QSPI_TX_FIFO)
                readl(tqspi->base + QSPI_COMMAND1);
}

static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
{
        u32 value;

        /* write 1 to clear status register */
        value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
        tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);

        value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
        if (!(value & QSPI_INTR_RDY_MASK)) {
                value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
                tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
        }

        /* clear fifo status error if any */
        value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
        if (value & QSPI_ERR)
                tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
}

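/*
 * Compute the parameters for the next chunk of the transfer: select
 * packed or unpacked mode, derive the number of words to move and
 * return the transfer size in 32-bit FIFO words.
 */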
static unsigned int
tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
        unsigned int max_word, max_len, total_fifo_words;
        unsigned int remain_len = t->len - tqspi->cur_pos;
        unsigned int bits_per_word = t->bits_per_word;

        tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);

        /*
         * The Tegra QSPI controller supports packed and unpacked mode
         * transfers. Packed mode is used for transfers of 8, 16 or 32 bits
         * per word that are at least one 32-bit word long; unpacked mode
         * is used for all other transfers.
         */

        if ((bits_per_word == 8 || bits_per_word == 16 ||
             bits_per_word == 32) && t->len > 3) {
                tqspi->is_packed = true;
                tqspi->words_per_32bit = 32 / bits_per_word;
        } else {
                tqspi->is_packed = false;
                tqspi->words_per_32bit = 1;
        }

        if (tqspi->is_packed) {
                max_len = min(remain_len, tqspi->max_buf_size);
                tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
                total_fifo_words = (max_len + 3) / 4;
        } else {
                max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
                max_word = min(max_word, tqspi->max_buf_size / 4);
                tqspi->curr_dma_words = max_word;
                total_fifo_words = max_word;
        }

        return total_fifo_words;
}

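/*
 * Fill the TX FIFO from the client's tx_buf, limited by the current
 * FIFO space, and return the number of words written. In packed mode
 * each 32-bit FIFO word carries multiple packets; in unpacked mode it
 * carries a single packet in its low bits.
 */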
static unsigned int
tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
        unsigned int written_words, fifo_words_left, count;
        unsigned int len, tx_empty_count, max_n_32bit, i;
        u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
        u32 fifo_status;

        fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
        tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);

        if (tqspi->is_packed) {
                fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
                written_words = min(fifo_words_left, tqspi->curr_dma_words);
                len = written_words * tqspi->bytes_per_word;
                max_n_32bit = DIV_ROUND_UP(len, 4);
                for (count = 0; count < max_n_32bit; count++) {
                        u32 x = 0;

                        for (i = 0; (i < 4) && len; i++, len--)
                                x |= (u32)(*tx_buf++) << (i * 8);
                        tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
                }

                tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
        } else {
                unsigned int write_bytes;
                u8 bytes_per_word = tqspi->bytes_per_word;

                max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
                written_words = max_n_32bit;
                len = written_words * tqspi->bytes_per_word;
                if (len > t->len - tqspi->cur_pos)
                        len = t->len - tqspi->cur_pos;
                write_bytes = len;
                for (count = 0; count < max_n_32bit; count++) {
                        u32 x = 0;

                        for (i = 0; len && (i < bytes_per_word); i++, len--)
                                x |= (u32)(*tx_buf++) << (i * 8);
                        tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
                }

                tqspi->cur_tx_pos += write_bytes;
        }

        return written_words;
}

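/*
 * Drain the RX FIFO into the client's rx_buf and return the number of
 * words read. In unpacked mode the bits above bits_per_word in each
 * FIFO word are masked off.
 */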
static unsigned int
tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
        u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
        unsigned int len, rx_full_count, count, i;
        unsigned int read_words = 0;
        u32 fifo_status, x;

        fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
        rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
        if (tqspi->is_packed) {
                len = tqspi->curr_dma_words * tqspi->bytes_per_word;
                for (count = 0; count < rx_full_count; count++) {
                        x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);

                        for (i = 0; len && (i < 4); i++, len--)
                                *rx_buf++ = (x >> (i * 8)) & 0xff;
                }

                read_words += tqspi->curr_dma_words;
                tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
        } else {
                u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
                u8 bytes_per_word = tqspi->bytes_per_word;
                unsigned int read_bytes;

                len = rx_full_count * bytes_per_word;
                if (len > t->len - tqspi->cur_pos)
                        len = t->len - tqspi->cur_pos;
                read_bytes = len;
                for (count = 0; count < rx_full_count; count++) {
                        x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;

                        for (i = 0; len && (i < bytes_per_word); i++, len--)
                                *rx_buf++ = (x >> (i * 8)) & 0xff;
                }

                read_words += rx_full_count;
                tqspi->cur_rx_pos += read_bytes;
        }

        return read_words;
}

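/*
 * Prepare the TX DMA data: in packed mode the client buffer is mapped
 * directly, so only the position is advanced; in unpacked mode the
 * bytes are expanded into the bounce buffer, one packet per word.
 */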
static void
tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
        dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
                                tqspi->dma_buf_size, DMA_TO_DEVICE);

        /*
         * In packed mode, each FIFO word may contain multiple packets
         * based on bits per word, so all bytes in each FIFO word are valid.
         *
         * In unpacked mode, each FIFO word contains a single packet; any
         * remaining bits in the FIFO word beyond bits per word are ignored
         * by the hardware.
         */
        if (tqspi->is_packed) {
                tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
        } else {
                u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
                unsigned int i, count, consume, write_bytes;

                /*
                 * Fill tx_dma_buf to contain a single packet in each word
                 * based on bits per word from the SPI core tx_buf.
                 */
                consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
                if (consume > t->len - tqspi->cur_pos)
                        consume = t->len - tqspi->cur_pos;
                write_bytes = consume;
                for (count = 0; count < tqspi->curr_dma_words; count++) {
                        u32 x = 0;

                        for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
                                x |= (u32)(*tx_buf++) << (i * 8);
                        tqspi->tx_dma_buf[count] = x;
                }

                tqspi->cur_tx_pos += write_bytes;
        }

        dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
                                   tqspi->dma_buf_size, DMA_TO_DEVICE);
}

static void
tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
        dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
                                tqspi->dma_buf_size, DMA_FROM_DEVICE);

        if (tqspi->is_packed) {
                tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
        } else {
                unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
                u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
                unsigned int i, count, consume, read_bytes;

                /*
                 * Each FIFO word contains a single data packet.
                 * Skip invalid bits in each FIFO word based on bits per word
                 * and align bytes while filling in the SPI core rx_buf.
                 */
                consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
                if (consume > t->len - tqspi->cur_pos)
                        consume = t->len - tqspi->cur_pos;
                read_bytes = consume;
                for (count = 0; count < tqspi->curr_dma_words; count++) {
                        u32 x = tqspi->rx_dma_buf[count] & rx_mask;

                        for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
                                *rx_buf++ = (x >> (i * 8)) & 0xff;
                }

                tqspi->cur_rx_pos += read_bytes;
        }

        dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
                                   tqspi->dma_buf_size, DMA_FROM_DEVICE);
}

static void tegra_qspi_dma_complete(void *args)
{
        struct completion *dma_complete = args;

        complete(dma_complete);
}

static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
        dma_addr_t tx_dma_phys;

        reinit_completion(&tqspi->tx_dma_complete);

        if (tqspi->is_packed)
                tx_dma_phys = t->tx_dma;
        else
                tx_dma_phys = tqspi->tx_dma_phys;

        tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
                                                         len, DMA_MEM_TO_DEV,
                                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

        if (!tqspi->tx_dma_desc) {
                dev_err(tqspi->dev, "Unable to get TX descriptor\n");
                return -EIO;
        }

        tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
        tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
        dmaengine_submit(tqspi->tx_dma_desc);
        dma_async_issue_pending(tqspi->tx_dma_chan);

        return 0;
}

static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
        dma_addr_t rx_dma_phys;

        reinit_completion(&tqspi->rx_dma_complete);

        if (tqspi->is_packed)
                rx_dma_phys = t->rx_dma;
        else
                rx_dma_phys = tqspi->rx_dma_phys;

        tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
                                                         len, DMA_DEV_TO_MEM,
                                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

        if (!tqspi->rx_dma_desc) {
                dev_err(tqspi->dev, "Unable to get RX descriptor\n");
                return -EIO;
        }

        tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
        tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
        dmaengine_submit(tqspi->rx_dma_desc);
        dma_async_issue_pending(tqspi->rx_dma_chan);

        return 0;
}

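/*
 * Flush both FIFOs and poll, optionally in atomic context, until the
 * hardware reports them empty.
 */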
static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
{
        void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
        u32 val;

        val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
        if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
                return 0;

        val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
        tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);

        if (!atomic)
                return readl_relaxed_poll_timeout(addr, val,
                                                  (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
                                                  1000, 1000000);

        return readl_relaxed_poll_timeout_atomic(addr, val,
                                                 (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
                                                 1000, 1000000);
}

static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
{
        u32 intr_mask;

        intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
        intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
        tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
}

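/*
 * Map the client buffers for DMA (packed mode only); the length is
 * rounded up to a multiple of 4 bytes to match the FIFO word size.
 */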
static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
        u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
        u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
        unsigned int len;

        len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

        if (t->tx_buf) {
                t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
                if (dma_mapping_error(tqspi->dev, t->tx_dma))
                        return -ENOMEM;
        }

        if (t->rx_buf) {
                t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
                if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
                        if (t->tx_buf)
                                dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
                        return -ENOMEM;
                }
        }

        return 0;
}

static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
        unsigned int len;

        len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

        dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
        dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
}

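/*
 * Program the DMA block count and trigger levels, configure and start
 * the dmaengine channels, and finally set QSPI_DMA_EN to kick off the
 * transfer. The trigger level (1, 4 or 8 words) is chosen from the
 * transfer length so that bursts divide the data evenly.
 */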
static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
        struct dma_slave_config dma_sconfig = { 0 };
        unsigned int len;
        u8 dma_burst;
        int ret = 0;
        u32 val;

        if (tqspi->is_packed) {
                ret = tegra_qspi_dma_map_xfer(tqspi, t);
                if (ret < 0)
                        return ret;
        }

        val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
        tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);

        tegra_qspi_unmask_irq(tqspi);

        if (tqspi->is_packed)
                len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
        else
                len = tqspi->curr_dma_words * 4;

        /* set attention level based on length of transfer */
        val = 0;
        if (len & 0xf) {
                val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
                dma_burst = 1;
        } else if ((len >> 4) & 0x1) {
                val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
                dma_burst = 4;
        } else {
                val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
                dma_burst = 8;
        }

        tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
        tqspi->dma_control_reg = val;

        dma_sconfig.device_fc = true;
        if (tqspi->cur_direction & DATA_DIR_TX) {
                dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
                dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                dma_sconfig.dst_maxburst = dma_burst;
                ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
                if (ret < 0) {
                        dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
                        return ret;
                }

                tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
                ret = tegra_qspi_start_tx_dma(tqspi, t, len);
                if (ret < 0) {
                        dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
                        return ret;
                }
        }

        if (tqspi->cur_direction & DATA_DIR_RX) {
                dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
                dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                dma_sconfig.src_maxburst = dma_burst;
                ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
                if (ret < 0) {
                        dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
                        return ret;
                }

                dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
                                           tqspi->dma_buf_size,
                                           DMA_FROM_DEVICE);

                ret = tegra_qspi_start_rx_dma(tqspi, t, len);
                if (ret < 0) {
                        dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
                        if (tqspi->cur_direction & DATA_DIR_TX)
                                dmaengine_terminate_all(tqspi->tx_dma_chan);
                        return ret;
                }
        }

        tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);

        tqspi->is_curr_dma_xfer = true;
        tqspi->dma_control_reg = val;
        val |= QSPI_DMA_EN;
        tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);

        return ret;
}

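/*
 * Start a PIO transfer: pre-fill the TX FIFO if needed, program the
 * block count and set QSPI_PIO. The remaining words are moved from
 * interrupt context.
 */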
static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
{
        u32 val;
        unsigned int cur_words;

        if (qspi->cur_direction & DATA_DIR_TX)
                cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
        else
                cur_words = qspi->curr_dma_words;

        val = QSPI_DMA_BLK_SET(cur_words - 1);
        tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);

        tegra_qspi_unmask_irq(qspi);

        qspi->is_curr_dma_xfer = false;
        val = qspi->command1_reg;
        val |= QSPI_PIO;
        tegra_qspi_writel(qspi, val, QSPI_COMMAND1);

        return 0;
}

static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
{
        if (!tqspi->soc_data->has_dma)
                return;

        if (tqspi->tx_dma_buf) {
                dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
                                  tqspi->tx_dma_buf, tqspi->tx_dma_phys);
                tqspi->tx_dma_buf = NULL;
        }

        if (tqspi->tx_dma_chan) {
                dma_release_channel(tqspi->tx_dma_chan);
                tqspi->tx_dma_chan = NULL;
        }

        if (tqspi->rx_dma_buf) {
                dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
                                  tqspi->rx_dma_buf, tqspi->rx_dma_phys);
                tqspi->rx_dma_buf = NULL;
        }

        if (tqspi->rx_dma_chan) {
                dma_release_channel(tqspi->rx_dma_chan);
                tqspi->rx_dma_chan = NULL;
        }
}

static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
{
        struct dma_chan *dma_chan;
        dma_addr_t dma_phys;
        u32 *dma_buf;
        int err;

        if (!tqspi->soc_data->has_dma)
                return 0;

        dma_chan = dma_request_chan(tqspi->dev, "rx");
        if (IS_ERR(dma_chan)) {
                err = PTR_ERR(dma_chan);
                goto err_out;
        }

        tqspi->rx_dma_chan = dma_chan;

        dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
        if (!dma_buf) {
                err = -ENOMEM;
                goto err_out;
        }

        tqspi->rx_dma_buf = dma_buf;
        tqspi->rx_dma_phys = dma_phys;

        dma_chan = dma_request_chan(tqspi->dev, "tx");
        if (IS_ERR(dma_chan)) {
                err = PTR_ERR(dma_chan);
                goto err_out;
        }

        tqspi->tx_dma_chan = dma_chan;

        dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
        if (!dma_buf) {
                err = -ENOMEM;
                goto err_out;
        }

        tqspi->tx_dma_buf = dma_buf;
        tqspi->tx_dma_phys = dma_phys;
        tqspi->use_dma = true;

        return 0;

err_out:
        tegra_qspi_deinit_dma(tqspi);

        if (err != -EPROBE_DEFER) {
                dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
                dev_err(tqspi->dev, "falling back to PIO\n");
                return 0;
        }

        return err;
}

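/*
 * Build the QSPI_COMMAND1 value for a transfer. On the first transfer
 * of a message this also sets the clock rate and programs the SPI
 * mode, chip select and tap delays; later transfers only update the
 * bit length.
 */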
static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
                                         bool is_first_of_msg)
{
        struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
        struct tegra_qspi_client_data *cdata = spi->controller_data;
        u32 command1, command2, speed = t->speed_hz;
        u8 bits_per_word = t->bits_per_word;
        u32 tx_tap = 0, rx_tap = 0;
        int req_mode;

        if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
                clk_set_rate(tqspi->clk, speed);
                tqspi->cur_speed = speed;
        }

        tqspi->cur_pos = 0;
        tqspi->cur_rx_pos = 0;
        tqspi->cur_tx_pos = 0;
        tqspi->curr_xfer = t;

        if (is_first_of_msg) {
                tegra_qspi_mask_clear_irq(tqspi);

                command1 = tqspi->def_command1_reg;
                command1 |= QSPI_CS_SEL(spi_get_chipselect(spi, 0));
                command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);

                command1 &= ~QSPI_CONTROL_MODE_MASK;
                req_mode = spi->mode & 0x3;
                if (req_mode == SPI_MODE_3)
                        command1 |= QSPI_CONTROL_MODE_3;
                else
                        command1 |= QSPI_CONTROL_MODE_0;

                if (spi->mode & SPI_CS_HIGH)
                        command1 |= QSPI_CS_SW_VAL;
                else
                        command1 &= ~QSPI_CS_SW_VAL;
                tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

                if (cdata && cdata->tx_clk_tap_delay)
                        tx_tap = cdata->tx_clk_tap_delay;

                if (cdata && cdata->rx_clk_tap_delay)
                        rx_tap = cdata->rx_clk_tap_delay;

                command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
                if (command2 != tqspi->def_command2_reg)
                        tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);

        } else {
                command1 = tqspi->command1_reg;
                command1 &= ~QSPI_BIT_LENGTH(~0);
                command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
        }

        command1 &= ~QSPI_SDR_DDR_SEL;

        return command1;
}

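/*
 * Program bus width, packed mode, direction and dummy cycles for this
 * transfer, then start it in DMA mode when it does not fit in the
 * FIFO, or in PIO mode otherwise.
 */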
static int tegra_qspi_start_transfer_one(struct spi_device *spi,
                                         struct spi_transfer *t, u32 command1)
{
        struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
        unsigned int total_fifo_words;
        u8 bus_width = 0;
        int ret;

        total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);

        command1 &= ~QSPI_PACKED;
        if (tqspi->is_packed)
                command1 |= QSPI_PACKED;
        tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

        tqspi->cur_direction = 0;

        command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
        if (t->rx_buf) {
                command1 |= QSPI_RX_EN;
                tqspi->cur_direction |= DATA_DIR_RX;
                bus_width = t->rx_nbits;
        }

        if (t->tx_buf) {
                command1 |= QSPI_TX_EN;
                tqspi->cur_direction |= DATA_DIR_TX;
                bus_width = t->tx_nbits;
        }

        command1 &= ~QSPI_INTERFACE_WIDTH_MASK;

        if (bus_width == SPI_NBITS_QUAD)
                command1 |= QSPI_INTERFACE_WIDTH_QUAD;
        else if (bus_width == SPI_NBITS_DUAL)
                command1 |= QSPI_INTERFACE_WIDTH_DUAL;
        else
                command1 |= QSPI_INTERFACE_WIDTH_SINGLE;

        tqspi->command1_reg = command1;

        tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);

        ret = tegra_qspi_flush_fifos(tqspi, false);
        if (ret < 0)
                return ret;

        if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
                ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
        else
                ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);

        return ret;
}

static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
        struct tegra_qspi_client_data *cdata;
        struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);

        cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
        if (!cdata)
                return NULL;

        device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
                                 &cdata->tx_clk_tap_delay);
        device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
                                 &cdata->rx_clk_tap_delay);

        return cdata;
}

static int tegra_qspi_setup(struct spi_device *spi)
{
        struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
        struct tegra_qspi_client_data *cdata = spi->controller_data;
        unsigned long flags;
        u32 val;
        int ret;

        ret = pm_runtime_resume_and_get(tqspi->dev);
        if (ret < 0) {
                dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
                return ret;
        }

        if (!cdata) {
                cdata = tegra_qspi_parse_cdata_dt(spi);
                spi->controller_data = cdata;
        }
        spin_lock_irqsave(&tqspi->lock, flags);

        /* keep default cs state to inactive */
        val = tqspi->def_command1_reg;
        val |= QSPI_CS_SEL(spi_get_chipselect(spi, 0));
        if (spi->mode & SPI_CS_HIGH)
                val &= ~QSPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
        else
                val |= QSPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));

        tqspi->def_command1_reg = val;
        tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);

        spin_unlock_irqrestore(&tqspi->lock, flags);

        pm_runtime_put(tqspi->dev);

        return 0;
}

static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
{
        dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
        dev_dbg(tqspi->dev, "Command1:    0x%08x | Command2:    0x%08x\n",
                tegra_qspi_readl(tqspi, QSPI_COMMAND1),
                tegra_qspi_readl(tqspi, QSPI_COMMAND2));
        dev_dbg(tqspi->dev, "DMA_CTL:     0x%08x | DMA_BLK:     0x%08x\n",
                tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
                tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
        dev_dbg(tqspi->dev, "INTR_MASK:   0x%08x | MISC_REG:    0x%08x\n",
                tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
                tegra_qspi_readl(tqspi, QSPI_MISC_REG));
        dev_dbg(tqspi->dev, "TRANS_STAT:  0x%08x | FIFO_STATUS: 0x%08x\n",
                tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
                tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
}

static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
{
        dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
        tegra_qspi_dump_regs(tqspi);
        tegra_qspi_flush_fifos(tqspi, true);
        if (device_reset(tqspi->dev) < 0)
                dev_warn_once(tqspi->dev, "device reset failed\n");
}

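/*
 * Drive the software chip-select to its inactive level, then restore
 * the default command register.
 */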
static void tegra_qspi_transfer_end(struct spi_device *spi)
{
        struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
        int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;

        if (cs_val)
                tqspi->command1_reg |= QSPI_CS_SW_VAL;
        else
                tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
        tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
        tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
}

static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
{
        u32 cmd_config = 0;

        /* build the command configuration value */
        if (is_ddr)
                cmd_config |= QSPI_COMMAND_SDR_DDR;
        else
                cmd_config &= ~QSPI_COMMAND_SDR_DDR;

        cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
        cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);

        return cmd_config;
}

static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
{
        u32 addr_config = 0;

        /* build the address configuration value */
        is_ddr = 0;     /* only SDR mode is supported */
        bus_width = 0;  /* only X1 mode is supported */

        if (is_ddr)
                addr_config |= QSPI_ADDRESS_SDR_DDR;
        else
                addr_config &= ~QSPI_ADDRESS_SDR_DDR;

        addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
        addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);

        return addr_config;
}

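/*
 * Execute a message as a combined sequence: the first transfer supplies
 * the command byte, the second the address, and the third moves the
 * data, with command and address issued by the hardware in X1 SDR mode.
 */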
static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
                                        struct spi_message *msg)
{
        bool is_first_msg = true;
        struct spi_transfer *xfer;
        struct spi_device *spi = msg->spi;
        u8 transfer_phase = 0;
        u32 cmd1 = 0, dma_ctl = 0;
        int ret = 0;
        u32 address_value = 0;
        u32 cmd_config = 0, addr_config = 0;
        u32 val = 0;
        u8 cmd_value = 0;

        /* enable combined sequence mode */
        val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
        if (spi->mode & SPI_TPM_HW_FLOW) {
                if (tqspi->soc_data->supports_tpm)
                        val |= QSPI_TPM_WAIT_POLL_EN;
                else
                        return -EIO;
        }
        val |= QSPI_CMB_SEQ_EN;
        tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
        /* process individual transfer list */
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                switch (transfer_phase) {
                case CMD_TRANSFER:
                        /* X1 SDR mode */
                        cmd_config = tegra_qspi_cmd_config(false, 0, xfer->len);
                        cmd_value = *((const u8 *)(xfer->tx_buf));
                        break;
                case ADDR_TRANSFER:
                        /* X1 SDR mode */
                        addr_config = tegra_qspi_addr_config(false, 0, xfer->len);
                        address_value = *((const u32 *)(xfer->tx_buf));
                        break;
                case DATA_TRANSFER:
                        /* program command and address values */
                        tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
                        tegra_qspi_writel(tqspi, address_value, QSPI_CMB_SEQ_ADDR);
                        /* program command and address configurations */
                        tegra_qspi_writel(tqspi, cmd_config, QSPI_CMB_SEQ_CMD_CFG);
                        tegra_qspi_writel(tqspi, addr_config, QSPI_CMB_SEQ_ADDR_CFG);

                        reinit_completion(&tqspi->xfer_completion);
                        cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);
                        ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
                        if (ret < 0) {
                                dev_err(tqspi->dev, "failed to start transfer-one: %d\n",
                                        ret);
                                return ret;
                        }

                        is_first_msg = false;
                        ret = wait_for_completion_timeout(&tqspi->xfer_completion,
                                                          QSPI_DMA_TIMEOUT);
                        if (WARN_ON(ret == 0)) {
                                dev_err(tqspi->dev, "QSPI transfer timed out\n");
                                if (tqspi->is_curr_dma_xfer &&
                                    (tqspi->cur_direction & DATA_DIR_TX))
                                        dmaengine_terminate_all(tqspi->tx_dma_chan);

                                if (tqspi->is_curr_dma_xfer &&
                                    (tqspi->cur_direction & DATA_DIR_RX))
                                        dmaengine_terminate_all(tqspi->rx_dma_chan);

                                /* abort the transfer by clearing the pio/dma bit */
                                if (!tqspi->is_curr_dma_xfer) {
                                        cmd1 = tegra_qspi_readl(tqspi, QSPI_COMMAND1);
                                        cmd1 &= ~QSPI_PIO;
                                        tegra_qspi_writel(tqspi, cmd1, QSPI_COMMAND1);
                                } else {
                                        dma_ctl = tegra_qspi_readl(tqspi, QSPI_DMA_CTL);
                                        dma_ctl &= ~QSPI_DMA_EN;
                                        tegra_qspi_writel(tqspi, dma_ctl, QSPI_DMA_CTL);
                                }

                                /* reset the controller if a timeout happens */
                                if (device_reset(tqspi->dev) < 0)
                                        dev_warn_once(tqspi->dev,
                                                      "device reset failed\n");
                                ret = -EIO;
                                goto exit;
                        }

                        if (tqspi->tx_status || tqspi->rx_status) {
                                dev_err(tqspi->dev, "QSPI transfer failed\n");
                                tqspi->tx_status = 0;
                                tqspi->rx_status = 0;
                                ret = -EIO;
                                goto exit;
                        }
                        if (!xfer->cs_change) {
                                tegra_qspi_transfer_end(spi);
                                spi_transfer_delay_exec(xfer);
                        }
                        break;
                default:
                        ret = -EINVAL;
                        goto exit;
                }
                msg->actual_length += xfer->len;
                transfer_phase++;
        }
        ret = 0;

exit:
        msg->status = ret;
        if (ret < 0) {
                tegra_qspi_transfer_end(spi);
                spi_transfer_delay_exec(xfer);
        }

        return ret;
}

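/*
 * Execute a message as individual transfers. A dummy-data transfer
 * following the current transfer is folded into it by programming the
 * equivalent number of dummy clock cycles, and is then skipped.
 */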
static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
                                            struct spi_message *msg)
{
        struct spi_device *spi = msg->spi;
        struct spi_transfer *transfer;
        bool is_first_msg = true;
        int ret = 0;
        u32 val = 0;

        msg->status = 0;
        msg->actual_length = 0;
        tqspi->tx_status = 0;
        tqspi->rx_status = 0;

        /* disable combined sequence mode */
        val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
        val &= ~QSPI_CMB_SEQ_EN;
        if (tqspi->soc_data->supports_tpm)
                val &= ~QSPI_TPM_WAIT_POLL_EN;
        tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
        list_for_each_entry(transfer, &msg->transfers, transfer_list) {
                struct spi_transfer *xfer = transfer;
                u8 dummy_bytes = 0;
                u32 cmd1;

                tqspi->dummy_cycles = 0;
                /*
                 * Tegra QSPI hardware supports dummy bytes transfer after actual transfer
                 * bytes based on programmed dummy clock cycles in the QSPI_MISC register.
                 * So, check if the next transfer is dummy data transfer and program dummy
                 * clock cycles along with the current transfer and skip next transfer.
                 */
                if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
                        struct spi_transfer *next_xfer;

                        next_xfer = list_next_entry(xfer, transfer_list);
                        if (next_xfer->dummy_data) {
                                u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;

                                if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
                                        tqspi->dummy_cycles = dummy_cycles;
                                        dummy_bytes = next_xfer->len;
                                        transfer = next_xfer;
                                }
                        }
                }

                reinit_completion(&tqspi->xfer_completion);

                cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);

                ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
                if (ret < 0) {
                        dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
                        goto complete_xfer;
                }

                ret = wait_for_completion_timeout(&tqspi->xfer_completion,
                                                  QSPI_DMA_TIMEOUT);
                if (WARN_ON(ret == 0)) {
                        dev_err(tqspi->dev, "transfer timeout\n");
                        if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
                                dmaengine_terminate_all(tqspi->tx_dma_chan);
                        if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
                                dmaengine_terminate_all(tqspi->rx_dma_chan);
                        tegra_qspi_handle_error(tqspi);
                        ret = -EIO;
                        goto complete_xfer;
                }

                if (tqspi->tx_status || tqspi->rx_status) {
                        tegra_qspi_handle_error(tqspi);
                        ret = -EIO;
                        goto complete_xfer;
                }

                msg->actual_length += xfer->len + dummy_bytes;

complete_xfer:
                if (ret < 0) {
                        tegra_qspi_transfer_end(spi);
                        spi_transfer_delay_exec(xfer);
                        goto exit;
                }

                if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
                        /* de-activate CS after the last transfer only when cs_change is not set */
                        if (!xfer->cs_change) {
                                tegra_qspi_transfer_end(spi);
                                spi_transfer_delay_exec(xfer);
                        }
                } else if (xfer->cs_change) {
                        /* de-activate CS between transfers only when cs_change is set */
                        tegra_qspi_transfer_end(spi);
                        spi_transfer_delay_exec(xfer);
                }
        }

        ret = 0;
exit:
        msg->status = ret;

        return ret;
}

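/*
 * A message qualifies for the combined sequence only if the controller
 * supports it and the message is exactly command (at most 2 bytes),
 * address (3 to 4 bytes) and data, with the data fitting in the FIFO
 * when DMA is not available.
 */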
1294 static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
1295                                         struct spi_message *msg)
1296 {
1297         int transfer_count = 0;
1298         struct spi_transfer *xfer;
1299
1300         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1301                 transfer_count++;
1302         }
1303         if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
1304                 return false;
1305         xfer = list_first_entry(&msg->transfers, typeof(*xfer),
1306                                 transfer_list);
1307         if (xfer->len > 2)
1308                 return false;
1309         xfer = list_next_entry(xfer, transfer_list);
1310         if (xfer->len > 4 || xfer->len < 3)
1311                 return false;
1312         xfer = list_next_entry(xfer, transfer_list);
1313         if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
1314                 return false;
1315
1316         return true;
1317 }
1318
1319 static int tegra_qspi_transfer_one_message(struct spi_controller *host,
1320                                            struct spi_message *msg)
1321 {
1322         struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
1323         int ret;
1324
1325         if (tegra_qspi_validate_cmb_seq(tqspi, msg))
1326                 ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
1327         else
1328                 ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
1329
1330         spi_finalize_current_message(host);
1331
1332         return ret;
1333 }
1334
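/*
 * Completion path for PIO transfers: on error, report and wake the waiter;
 * otherwise drain the RX FIFO, advance the transfer position, and either
 * complete the transfer or start the next FIFO-sized chunk.
 */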
1335 static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
1336 {
1337         struct spi_transfer *t = tqspi->curr_xfer;
1338         unsigned long flags;
1339
1340         spin_lock_irqsave(&tqspi->lock, flags);
1341
1342         if (tqspi->tx_status || tqspi->rx_status) {
1343                 tegra_qspi_handle_error(tqspi);
1344                 complete(&tqspi->xfer_completion);
1345                 goto exit;
1346         }
1347
1348         if (tqspi->cur_direction & DATA_DIR_RX)
1349                 tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);
1350
1351         if (tqspi->cur_direction & DATA_DIR_TX)
1352                 tqspi->cur_pos = tqspi->cur_tx_pos;
1353         else
1354                 tqspi->cur_pos = tqspi->cur_rx_pos;
1355
1356         if (tqspi->cur_pos == t->len) {
1357                 complete(&tqspi->xfer_completion);
1358                 goto exit;
1359         }
1360
1361         tegra_qspi_calculate_curr_xfer_param(tqspi, t);
1362         tegra_qspi_start_cpu_based_transfer(tqspi, t);
1363 exit:
1364         spin_unlock_irqrestore(&tqspi->lock, flags);
1365         return IRQ_HANDLED;
1366 }
1367
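/*
 * Completion path for DMA transfers: wait for the TX/RX DMA completions
 * (terminating the channels on error or timeout), copy received data to the
 * client buffer, and either finish the transfer or start the next chunk,
 * falling back to PIO when the remainder fits in the FIFO.
 */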
1368 static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
1369 {
1370         struct spi_transfer *t = tqspi->curr_xfer;
1371         unsigned int total_fifo_words;
1372         unsigned long flags;
1373         long wait_status;
1374         int err = 0;
1375
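        /* Track failures per direction: err bit 0 = TX DMA, bit 1 = RX DMA */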
1376         if (tqspi->cur_direction & DATA_DIR_TX) {
1377                 if (tqspi->tx_status) {
1378                         dmaengine_terminate_all(tqspi->tx_dma_chan);
1379                         err += 1;
1380                 } else {
1381                         wait_status = wait_for_completion_interruptible_timeout(
1382                                 &tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
1383                         if (wait_status <= 0) {
1384                                 dmaengine_terminate_all(tqspi->tx_dma_chan);
1385                                 dev_err(tqspi->dev, "failed TX DMA transfer\n");
1386                                 err += 1;
1387                         }
1388                 }
1389         }
1390
1391         if (tqspi->cur_direction & DATA_DIR_RX) {
1392                 if (tqspi->rx_status) {
1393                         dmaengine_terminate_all(tqspi->rx_dma_chan);
1394                         err += 2;
1395                 } else {
1396                         wait_status = wait_for_completion_interruptible_timeout(
1397                                 &tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
1398                         if (wait_status <= 0) {
1399                                 dmaengine_terminate_all(tqspi->rx_dma_chan);
1400                                 dev_err(tqspi->dev, "failed RX DMA transfer\n");
1401                                 err += 2;
1402                         }
1403                 }
1404         }
1405
1406         spin_lock_irqsave(&tqspi->lock, flags);
1407
1408         if (err) {
1409                 tegra_qspi_dma_unmap_xfer(tqspi, t);
1410                 tegra_qspi_handle_error(tqspi);
1411                 complete(&tqspi->xfer_completion);
1412                 goto exit;
1413         }
1414
1415         if (tqspi->cur_direction & DATA_DIR_RX)
1416                 tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);
1417
1418         if (tqspi->cur_direction & DATA_DIR_TX)
1419                 tqspi->cur_pos = tqspi->cur_tx_pos;
1420         else
1421                 tqspi->cur_pos = tqspi->cur_rx_pos;
1422
1423         if (tqspi->cur_pos == t->len) {
1424                 tegra_qspi_dma_unmap_xfer(tqspi, t);
1425                 complete(&tqspi->xfer_completion);
1426                 goto exit;
1427         }
1428
1429         tegra_qspi_dma_unmap_xfer(tqspi, t);
1430
1431         /* continue transfer in current message */
1432         total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
1433         if (total_fifo_words > QSPI_FIFO_DEPTH)
1434                 err = tegra_qspi_start_dma_based_transfer(tqspi, t);
1435         else
1436                 err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
1437
1438 exit:
1439         spin_unlock_irqrestore(&tqspi->lock, flags);
1440         return IRQ_HANDLED;
1441 }
1442
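/*
 * Threaded interrupt handler: snapshot QSPI_FIFO_STATUS, derive per-direction
 * underflow/overflow status, clear and re-mask the interrupt, then dispatch
 * to the PIO or DMA completion path.
 */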
1443 static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
1444 {
1445         struct tegra_qspi *tqspi = context_data;
1446
1447         tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
1448
1449         if (tqspi->cur_direction & DATA_DIR_TX)
1450                 tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);
1451
1452         if (tqspi->cur_direction & DATA_DIR_RX)
1453                 tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
1454
1455         tegra_qspi_mask_clear_irq(tqspi);
1456
1457         if (!tqspi->is_curr_dma_xfer)
1458                 return handle_cpu_based_xfer(tqspi);
1459
1460         return handle_dma_based_xfer(tqspi);
1461 }
1462
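/*
 * Per-SoC feature flags: Tegra210 and Tegra186/194 drive transfers with DMA,
 * while Tegra234/241 are FIFO-only; combined sequences are supported from
 * Tegra186 onwards, the TPM flag is set on Tegra234/241, and only Tegra241
 * provides four chip selects.
 */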
1463 static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
1464         .has_dma = true,
1465         .cmb_xfer_capable = false,
1466         .supports_tpm = false,
1467         .cs_count = 1,
1468 };
1469
1470 static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
1471         .has_dma = true,
1472         .cmb_xfer_capable = true,
1473         .supports_tpm = false,
1474         .cs_count = 1,
1475 };
1476
1477 static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
1478         .has_dma = false,
1479         .cmb_xfer_capable = true,
1480         .supports_tpm = true,
1481         .cs_count = 1,
1482 };
1483
1484 static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
1485         .has_dma = false,
1486         .cmb_xfer_capable = true,
1487         .supports_tpm = true,
1488         .cs_count = 4,
1489 };
1490
1491 static const struct of_device_id tegra_qspi_of_match[] = {
1492         {
1493                 .compatible = "nvidia,tegra210-qspi",
1494                 .data       = &tegra210_qspi_soc_data,
1495         }, {
1496                 .compatible = "nvidia,tegra186-qspi",
1497                 .data       = &tegra186_qspi_soc_data,
1498         }, {
1499                 .compatible = "nvidia,tegra194-qspi",
1500                 .data       = &tegra186_qspi_soc_data,
1501         }, {
1502                 .compatible = "nvidia,tegra234-qspi",
1503                 .data       = &tegra234_qspi_soc_data,
1504         }, {
1505                 .compatible = "nvidia,tegra241-qspi",
1506                 .data       = &tegra241_qspi_soc_data,
1507         },
1508         {}
1509 };
1510
1511 MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
1512
1513 #ifdef CONFIG_ACPI
1514 static const struct acpi_device_id tegra_qspi_acpi_match[] = {
1515         {
1516                 .id = "NVDA1213",
1517                 .driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
1518         }, {
1519                 .id = "NVDA1313",
1520                 .driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
1521         }, {
1522                 .id = "NVDA1413",
1523                 .driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
1524         }, {
1525                 .id = "NVDA1513",
1526                 .driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
1527         },
1528         {}
1529 };
1530
1531 MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
1532 #endif
1533
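/*
 * Probe: allocate and populate the SPI controller, map the MMIO region,
 * acquire the clock (DT only), set up DMA and completion objects, reset the
 * block and latch its register defaults under runtime PM, then request the
 * threaded IRQ and register the controller.
 */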
1534 static int tegra_qspi_probe(struct platform_device *pdev)
1535 {
1536         struct spi_controller   *host;
1537         struct tegra_qspi       *tqspi;
1538         struct resource         *r;
1539         int ret, qspi_irq;
1540         int bus_num;
1541
1542         host = devm_spi_alloc_host(&pdev->dev, sizeof(*tqspi));
1543         if (!host)
1544                 return -ENOMEM;
1545
1546         platform_set_drvdata(pdev, host);
1547         tqspi = spi_controller_get_devdata(host);
1548
1549         host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
1550                           SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
1551         host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
1552         host->flags = SPI_CONTROLLER_HALF_DUPLEX;
1553         host->setup = tegra_qspi_setup;
1554         host->transfer_one_message = tegra_qspi_transfer_one_message;
1556         host->auto_runtime_pm = true;
1557
1558         bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
1559         if (bus_num >= 0)
1560                 host->bus_num = bus_num;
1561
1562         tqspi->host = host;
1563         tqspi->dev = &pdev->dev;
1564         spin_lock_init(&tqspi->lock);
1565
1566         tqspi->soc_data = device_get_match_data(&pdev->dev);
1567         host->num_chipselect = tqspi->soc_data->cs_count;
1568         tqspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
1569         if (IS_ERR(tqspi->base))
1570                 return PTR_ERR(tqspi->base);
1571
1572         tqspi->phys = r->start;
1573         qspi_irq = platform_get_irq(pdev, 0);
1574         if (qspi_irq < 0)
1575                 return qspi_irq;
1576         tqspi->irq = qspi_irq;
1577
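        /*
         * The "qspi" clock is only acquired for DT platforms; with an ACPI
         * companion, clock management is assumed to be handled by firmware
         * (the runtime PM callbacks below skip it as well).
         */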
1578         if (!has_acpi_companion(tqspi->dev)) {
1579                 tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
1580                 if (IS_ERR(tqspi->clk)) {
1581                         ret = PTR_ERR(tqspi->clk);
1582                         dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
1583                         return ret;
1584                 }
1585         }
1587
1588         tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
1589         tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;
1590
1591         ret = tegra_qspi_init_dma(tqspi);
1592         if (ret < 0)
1593                 return ret;
1594
1595         if (tqspi->use_dma)
1596                 tqspi->max_buf_size = tqspi->dma_buf_size;
1597
1598         init_completion(&tqspi->tx_dma_complete);
1599         init_completion(&tqspi->rx_dma_complete);
1600         init_completion(&tqspi->xfer_completion);
1601
1602         pm_runtime_enable(&pdev->dev);
1603         ret = pm_runtime_resume_and_get(&pdev->dev);
1604         if (ret < 0) {
1605                 dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
1606                 goto exit_pm_disable;
1607         }
1608
1609         if (device_reset(tqspi->dev) < 0)
1610                 dev_warn_once(tqspi->dev, "device reset failed\n");
1611
1612         tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
1613         tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
1614         tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
1615         tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
1616         tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);
1617
1618         pm_runtime_put(&pdev->dev);
1619
1620         ret = request_threaded_irq(tqspi->irq, NULL,
1621                                    tegra_qspi_isr_thread, IRQF_ONESHOT,
1622                                    dev_name(&pdev->dev), tqspi);
1623         if (ret < 0) {
1624                 dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
1625                 goto exit_pm_disable;
1626         }
1627
1628         host->dev.of_node = pdev->dev.of_node;
1629         ret = spi_register_controller(host);
1630         if (ret < 0) {
1631                 dev_err(&pdev->dev, "failed to register host: %d\n", ret);
1632                 goto exit_free_irq;
1633         }
1634
1635         return 0;
1636
1637 exit_free_irq:
1638         free_irq(qspi_irq, tqspi);
1639 exit_pm_disable:
1640         pm_runtime_force_suspend(&pdev->dev);
1641         tegra_qspi_deinit_dma(tqspi);
1642         return ret;
1643 }
1644
1645 static void tegra_qspi_remove(struct platform_device *pdev)
1646 {
1647         struct spi_controller *host = platform_get_drvdata(pdev);
1648         struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
1649
1650         spi_unregister_controller(host);
1651         free_irq(tqspi->irq, tqspi);
1652         pm_runtime_force_suspend(&pdev->dev);
1653         tegra_qspi_deinit_dma(tqspi);
1654 }
1655
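/*
 * System sleep: suspend quiesces the queue via spi_controller_suspend();
 * resume restores COMMAND1/COMMAND2, whose contents may be lost across
 * suspend, before restarting the queue.
 */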
1656 static int __maybe_unused tegra_qspi_suspend(struct device *dev)
1657 {
1658         struct spi_controller *host = dev_get_drvdata(dev);
1659
1660         return spi_controller_suspend(host);
1661 }
1662
1663 static int __maybe_unused tegra_qspi_resume(struct device *dev)
1664 {
1665         struct spi_controller *host = dev_get_drvdata(dev);
1666         struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
1667         int ret;
1668
1669         ret = pm_runtime_resume_and_get(dev);
1670         if (ret < 0) {
1671                 dev_err(dev, "failed to get runtime PM: %d\n", ret);
1672                 return ret;
1673         }
1674
1675         tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
1676         tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
1677         pm_runtime_put(dev);
1678
1679         return spi_controller_resume(host);
1680 }
1681
1682 static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
1683 {
1684         struct spi_controller *host = dev_get_drvdata(dev);
1685         struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
1686
1687         /* Runtime PM is disabled with ACPI */
1688         if (has_acpi_companion(tqspi->dev))
1689                 return 0;
1690         /* Flush all writes which are in the PPSB queue by reading back */
1691         tegra_qspi_readl(tqspi, QSPI_COMMAND1);
1692
1693         clk_disable_unprepare(tqspi->clk);
1694
1695         return 0;
1696 }
1697
1698 static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
1699 {
1700         struct spi_controller *host = dev_get_drvdata(dev);
1701         struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
1702         int ret;
1703
1704         /* Runtime PM is disabled with ACPI */
1705         if (has_acpi_companion(tqspi->dev))
1706                 return 0;
1707         ret = clk_prepare_enable(tqspi->clk);
1708         if (ret < 0)
1709                 dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);
1710
1711         return ret;
1712 }
1713
1714 static const struct dev_pm_ops tegra_qspi_pm_ops = {
1715         SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
1716         SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
1717 };
1718
1719 static struct platform_driver tegra_qspi_driver = {
1720         .driver = {
1721                 .name           = "tegra-qspi",
1722                 .pm             = &tegra_qspi_pm_ops,
1723                 .of_match_table = tegra_qspi_of_match,
1724                 .acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
1725         },
1726         .probe =        tegra_qspi_probe,
1727         .remove_new =   tegra_qspi_remove,
1728 };
1729 module_platform_driver(tegra_qspi_driver);
1730
1731 MODULE_ALIAS("platform:qspi-tegra");
1732 MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
1733 MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
1734 MODULE_LICENSE("GPL v2");