// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI-Engine SPI controller driver
 * Copyright 2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/clk.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/timer.h>

#define SPI_ENGINE_VERSION_MAJOR(x)     (((x) >> 16) & 0xff)
#define SPI_ENGINE_VERSION_MINOR(x)     (((x) >> 8) & 0xff)
#define SPI_ENGINE_VERSION_PATCH(x)     ((x) & 0xff)

#define SPI_ENGINE_REG_VERSION                  0x00

#define SPI_ENGINE_REG_RESET                    0x40

#define SPI_ENGINE_REG_INT_ENABLE               0x80
#define SPI_ENGINE_REG_INT_PENDING              0x84
#define SPI_ENGINE_REG_INT_SOURCE               0x88

#define SPI_ENGINE_REG_SYNC_ID                  0xc0

#define SPI_ENGINE_REG_CMD_FIFO_ROOM            0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM            0xd4
#define SPI_ENGINE_REG_SDI_FIFO_LEVEL           0xd8

#define SPI_ENGINE_REG_CMD_FIFO                 0xe0
#define SPI_ENGINE_REG_SDO_DATA_FIFO            0xe4
#define SPI_ENGINE_REG_SDI_DATA_FIFO            0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK       0xec

#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY         BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY         BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL          BIT(2)
#define SPI_ENGINE_INT_SYNC                     BIT(3)

#define SPI_ENGINE_CONFIG_CPHA                  BIT(0)
#define SPI_ENGINE_CONFIG_CPOL                  BIT(1)
#define SPI_ENGINE_CONFIG_3WIRE                 BIT(2)

#define SPI_ENGINE_INST_TRANSFER                0x0
#define SPI_ENGINE_INST_ASSERT                  0x1
#define SPI_ENGINE_INST_WRITE                   0x2
#define SPI_ENGINE_INST_MISC                    0x3

#define SPI_ENGINE_CMD_REG_CLK_DIV              0x0
#define SPI_ENGINE_CMD_REG_CONFIG               0x1
#define SPI_ENGINE_CMD_REG_XFER_BITS            0x2

#define SPI_ENGINE_MISC_SYNC                    0x0
#define SPI_ENGINE_MISC_SLEEP                   0x1

#define SPI_ENGINE_TRANSFER_WRITE               0x1
#define SPI_ENGINE_TRANSFER_READ                0x2

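/*
 * SPI Engine instructions, as built by SPI_ENGINE_CMD() below, are 16-bit
 * words: the instruction type in bits [15:12], a 4-bit first argument in
 * bits [11:8] and an 8-bit second argument in bits [7:0]. For example,
 * SPI_ENGINE_CMD_TRANSFER(SPI_ENGINE_TRANSFER_WRITE, 7) encodes as 0x0107.
 */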
#define SPI_ENGINE_CMD(inst, arg1, arg2) \
        (((inst) << 12) | ((arg1) << 8) | (arg2))

#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
#define SPI_ENGINE_CMD_WRITE(reg, val) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
#define SPI_ENGINE_CMD_SLEEP(delay) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
#define SPI_ENGINE_CMD_SYNC(id) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))

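/**
 * struct spi_engine_program - compiled SPI Engine instruction stream
 * @length: number of entries in @instructions
 * @instructions: flat array of 16-bit SPI Engine instructions
 */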
struct spi_engine_program {
        unsigned int length;
        uint16_t instructions[];
};

/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
        /** @p: Instructions for executing this message. */
        struct spi_engine_program *p;
        /** @cmd_length: Number of elements in cmd_buf array. */
        unsigned int cmd_length;
        /** @cmd_buf: Array of commands not yet written to CMD FIFO. */
        const uint16_t *cmd_buf;
        /** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
        struct spi_transfer *tx_xfer;
        /** @tx_length: Size of tx_buf in bytes. */
        unsigned int tx_length;
        /** @tx_buf: Bytes not yet written to TX FIFO. */
        const uint8_t *tx_buf;
        /** @rx_xfer: Next xfer with rx_buf not yet fully read from RX FIFO. */
        struct spi_transfer *rx_xfer;
        /** @rx_length: Size of rx_buf in bytes. */
        unsigned int rx_length;
        /** @rx_buf: Bytes not yet read from the RX FIFO. */
        uint8_t *rx_buf;
        /** @sync_id: ID to correlate SYNC interrupts with this message. */
        u8 sync_id;
};

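/**
 * struct spi_engine - per-instance driver state
 * @clk: AXI bus interface clock
 * @ref_clk: reference clock the SPI bus clock is derived from
 * @lock: protects @int_enable and accesses to the FIFO registers
 * @base: memory-mapped register base
 * @sync_ida: allocator for per-message sync IDs
 * @watchdog_timer: fails the current message if no SYNC interrupt arrives
 * @controller: the SPI controller this instance belongs to
 * @int_enable: software copy of the interrupt enable register
 */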
struct spi_engine {
        struct clk *clk;
        struct clk *ref_clk;

        spinlock_t lock;

        void __iomem *base;
        struct ida sync_ida;
        struct timer_list watchdog_timer;
        struct spi_controller *controller;

        unsigned int int_enable;
};

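/*
 * Appends a single instruction to @p. With @dry set, only @p->length is
 * advanced; this lets the same code path first size a program and then, on a
 * second pass, fill in the allocated instruction array.
 */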
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
        bool dry, uint16_t cmd)
{
        if (!dry)
                p->instructions[p->length] = cmd;
        p->length++;
}

static unsigned int spi_engine_get_config(struct spi_device *spi)
{
        unsigned int config = 0;

        if (spi->mode & SPI_CPOL)
                config |= SPI_ENGINE_CONFIG_CPOL;
        if (spi->mode & SPI_CPHA)
                config |= SPI_ENGINE_CONFIG_CPHA;
        if (spi->mode & SPI_3WIRE)
                config |= SPI_ENGINE_CONFIG_3WIRE;

        return config;
}

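/*
 * Emits the TRANSFER instructions for one spi_transfer. The length in bytes
 * is converted to a number of words based on bits_per_word and then split
 * into chunks of at most 256 words, since the instruction encodes the word
 * count minus one in a single byte.
 */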
static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
        struct spi_transfer *xfer)
{
        unsigned int len;

        if (xfer->bits_per_word <= 8)
                len = xfer->len;
        else if (xfer->bits_per_word <= 16)
                len = xfer->len / 2;
        else
                len = xfer->len / 4;

        while (len) {
                unsigned int n = min(len, 256U);
                unsigned int flags = 0;

                if (xfer->tx_buf)
                        flags |= SPI_ENGINE_TRANSFER_WRITE;
                if (xfer->rx_buf)
                        flags |= SPI_ENGINE_TRANSFER_READ;

                spi_engine_program_add_cmd(p, dry,
                        SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
                len -= n;
        }
}

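/*
 * Emits enough SLEEP instructions to cover @delay_ns at the given SCLK rate,
 * again in chunks of at most 256 cycles per instruction.
 */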
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
                                 int delay_ns, u32 sclk_hz)
{
        unsigned int t;

        /* negative delay indicates error, e.g. from spi_delay_to_ns() */
        if (delay_ns <= 0)
                return;

        /* rounding down since executing the instruction adds a couple of ticks delay */
        t = DIV_ROUND_DOWN_ULL((u64)delay_ns * sclk_hz, NSEC_PER_SEC);
        while (t) {
                unsigned int n = min(t, 256U);

                spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
                t -= n;
        }
}

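/*
 * Emits a chip-select update. The argument is a mask with one bit per CS
 * line; the driver clears the bit for this device's CS to assert it, and
 * uses 0xff to deassert all chip selects.
 */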
static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
                struct spi_device *spi, bool assert)
{
        unsigned int mask = 0xff;

        if (assert)
                mask ^= BIT(spi_get_chipselect(spi, 0));

        spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
}

/*
 * Performs precompile steps on the message.
 *
 * The SPI core does most of the message/transfer validation and filling in
 * fields for us via __spi_validate(). This fixes up anything remaining not
 * done there.
 *
 * NB: This is separate from spi_engine_compile_message() because the latter
 * is called twice and would otherwise result in double-evaluation.
 */
static void spi_engine_precompile_message(struct spi_message *msg)
{
        unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
                xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
        }
}

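/*
 * Translates a SPI message into an SPI Engine program: device configuration,
 * chip-select changes, clock-divider and word-size updates, the transfers
 * themselves and any inter-transfer delays. Called once with @dry set to
 * count the instructions and a second time to emit them.
 */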
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
                                       struct spi_engine_program *p)
{
        struct spi_device *spi = msg->spi;
        struct spi_controller *host = spi->controller;
        struct spi_transfer *xfer;
        int clk_div, new_clk_div;
        bool keep_cs = false;
        u8 bits_per_word = 0;

        clk_div = 1;

        spi_engine_program_add_cmd(p, dry,
                SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
                        spi_engine_get_config(spi)));

        xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
        spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
                if (new_clk_div != clk_div) {
                        clk_div = new_clk_div;
                        /* actual divider used is register value + 1 */
                        spi_engine_program_add_cmd(p, dry,
                                SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
                                        clk_div - 1));
                }

                if (bits_per_word != xfer->bits_per_word) {
                        bits_per_word = xfer->bits_per_word;
                        spi_engine_program_add_cmd(p, dry,
                                SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
                                        bits_per_word));
                }

                spi_engine_gen_xfer(p, dry, xfer);
                spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
                                     xfer->effective_speed_hz);

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
                                keep_cs = true;
                        } else {
                                if (!xfer->cs_off)
                                        spi_engine_gen_cs(p, dry, spi, false);

                                spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
                                        &xfer->cs_change_delay, xfer),
                                        xfer->effective_speed_hz);

                                if (!list_next_entry(xfer, transfer_list)->cs_off)
                                        spi_engine_gen_cs(p, dry, spi, true);
                        }
                } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
                           xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
                        spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
                }
        }

        if (!keep_cs)
                spi_engine_gen_cs(p, dry, spi, false);

        /*
         * Restore clockdiv to default so that future gen_sleep commands don't
         * have to be aware of the current register state.
         */
        if (clk_div != 1)
                spi_engine_program_add_cmd(p, dry,
                        SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}

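/*
 * Advances *_xfer to the next transfer in the message: the first transfer
 * when *_xfer is NULL, or NULL once the last transfer has been passed.
 */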
static void spi_engine_xfer_next(struct spi_message *msg,
        struct spi_transfer **_xfer)
{
        struct spi_transfer *xfer = *_xfer;

        if (!xfer) {
                xfer = list_first_entry(&msg->transfers,
                        struct spi_transfer, transfer_list);
        } else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
                xfer = NULL;
        } else {
                xfer = list_next_entry(xfer, transfer_list);
        }

        *_xfer = xfer;
}

static void spi_engine_tx_next(struct spi_message *msg)
{
        struct spi_engine_message_state *st = msg->state;
        struct spi_transfer *xfer = st->tx_xfer;

        do {
                spi_engine_xfer_next(msg, &xfer);
        } while (xfer && !xfer->tx_buf);

        st->tx_xfer = xfer;
        if (xfer) {
                st->tx_length = xfer->len;
                st->tx_buf = xfer->tx_buf;
        } else {
                st->tx_buf = NULL;
        }
}

static void spi_engine_rx_next(struct spi_message *msg)
{
        struct spi_engine_message_state *st = msg->state;
        struct spi_transfer *xfer = st->rx_xfer;

        do {
                spi_engine_xfer_next(msg, &xfer);
        } while (xfer && !xfer->rx_buf);

        st->rx_xfer = xfer;
        if (xfer) {
                st->rx_length = xfer->len;
                st->rx_buf = xfer->rx_buf;
        } else {
                st->rx_buf = NULL;
        }
}

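/*
 * Writes as many pending instructions to the CMD FIFO as it has room for.
 * Returns true while instructions remain, i.e. while the "CMD almost empty"
 * interrupt still needs to stay enabled.
 */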
static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
                                      struct spi_message *msg)
{
        void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
        struct spi_engine_message_state *st = msg->state;
        unsigned int n, m, i;
        const uint16_t *buf;

        n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
        while (n && st->cmd_length) {
                m = min(n, st->cmd_length);
                buf = st->cmd_buf;
                for (i = 0; i < m; i++)
                        writel_relaxed(buf[i], addr);
                st->cmd_buf += m;
                st->cmd_length -= m;
                n -= m;
        }

        return st->cmd_length != 0;
}

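/*
 * Fills the SDO FIFO from the current tx_buf, writing one FIFO word per 1, 2
 * or 4 bytes depending on the transfer's bits_per_word, and advances to the
 * next TX transfer as each buffer is drained. Returns true while TX data
 * remains.
 */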
static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
                                     struct spi_message *msg)
{
        void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
        struct spi_engine_message_state *st = msg->state;
        unsigned int n, m, i;

        n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
        while (n && st->tx_length) {
                if (st->tx_xfer->bits_per_word <= 8) {
                        const u8 *buf = st->tx_buf;

                        m = min(n, st->tx_length);
                        for (i = 0; i < m; i++)
                                writel_relaxed(buf[i], addr);
                        st->tx_buf += m;
                        st->tx_length -= m;
                } else if (st->tx_xfer->bits_per_word <= 16) {
                        const u16 *buf = (const u16 *)st->tx_buf;

                        m = min(n, st->tx_length / 2);
                        for (i = 0; i < m; i++)
                                writel_relaxed(buf[i], addr);
                        st->tx_buf += m * 2;
                        st->tx_length -= m * 2;
                } else {
                        const u32 *buf = (const u32 *)st->tx_buf;

                        m = min(n, st->tx_length / 4);
                        for (i = 0; i < m; i++)
                                writel_relaxed(buf[i], addr);
                        st->tx_buf += m * 4;
                        st->tx_length -= m * 4;
                }
                n -= m;
                if (st->tx_length == 0)
                        spi_engine_tx_next(msg);
        }

        return st->tx_length != 0;
}

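/*
 * Drains the SDI FIFO into the current rx_buf, mirroring the TX path's word
 * size handling. Returns true while RX data is still expected.
 */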
static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
                                    struct spi_message *msg)
{
        void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
        struct spi_engine_message_state *st = msg->state;
        unsigned int n, m, i;

        n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
        while (n && st->rx_length) {
                if (st->rx_xfer->bits_per_word <= 8) {
                        u8 *buf = st->rx_buf;

                        m = min(n, st->rx_length);
                        for (i = 0; i < m; i++)
                                buf[i] = readl_relaxed(addr);
                        st->rx_buf += m;
                        st->rx_length -= m;
                } else if (st->rx_xfer->bits_per_word <= 16) {
                        u16 *buf = (u16 *)st->rx_buf;

                        m = min(n, st->rx_length / 2);
                        for (i = 0; i < m; i++)
                                buf[i] = readl_relaxed(addr);
                        st->rx_buf += m * 2;
                        st->rx_length -= m * 2;
                } else {
                        u32 *buf = (u32 *)st->rx_buf;

                        m = min(n, st->rx_length / 4);
                        for (i = 0; i < m; i++)
                                buf[i] = readl_relaxed(addr);
                        st->rx_buf += m * 4;
                        st->rx_length -= m * 4;
                }
                n -= m;
                if (st->rx_length == 0)
                        spi_engine_rx_next(msg);
        }

        return st->rx_length != 0;
}

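/*
 * Interrupt handler: tops up the CMD and SDO FIFOs, drains the SDI FIFO, and
 * completes the current message once a SYNC interrupt reports the sync ID
 * that was appended to its program. Interrupt sources that no longer have
 * work pending are masked.
 */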
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
        struct spi_controller *host = devid;
        struct spi_message *msg = host->cur_msg;
        struct spi_engine *spi_engine = spi_controller_get_devdata(host);
        unsigned int disable_int = 0;
        unsigned int pending;
        int completed_id = -1;

        pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

        if (pending & SPI_ENGINE_INT_SYNC) {
                writel_relaxed(SPI_ENGINE_INT_SYNC,
                        spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
                completed_id = readl_relaxed(
                        spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
        }

        spin_lock(&spi_engine->lock);

        if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
                if (!spi_engine_write_cmd_fifo(spi_engine, msg))
                        disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
        }

        if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
                if (!spi_engine_write_tx_fifo(spi_engine, msg))
                        disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
        }

        if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
                if (!spi_engine_read_rx_fifo(spi_engine, msg))
                        disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
        }

        if (pending & SPI_ENGINE_INT_SYNC && msg) {
                struct spi_engine_message_state *st = msg->state;

                if (completed_id == st->sync_id) {
                        if (timer_delete_sync(&spi_engine->watchdog_timer)) {
                                msg->status = 0;
                                msg->actual_length = msg->frame_length;
                                spi_finalize_current_message(host);
                        }
                        disable_int |= SPI_ENGINE_INT_SYNC;
                }
        }

        if (disable_int) {
                spi_engine->int_enable &= ~disable_int;
                writel_relaxed(spi_engine->int_enable,
                        spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
        }

        spin_unlock(&spi_engine->lock);

        return IRQ_HANDLED;
}

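/*
 * Compiles the message into an SPI Engine program. A dry run is used to size
 * the program, then it is compiled for real and terminated with a SYNC
 * instruction carrying a per-message ID so the completion interrupt can be
 * matched to this message.
 */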
static int spi_engine_prepare_message(struct spi_controller *host,
                                      struct spi_message *msg)
{
        struct spi_engine_program p_dry, *p;
        struct spi_engine *spi_engine = spi_controller_get_devdata(host);
        struct spi_engine_message_state *st;
        size_t size;
        int ret;

        st = kzalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

        spi_engine_precompile_message(msg);

        p_dry.length = 0;
        spi_engine_compile_message(msg, true, &p_dry);

        size = sizeof(*p->instructions) * (p_dry.length + 1);
        p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
        if (!p) {
                kfree(st);
                return -ENOMEM;
        }

        ret = ida_alloc_range(&spi_engine->sync_ida, 0, U8_MAX, GFP_KERNEL);
        if (ret < 0) {
                kfree(p);
                kfree(st);
                return ret;
        }

        st->sync_id = ret;

        spi_engine_compile_message(msg, false, p);

        spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(st->sync_id));

        st->p = p;
        st->cmd_buf = p->instructions;
        st->cmd_length = p->length;
        msg->state = st;

        return 0;
}

static int spi_engine_unprepare_message(struct spi_controller *host,
                                        struct spi_message *msg)
{
        struct spi_engine *spi_engine = spi_controller_get_devdata(host);
        struct spi_engine_message_state *st = msg->state;

        ida_free(&spi_engine->sync_ida, st->sync_id);
        kfree(st->p);
        kfree(st);

        return 0;
}

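/*
 * Starts execution of the prepared program by priming the FIFOs and enabling
 * the interrupts needed to keep them serviced. Completion is signalled by the
 * SYNC interrupt, with the watchdog timer as a fallback in case the hardware
 * never raises it.
 */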
static int spi_engine_transfer_one_message(struct spi_controller *host,
        struct spi_message *msg)
{
        struct spi_engine *spi_engine = spi_controller_get_devdata(host);
        struct spi_engine_message_state *st = msg->state;
        unsigned int int_enable = 0;
        unsigned long flags;

        mod_timer(&spi_engine->watchdog_timer, jiffies + msecs_to_jiffies(5000));

        spin_lock_irqsave(&spi_engine->lock, flags);

        if (spi_engine_write_cmd_fifo(spi_engine, msg))
                int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

        spi_engine_tx_next(msg);
        if (spi_engine_write_tx_fifo(spi_engine, msg))
                int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

        spi_engine_rx_next(msg);
        if (st->rx_length != 0)
                int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

        int_enable |= SPI_ENGINE_INT_SYNC;

        writel_relaxed(int_enable,
                spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
        spi_engine->int_enable = int_enable;
        spin_unlock_irqrestore(&spi_engine->lock, flags);

        return 0;
}

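/*
 * Watchdog callback: if the SYNC interrupt never arrives, fail the in-flight
 * message with -ETIMEDOUT so the SPI core is not blocked forever.
 */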
static void spi_engine_timeout(struct timer_list *timer)
{
        struct spi_engine *spi_engine = from_timer(spi_engine, timer, watchdog_timer);
        struct spi_controller *host = spi_engine->controller;

        if (WARN_ON(!host->cur_msg))
                return;

        dev_err(&host->dev,
                "Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
        host->cur_msg->status = -ETIMEDOUT;
        spi_finalize_current_message(host);
}

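/*
 * Managed cleanup action: clear and mask all interrupts and put the core back
 * into reset when the device is removed.
 */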
static void spi_engine_release_hw(void *p)
{
        struct spi_engine *spi_engine = p;

        writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
        writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
        writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}

static int spi_engine_probe(struct platform_device *pdev)
{
        struct spi_engine *spi_engine;
        struct spi_controller *host;
        unsigned int version;
        int irq;
        int ret;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
        if (!host)
                return -ENOMEM;

        spi_engine = spi_controller_get_devdata(host);

        spin_lock_init(&spi_engine->lock);
        ida_init(&spi_engine->sync_ida);
        timer_setup(&spi_engine->watchdog_timer, spi_engine_timeout, TIMER_IRQSAFE);
        spi_engine->controller = host;

        spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
        if (IS_ERR(spi_engine->clk))
                return PTR_ERR(spi_engine->clk);

        spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
        if (IS_ERR(spi_engine->ref_clk))
                return PTR_ERR(spi_engine->ref_clk);

        spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(spi_engine->base))
                return PTR_ERR(spi_engine->base);

        version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
        if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
                dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
                        SPI_ENGINE_VERSION_MAJOR(version),
                        SPI_ENGINE_VERSION_MINOR(version),
                        SPI_ENGINE_VERSION_PATCH(version));
                return -ENODEV;
        }

        writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
        writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
        writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

        ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
                                       spi_engine);
        if (ret)
                return ret;

        ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
                               host);
        if (ret)
                return ret;

        host->dev.of_node = pdev->dev.of_node;
        host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
        host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
        host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
        host->transfer_one_message = spi_engine_transfer_one_message;
        host->prepare_message = spi_engine_prepare_message;
        host->unprepare_message = spi_engine_unprepare_message;
        host->num_chipselect = 8;

        if (host->max_speed_hz == 0)
                return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");

        ret = devm_spi_register_controller(&pdev->dev, host);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, host);

        return 0;
}

static const struct of_device_id spi_engine_match_table[] = {
        { .compatible = "adi,axi-spi-engine-1.00.a" },
        { },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);

static struct platform_driver spi_engine_driver = {
        .probe = spi_engine_probe,
        .driver = {
                .name = "spi-engine",
                .of_match_table = spi_engine_match_table,
        },
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");