/*
 * Freescale SPI controller driver cpm functions.
 *
 * Maintainer: Kumar Gala
 *
 * Copyright (C) 2006 Polycom, Inc.
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * CPM SPI and QE buffer descriptors mode support:
 * Copyright (c) 2009  MontaVista Software, Inc.
 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */
#include <asm/cpm.h>
#include <soc/fsl/qe/qe.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/byteorder/generic.h>

#include "spi-fsl-cpm.h"
#include "spi-fsl-lib.h"
#include "spi-fsl-spi.h"

/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
#else
#include <asm/cpm2.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
#endif

#define SPIE_TXB        0x00000200      /* Last char is written to tx fifo */
#define SPIE_RXB        0x00000100      /* Last char is written to rx buf */

/* SPCOM register values */
#define SPCOM_STR       (1 << 23)       /* Start transmit */

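/*
 * Size of the SPI parameter RAM block and the maximum receive buffer
 * length programmed into MRBLR.  Transfers larger than SPI_MRBLR are
 * handled in SPI_MRBLR-sized chunks by fsl_spi_cpm_bufs_start() and
 * fsl_spi_cpm_irq() below.
 */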
#define SPI_PRAM_SIZE   0x100
#define SPI_MRBLR       ((unsigned int)PAGE_SIZE)

static void *fsl_dummy_rx;
static DEFINE_MUTEX(fsl_dummy_rx_lock);
static int fsl_dummy_rx_refcnt;

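/*
 * Reinitialize the transmit and receive state of the SPI engine: QE parts
 * get a QE_INIT_TX_RX command, CPM1 has its state words cleared and its BD
 * pointers rewound to rbase/tbase by hand, and CPM2 is reset through a
 * CPM_CR_INIT_TRX command.
 */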
void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
{
        if (mspi->flags & SPI_QE) {
                qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
                             QE_CR_PROTOCOL_UNSPECIFIED, 0);
        } else {
                if (mspi->flags & SPI_CPM1) {
                        out_be32(&mspi->pram->rstate, 0);
                        out_be16(&mspi->pram->rbptr,
                                 in_be16(&mspi->pram->rbase));
                        out_be32(&mspi->pram->tstate, 0);
                        out_be16(&mspi->pram->tbptr,
                                 in_be16(&mspi->pram->tbase));
                } else {
                        cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
                }
        }
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);

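/*
 * Program the single TX and RX buffer descriptors for the next chunk (at
 * most SPI_MRBLR bytes) of the transfer in progress, then kick the
 * controller by writing SPCOM_STR.
 */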
static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
        struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
        struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
        unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
        unsigned int xfer_ofs;
        struct fsl_spi_reg *reg_base = mspi->reg_base;

        xfer_ofs = mspi->xfer_in_progress->len - mspi->count;

        if (mspi->rx_dma == mspi->dma_dummy_rx)
                out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
        else
                out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
        out_be16(&rx_bd->cbd_datlen, 0);
        out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);

        if (mspi->tx_dma == mspi->dma_dummy_tx)
                out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
        else
                out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
        out_be16(&tx_bd->cbd_datlen, xfer_len);
        out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
                                 BD_SC_LAST);

        /* start transfer */
        mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
}

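/*
 * Set up DMA for one spi_transfer: real buffers are mapped unless the
 * caller already mapped them, missing directions fall back to the dummy
 * buffers, and 16-bit TX data is converted to little endian in a
 * temporary buffer first.  RX events are then unmasked and the first
 * chunk is started.  Returns 0 on success, -ENOMEM on allocation or
 * mapping failure.
 */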
int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
                     struct spi_transfer *t, bool is_dma_mapped)
{
        struct device *dev = mspi->dev;
        struct fsl_spi_reg *reg_base = mspi->reg_base;

        if (is_dma_mapped) {
                mspi->map_tx_dma = 0;
                mspi->map_rx_dma = 0;
        } else {
                mspi->map_tx_dma = 1;
                mspi->map_rx_dma = 1;
        }

        if (!t->tx_buf) {
                mspi->tx_dma = mspi->dma_dummy_tx;
                mspi->map_tx_dma = 0;
        }

        if (!t->rx_buf) {
                mspi->rx_dma = mspi->dma_dummy_rx;
                mspi->map_rx_dma = 0;
        }
        if (t->bits_per_word == 16 && t->tx_buf) {
                const u16 *src = t->tx_buf;
                u16 *dst;
                int i;

                dst = kmalloc(t->len, GFP_KERNEL);
                if (!dst)
                        return -ENOMEM;

                for (i = 0; i < t->len >> 1; i++)
                        dst[i] = cpu_to_le16p(src + i);

                mspi->tx = dst;
                mspi->map_tx_dma = 1;
        }

        if (mspi->map_tx_dma) {
                void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */

                mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
                                              DMA_TO_DEVICE);
                if (dma_mapping_error(dev, mspi->tx_dma)) {
                        dev_err(dev, "unable to map tx dma\n");
                        return -ENOMEM;
                }
        } else if (t->tx_buf) {
                mspi->tx_dma = t->tx_dma;
        }

        if (mspi->map_rx_dma) {
                mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
                                              DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, mspi->rx_dma)) {
                        dev_err(dev, "unable to map rx dma\n");
                        goto err_rx_dma;
                }
        } else if (t->rx_buf) {
                mspi->rx_dma = t->rx_dma;
        }

        /* enable rx ints */
        mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);

        mspi->xfer_in_progress = t;
        mspi->count = t->len;

        /* start CPM transfers */
        fsl_spi_cpm_bufs_start(mspi);

        return 0;

err_rx_dma:
        if (mspi->map_tx_dma)
                dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);

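/*
 * Undo the DMA mappings created by fsl_spi_cpm_bufs() once the transfer
 * has finished, and convert received 16-bit words back to CPU byte order.
 */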
void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
{
        struct device *dev = mspi->dev;
        struct spi_transfer *t = mspi->xfer_in_progress;

        if (mspi->map_tx_dma)
                dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
        if (mspi->map_rx_dma)
                dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
        mspi->xfer_in_progress = NULL;

        if (t->bits_per_word == 16 && t->rx_buf) {
                int i;

                for (i = 0; i < t->len; i += 2)
                        le16_to_cpus(t->rx_buf + i);
        }
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);

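/*
 * Per-chunk completion handling, presumably called from the host driver's
 * interrupt handler with the SPIE events: account for the bytes reported
 * in the RX buffer descriptor, acknowledge the events, and either start
 * the next chunk or signal completion of the whole transfer.
 */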
void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
        u16 len;
        struct fsl_spi_reg *reg_base = mspi->reg_base;

        dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
                in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);

        len = in_be16(&mspi->rx_bd->cbd_datlen);
        if (len > mspi->count) {
                WARN_ON(1);
                len = mspi->count;
        }

        /* Clear the events */
        mpc8xxx_spi_write_reg(&reg_base->event, events);

        mspi->count -= len;
        if (mspi->count)
                fsl_spi_cpm_bufs_start(mspi);
        else
                complete(&mspi->done);
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);

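/*
 * The dummy RX buffer backs receive DMA when a transfer has no rx_buf.
 * It is shared by all instances of this driver, so it is allocated once
 * and reference-counted under fsl_dummy_rx_lock.
 */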
static void *fsl_spi_alloc_dummy_rx(void)
{
        mutex_lock(&fsl_dummy_rx_lock);

        if (!fsl_dummy_rx)
                fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
        if (fsl_dummy_rx)
                fsl_dummy_rx_refcnt++;

        mutex_unlock(&fsl_dummy_rx_lock);

        return fsl_dummy_rx;
}

static void fsl_spi_free_dummy_rx(void)
{
        mutex_lock(&fsl_dummy_rx_lock);

        switch (fsl_dummy_rx_refcnt) {
        case 0:
                WARN_ON(1);
                break;
        case 1:
                kfree(fsl_dummy_rx);
                fsl_dummy_rx = NULL;
                /* fall through */
        default:
                fsl_dummy_rx_refcnt--;
                break;
        }

        mutex_unlock(&fsl_dummy_rx_lock);
}

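/*
 * Find or allocate the SPI parameter RAM: a fixed QE pram offset taken
 * from the "reg" property, a dynamically allocated QE page assigned with
 * QE_ASSIGN_PAGE_TO_DEVICE, or, for CPM2, a muram allocation whose offset
 * is written back through the second memory resource.  Returns a muram
 * offset, or an error value to be checked with IS_ERR_VALUE().
 */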
static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
{
        struct device *dev = mspi->dev;
        struct device_node *np = dev->of_node;
        const u32 *iprop;
        int size;
        void __iomem *spi_base;
        unsigned long pram_ofs = -ENOMEM;

        /* Can't use of_address_to_resource(), QE muram isn't at 0. */
        iprop = of_get_property(np, "reg", &size);

        /* QE with a fixed pram location? */
        if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
                return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);

        /* QE but with a dynamic pram location? */
        if (mspi->flags & SPI_QE) {
                pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
                qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
                             QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
                return pram_ofs;
        }

        spi_base = of_iomap(np, 1);
        if (spi_base == NULL)
                return -EINVAL;

        if (mspi->flags & SPI_CPM2) {
                pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
                out_be16(spi_base, pram_ofs);
        }

        iounmap(spi_base);
        return pram_ofs;
}

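/*
 * One-time setup for CPM/QE mode: select the QE sub-block from
 * "cell-index", obtain the parameter RAM, allocate one TX and one RX
 * buffer descriptor from muram, map the dummy TX/RX buffers for DMA and
 * program the parameter RAM defaults.  Returns 0 if CPM mode is unused or
 * setup succeeded, -ENOMEM otherwise.
 */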
int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
        struct device *dev = mspi->dev;
        struct device_node *np = dev->of_node;
        const u32 *iprop;
        int size;
        unsigned long bds_ofs;

        if (!(mspi->flags & SPI_CPM_MODE))
                return 0;

        if (!fsl_spi_alloc_dummy_rx())
                return -ENOMEM;

        if (mspi->flags & SPI_QE) {
                iprop = of_get_property(np, "cell-index", &size);
                if (iprop && size == sizeof(*iprop))
                        mspi->subblock = *iprop;

                switch (mspi->subblock) {
                default:
                        dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
                        /* fall through */
                case 0:
                        mspi->subblock = QE_CR_SUBBLOCK_SPI1;
                        break;
                case 1:
                        mspi->subblock = QE_CR_SUBBLOCK_SPI2;
                        break;
                }
        }

        if (mspi->flags & SPI_CPM1) {
                struct resource *res;
                void *pram;

                res = platform_get_resource(to_platform_device(dev),
                                            IORESOURCE_MEM, 1);
                pram = devm_ioremap_resource(dev, res);
                if (IS_ERR(pram))
                        mspi->pram = NULL;
                else
                        mspi->pram = pram;
        } else {
                unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi);

                if (IS_ERR_VALUE(pram_ofs))
                        mspi->pram = NULL;
                else
                        mspi->pram = cpm_muram_addr(pram_ofs);
        }
        if (mspi->pram == NULL) {
                dev_err(dev, "can't allocate spi parameter ram\n");
                goto err_pram;
        }

        bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
                                  sizeof(*mspi->rx_bd), 8);
        if (IS_ERR_VALUE(bds_ofs)) {
                dev_err(dev, "can't allocate bds\n");
                goto err_bds;
        }

        mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
                                            DMA_TO_DEVICE);
        if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
                dev_err(dev, "unable to map dummy tx buffer\n");
                goto err_dummy_tx;
        }

        mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
                                            DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
                dev_err(dev, "unable to map dummy rx buffer\n");
                goto err_dummy_rx;
        }

        mspi->tx_bd = cpm_muram_addr(bds_ofs);
        mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));

        /* Initialize parameter ram. */
        out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
        out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
        out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
        out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
        out_be16(&mspi->pram->mrblr, SPI_MRBLR);
        out_be32(&mspi->pram->rstate, 0);
        out_be32(&mspi->pram->rdp, 0);
        out_be16(&mspi->pram->rbptr, 0);
        out_be16(&mspi->pram->rbc, 0);
        out_be32(&mspi->pram->rxtmp, 0);
        out_be32(&mspi->pram->tstate, 0);
        out_be32(&mspi->pram->tdp, 0);
        out_be16(&mspi->pram->tbptr, 0);
        out_be16(&mspi->pram->tbc, 0);
        out_be32(&mspi->pram->txtmp, 0);

        return 0;

err_dummy_rx:
        dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
err_dummy_tx:
        cpm_muram_free(bds_ofs);
err_bds:
        if (!(mspi->flags & SPI_CPM1))
                cpm_muram_free(cpm_muram_offset(mspi->pram));
err_pram:
        fsl_spi_free_dummy_rx();
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);

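/*
 * Release everything set up by fsl_spi_cpm_init(): the dummy-buffer DMA
 * mappings, the buffer descriptors and parameter RAM in muram, and one
 * reference on the shared dummy RX buffer.
 */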
void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
        struct device *dev = mspi->dev;

        if (!(mspi->flags & SPI_CPM_MODE))
                return;

        dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
        dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
        cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
        cpm_muram_free(cpm_muram_offset(mspi->pram));
        fsl_spi_free_dummy_rx();
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);

MODULE_LICENSE("GPL");