// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * Note: The I3C HCI v2.0 spec is still in flux. The IBI support is based on
 * v1.x of the spec and v2.0 will likely be split out.
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>

#include "hci.h"
#include "cmd.h"
#include "ibi.h"


/*
 * Software Parameter Values (somewhat arbitrary for now).
 * Some of them could be determined at run time eventually.
 */

#define XFER_RINGS                      1       /* max: 8 */
#define XFER_RING_ENTRIES               16      /* max: 255 */

#define IBI_RINGS                       1       /* max: 8 */
#define IBI_STATUS_RING_ENTRIES         32      /* max: 255 */
#define IBI_CHUNK_CACHELINES            1       /* max: 256 bytes equivalent */
#define IBI_CHUNK_POOL_SIZE             128     /* max: 1023 */

/*
 * Ring Header Preamble
 */

#define rhs_reg_read(r)         readl(hci->RHS_regs + (RHS_##r))
#define rhs_reg_write(r, v)     writel(v, hci->RHS_regs + (RHS_##r))

#define RHS_CONTROL                     0x00
#define PREAMBLE_SIZE                   GENMASK(31, 24) /* Preamble Section Size */
#define HEADER_SIZE                     GENMASK(23, 16) /* Ring Header Size */
#define MAX_HEADER_COUNT_CAP            GENMASK(7, 4) /* HC Max Header Count */
#define MAX_HEADER_COUNT                GENMASK(3, 0) /* Driver Max Header Count */

#define RHS_RHn_OFFSET(n)               (0x04 + (n)*4)

/*
 * Ring Header (Per-Ring Bundle)
 */

#define rh_reg_read(r)          readl(rh->regs + (RH_##r))
#define rh_reg_write(r, v)      writel(v, rh->regs + (RH_##r))

#define RH_CR_SETUP                     0x00    /* Command/Response Ring */
#define CR_XFER_STRUCT_SIZE             GENMASK(31, 24)
#define CR_RESP_STRUCT_SIZE             GENMASK(23, 16)
#define CR_RING_SIZE                    GENMASK(8, 0)

#define RH_IBI_SETUP                    0x04
#define IBI_STATUS_STRUCT_SIZE          GENMASK(31, 24)
#define IBI_STATUS_RING_SIZE            GENMASK(23, 16)
#define IBI_DATA_CHUNK_SIZE             GENMASK(12, 10)
#define IBI_DATA_CHUNK_COUNT            GENMASK(9, 0)

#define RH_CHUNK_CONTROL                0x08

#define RH_INTR_STATUS                  0x10
#define RH_INTR_STATUS_ENABLE           0x14
#define RH_INTR_SIGNAL_ENABLE           0x18
#define RH_INTR_FORCE                   0x1c
#define INTR_IBI_READY                  BIT(12)
#define INTR_TRANSFER_COMPLETION        BIT(11)
#define INTR_RING_OP                    BIT(10)
#define INTR_TRANSFER_ERR               BIT(9)
#define INTR_WARN_INS_STOP_MODE         BIT(7)
#define INTR_IBI_RING_FULL              BIT(6)
#define INTR_TRANSFER_ABORT             BIT(5)

#define RH_RING_STATUS                  0x20
#define RING_STATUS_LOCKED              BIT(3)
#define RING_STATUS_ABORTED             BIT(2)
#define RING_STATUS_RUNNING             BIT(1)
#define RING_STATUS_ENABLED             BIT(0)

#define RH_RING_CONTROL                 0x24
#define RING_CTRL_ABORT                 BIT(2)
#define RING_CTRL_RUN_STOP              BIT(1)
#define RING_CTRL_ENABLE                BIT(0)

#define RH_RING_OPERATION1              0x28
#define RING_OP1_IBI_DEQ_PTR            GENMASK(23, 16)
#define RING_OP1_CR_SW_DEQ_PTR          GENMASK(15, 8)
#define RING_OP1_CR_ENQ_PTR             GENMASK(7, 0)

#define RH_RING_OPERATION2              0x2c
#define RING_OP2_IBI_ENQ_PTR            GENMASK(23, 16)
#define RING_OP2_CR_DEQ_PTR             GENMASK(7, 0)

#define RH_CMD_RING_BASE_LO             0x30
#define RH_CMD_RING_BASE_HI             0x34
#define RH_RESP_RING_BASE_LO            0x38
#define RH_RESP_RING_BASE_HI            0x3c
#define RH_IBI_STATUS_RING_BASE_LO      0x40
#define RH_IBI_STATUS_RING_BASE_HI      0x44
#define RH_IBI_DATA_RING_BASE_LO        0x48
#define RH_IBI_DATA_RING_BASE_HI        0x4c

#define RH_CMD_RING_SG                  0x50    /* Ring Scatter Gather Support */
#define RH_RESP_RING_SG                 0x54
#define RH_IBI_STATUS_RING_SG           0x58
#define RH_IBI_DATA_RING_SG             0x5c
#define RING_SG_BLP                     BIT(31) /* Buffer Vs. List Pointer */
#define RING_SG_LIST_SIZE               GENMASK(15, 0)

/*
 * Data Buffer Descriptor (in memory)
 */

#define DATA_BUF_BLP                    BIT(31) /* Buffer Vs. List Pointer */
#define DATA_BUF_IOC                    BIT(30) /* Interrupt on Completion */
#define DATA_BUF_BLOCK_SIZE             GENMASK(15, 0)

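/*
 * Driver-side bookkeeping for one ring bundle: the DMA-coherent command,
 * response and IBI status rings, the (streaming-mapped) IBI data chunk
 * pool, and the software pointers that shadow the hardware ring state.
 */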
struct hci_rh_data {
        void __iomem *regs;
        void *xfer, *resp, *ibi_status, *ibi_data;
        dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
        unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
        unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
        unsigned int done_ptr, ibi_chunk_ptr;
        struct hci_xfer **src_xfers;
        spinlock_t lock;
        struct completion op_done;
};

struct hci_rings_data {
        unsigned int total;
        struct hci_rh_data headers[];
};

struct hci_dma_dev_ibi_data {
        struct i3c_generic_ibi_pool *pool;
        unsigned int max_len;
};

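/*
 * Split a dma_addr_t across the 32-bit LO/HI halves expected by the
 * *_RING_BASE_LO/HI register pairs.
 */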
static inline u32 lo32(dma_addr_t physaddr)
{
        return physaddr;
}

static inline u32 hi32(dma_addr_t physaddr)
{
        /* trickery to avoid compiler warnings on 32-bit build targets */
        if (sizeof(dma_addr_t) > 4) {
                u64 hi = physaddr;
                return hi >> 32;
        }
        return 0;
}

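/*
 * Disable all rings and release every DMA allocation and mapping made by
 * hci_dma_init(). This is also the unwind path when init fails midway,
 * hence each resource is checked before being freed.
 */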
static void hci_dma_cleanup(struct i3c_hci *hci)
{
        struct hci_rings_data *rings = hci->io_data;
        struct hci_rh_data *rh;
        unsigned int i;

        if (!rings)
                return;

        for (i = 0; i < rings->total; i++) {
                rh = &rings->headers[i];

                rh_reg_write(RING_CONTROL, 0);
                rh_reg_write(CR_SETUP, 0);
                rh_reg_write(IBI_SETUP, 0);
                rh_reg_write(INTR_SIGNAL_ENABLE, 0);

                if (rh->xfer)
                        dma_free_coherent(&hci->master.dev,
                                          rh->xfer_struct_sz * rh->xfer_entries,
                                          rh->xfer, rh->xfer_dma);
                if (rh->resp)
                        dma_free_coherent(&hci->master.dev,
                                          rh->resp_struct_sz * rh->xfer_entries,
                                          rh->resp, rh->resp_dma);
                kfree(rh->src_xfers);
                if (rh->ibi_status)
                        dma_free_coherent(&hci->master.dev,
                                          rh->ibi_status_sz * rh->ibi_status_entries,
                                          rh->ibi_status, rh->ibi_status_dma);
                if (rh->ibi_data_dma)
                        dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
                                         rh->ibi_chunk_sz * rh->ibi_chunks_total,
                                         DMA_FROM_DEVICE);
                kfree(rh->ibi_data);
        }

        rhs_reg_write(CONTROL, 0);

        kfree(rings);
        hci->io_data = NULL;
}

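/*
 * Discover how many ring bundles the HC implements, allocate and program
 * the command/response rings (and, for the first IBI_RINGS rings, the IBI
 * status ring and data chunk pool), then enable each ring.
 */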
static int hci_dma_init(struct i3c_hci *hci)
{
        struct hci_rings_data *rings;
        struct hci_rh_data *rh;
        u32 regval;
        unsigned int i, nr_rings, xfers_sz, resps_sz;
        unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
        int ret;

        regval = rhs_reg_read(CONTROL);
        nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
        dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
        if (unlikely(nr_rings > 8)) {
                dev_err(&hci->master.dev, "number of rings should be <= 8\n");
                nr_rings = 8;
        }
        if (nr_rings > XFER_RINGS)
                nr_rings = XFER_RINGS;
        rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);
        if (!rings)
                return -ENOMEM;
        hci->io_data = rings;
        rings->total = nr_rings;

        for (i = 0; i < rings->total; i++) {
                u32 offset = rhs_reg_read(RHn_OFFSET(i));

                dev_info(&hci->master.dev, "Ring %d at offset %#x\n", i, offset);
                ret = -EINVAL;
                if (!offset)
                        goto err_out;
                rh = &rings->headers[i];
                rh->regs = hci->base_regs + offset;
                spin_lock_init(&rh->lock);
                init_completion(&rh->op_done);

                rh->xfer_entries = XFER_RING_ENTRIES;

                regval = rh_reg_read(CR_SETUP);
                rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
                rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
                DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
                    rh->xfer_struct_sz, rh->resp_struct_sz);
                xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
                resps_sz = rh->resp_struct_sz * rh->xfer_entries;

                rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
                                              &rh->xfer_dma, GFP_KERNEL);
                rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
                                              &rh->resp_dma, GFP_KERNEL);
                rh->src_xfers =
                        kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
                                      GFP_KERNEL);
                ret = -ENOMEM;
                if (!rh->xfer || !rh->resp || !rh->src_xfers)
                        goto err_out;

                rh_reg_write(CMD_RING_BASE_LO, lo32(rh->xfer_dma));
                rh_reg_write(CMD_RING_BASE_HI, hi32(rh->xfer_dma));
                rh_reg_write(RESP_RING_BASE_LO, lo32(rh->resp_dma));
                rh_reg_write(RESP_RING_BASE_HI, hi32(rh->resp_dma));

                regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
                rh_reg_write(CR_SETUP, regval);

                rh_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
                rh_reg_write(INTR_SIGNAL_ENABLE, INTR_IBI_READY |
                                                 INTR_TRANSFER_COMPLETION |
                                                 INTR_RING_OP |
                                                 INTR_TRANSFER_ERR |
                                                 INTR_WARN_INS_STOP_MODE |
                                                 INTR_IBI_RING_FULL |
                                                 INTR_TRANSFER_ABORT);

                /* IBIs */

                if (i >= IBI_RINGS)
                        goto ring_ready;

                regval = rh_reg_read(IBI_SETUP);
                rh->ibi_status_sz = FIELD_GET(IBI_STATUS_STRUCT_SIZE, regval);
                rh->ibi_status_entries = IBI_STATUS_RING_ENTRIES;
                rh->ibi_chunks_total = IBI_CHUNK_POOL_SIZE;

                rh->ibi_chunk_sz = dma_get_cache_alignment();
                rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
                BUG_ON(rh->ibi_chunk_sz > 256);

                ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
                ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;

                rh->ibi_status =
                        dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
                                           &rh->ibi_status_dma, GFP_KERNEL);
                rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
                ret = -ENOMEM;
                if (!rh->ibi_status || !rh->ibi_data)
                        goto err_out;
                rh->ibi_data_dma =
                        dma_map_single(&hci->master.dev, rh->ibi_data,
                                       ibi_data_ring_sz, DMA_FROM_DEVICE);
                if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
                        rh->ibi_data_dma = 0;
                        ret = -ENOMEM;
                        goto err_out;
                }

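                /*
                 * IBI_DATA_CHUNK_SIZE encodes the chunk size as
                 * ilog2(size) - 2, e.g. 64-byte chunks are written to the
                 * register field as ilog2(64) - 2 = 4.
                 */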
                regval = FIELD_PREP(IBI_STATUS_RING_SIZE,
                                    rh->ibi_status_entries) |
                         FIELD_PREP(IBI_DATA_CHUNK_SIZE,
                                    ilog2(rh->ibi_chunk_sz) - 2) |
                         FIELD_PREP(IBI_DATA_CHUNK_COUNT,
                                    rh->ibi_chunks_total);
                rh_reg_write(IBI_SETUP, regval);

                regval = rh_reg_read(INTR_SIGNAL_ENABLE);
                regval |= INTR_IBI_READY;
                rh_reg_write(INTR_SIGNAL_ENABLE, regval);

ring_ready:
                rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
        }

        regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
        rhs_reg_write(CONTROL, regval);
        return 0;

err_out:
        hci_dma_cleanup(hci);
        return ret;
}

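/*
 * Undo the streaming DMA mappings of the first n transfers in xfer_list.
 */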
static void hci_dma_unmap_xfer(struct i3c_hci *hci,
                               struct hci_xfer *xfer_list, unsigned int n)
{
        struct hci_xfer *xfer;
        unsigned int i;

        for (i = 0; i < n; i++) {
                xfer = xfer_list + i;
                dma_unmap_single(&hci->master.dev,
                                 xfer->data_dma, xfer->data_len,
                                 xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
}

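/*
 * Enqueue transfers on the command ring: write one command descriptor and
 * one data buffer descriptor per transfer at the software enqueue pointer,
 * then publish the new enqueue pointer to the hardware in a single
 * register update so the controller sees the whole batch at once.
 */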
static int hci_dma_queue_xfer(struct i3c_hci *hci,
                              struct hci_xfer *xfer_list, int n)
{
        struct hci_rings_data *rings = hci->io_data;
        struct hci_rh_data *rh;
        unsigned int i, ring, enqueue_ptr;
        u32 op1_val, op2_val;

        /* For now we only use ring 0 */
        ring = 0;
        rh = &rings->headers[ring];

        op1_val = rh_reg_read(RING_OPERATION1);
        enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
        for (i = 0; i < n; i++) {
                struct hci_xfer *xfer = xfer_list + i;
                u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;

                /* store cmd descriptor */
                *ring_data++ = xfer->cmd_desc[0];
                *ring_data++ = xfer->cmd_desc[1];
                if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
                        *ring_data++ = xfer->cmd_desc[2];
                        *ring_data++ = xfer->cmd_desc[3];
                }

                /* first word of Data Buffer Descriptor Structure */
                if (!xfer->data)
                        xfer->data_len = 0;
                *ring_data++ =
                        FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
                        ((i == n - 1) ? DATA_BUF_IOC : 0);

                /* 2nd and 3rd words of Data Buffer Descriptor Structure */
                if (xfer->data) {
                        xfer->data_dma =
                                dma_map_single(&hci->master.dev,
                                               xfer->data,
                                               xfer->data_len,
                                               xfer->rnw ?
                                                  DMA_FROM_DEVICE :
                                                  DMA_TO_DEVICE);
                        if (dma_mapping_error(&hci->master.dev,
                                              xfer->data_dma)) {
                                hci_dma_unmap_xfer(hci, xfer_list, i);
                                return -ENOMEM;
                        }
                        *ring_data++ = lo32(xfer->data_dma);
                        *ring_data++ = hi32(xfer->data_dma);
                } else {
                        *ring_data++ = 0;
                        *ring_data++ = 0;
                }

                /* remember corresponding xfer struct */
                rh->src_xfers[enqueue_ptr] = xfer;
                /* remember corresponding ring/entry for this xfer structure */
                xfer->ring_number = ring;
                xfer->ring_entry = enqueue_ptr;

                enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;

                /*
                 * We may update the hardware view of the enqueue pointer
                 * only as long as it doesn't catch up with the dequeue
                 * pointer: enqueue == dequeue means the ring is empty, so
                 * one entry must always be left unused to distinguish a
                 * full ring from an empty one.
                 */
                op2_val = rh_reg_read(RING_OPERATION2);
                if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
                        /* the ring is full: back out everything mapped so far */
                        hci_dma_unmap_xfer(hci, xfer_list, i + 1);
                        return -EBUSY;
                }
        }

        /* take care to update the hardware enqueue pointer atomically */
        spin_lock_irq(&rh->lock);
        op1_val = rh_reg_read(RING_OPERATION1);
        op1_val &= ~RING_OP1_CR_ENQ_PTR;
        op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
        rh_reg_write(RING_OPERATION1, op1_val);
        spin_unlock_irq(&rh->lock);

        return 0;
}

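/*
 * Cancel queued transfers that have not completed yet: abort the ring,
 * overwrite the descriptors of still-pending entries with no-ops, then
 * restart the ring. Returns true if at least one entry was revoked.
 */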
static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
                                 struct hci_xfer *xfer_list, int n)
{
        struct hci_rings_data *rings = hci->io_data;
        struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
        unsigned int i;
        bool did_unqueue = false;

        /* stop the ring */
        rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
        if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
                /*
                 * We're deep in it if this condition is ever met.
                 * Hardware might still be writing to memory, etc.
                 * Better suspend the world than risk silent corruption.
                 */
                dev_crit(&hci->master.dev, "unable to abort the ring\n");
                BUG();
        }

        for (i = 0; i < n; i++) {
                struct hci_xfer *xfer = xfer_list + i;
                int idx = xfer->ring_entry;

                /*
                 * At the time the abort happened, the xfer might have
                 * completed already. If not then replace corresponding
                 * descriptor entries with a no-op.
                 */
                if (idx >= 0) {
                        u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;

                        /* store no-op cmd descriptor */
                        *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
                        *ring_data++ = 0;
                        if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
                                *ring_data++ = 0;
                                *ring_data++ = 0;
                        }

                        /* disassociate this xfer struct */
                        rh->src_xfers[idx] = NULL;

                        /* and unmap it */
                        hci_dma_unmap_xfer(hci, xfer, 1);

                        did_unqueue = true;
                }
        }

        /* restart the ring */
        rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);

        return did_unqueue;
}

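/*
 * Process completed transfers: walk the response ring from our software
 * done pointer up to the hardware dequeue pointer, hand each response
 * back to its originating hci_xfer, then report the consumed entries
 * through the software dequeue pointer.
 */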
static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
{
        u32 op1_val, op2_val, resp, *ring_resp;
        unsigned int tid, done_ptr = rh->done_ptr;
        struct hci_xfer *xfer;

        for (;;) {
                op2_val = rh_reg_read(RING_OPERATION2);
                if (done_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val))
                        break;

                ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
                resp = *ring_resp;
                tid = RESP_TID(resp);
                DBG("resp = 0x%08x", resp);

                xfer = rh->src_xfers[done_ptr];
                if (!xfer) {
                        DBG("orphaned ring entry");
                } else {
                        hci_dma_unmap_xfer(hci, xfer, 1);
                        xfer->ring_entry = -1;
                        xfer->response = resp;
                        if (tid != xfer->cmd_tid) {
                                dev_err(&hci->master.dev,
                                        "response tid=%d when expecting %d\n",
                                        tid, xfer->cmd_tid);
                                /* TODO: do something about it? */
                        }
                        if (xfer->completion)
                                complete(xfer->completion);
                }

                done_ptr = (done_ptr + 1) % rh->xfer_entries;
                rh->done_ptr = done_ptr;
        }

        /* take care to update the software dequeue pointer atomically */
        spin_lock(&rh->lock);
        op1_val = rh_reg_read(RING_OPERATION1);
        op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
        op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
        rh_reg_write(RING_OPERATION1, op1_val);
        spin_unlock(&rh->lock);
}

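/*
 * Per-device IBI setup: allocate a generic slot pool as described by req
 * and remember the max payload length for sanity checking incoming IBIs.
 */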
static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
                               const struct i3c_ibi_setup *req)
{
        struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
        struct i3c_generic_ibi_pool *pool;
        struct hci_dma_dev_ibi_data *dev_ibi;

        dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
        if (!dev_ibi)
                return -ENOMEM;
        pool = i3c_generic_ibi_alloc_pool(dev, req);
        if (IS_ERR(pool)) {
                kfree(dev_ibi);
                return PTR_ERR(pool);
        }
        dev_ibi->pool = pool;
        dev_ibi->max_len = req->max_payload_len;
        dev_data->ibi_data = dev_ibi;
        return 0;
}

static void hci_dma_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
{
        struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
        struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

        dev_data->ibi_data = NULL;
        i3c_generic_ibi_free_pool(dev_ibi->pool);
        kfree(dev_ibi);
}

static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
                                     struct i3c_dev_desc *dev,
                                     struct i3c_ibi_slot *slot)
{
        struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
        struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

        i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}

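/*
 * Handle one incoming IBI: scan the status ring until the entry flagged
 * IBI_LAST_STATUS is found, validate the originating address, copy the
 * payload out of the (possibly wrapping) chunk ring into a slot from the
 * device's pool, and return the consumed status entries and data chunks
 * to the hardware.
 */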
static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
        struct i3c_dev_desc *dev;
        struct i3c_hci_dev_data *dev_data;
        struct hci_dma_dev_ibi_data *dev_ibi;
        struct i3c_ibi_slot *slot;
        u32 op1_val, op2_val, ibi_status_error;
        unsigned int ptr, enq_ptr, deq_ptr;
        unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
        int ibi_addr, last_ptr;
        void *ring_ibi_data;
        dma_addr_t ring_ibi_data_dma;

        op1_val = rh_reg_read(RING_OPERATION1);
        deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);

        op2_val = rh_reg_read(RING_OPERATION2);
        enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);

        ibi_status_error = 0;
        ibi_addr = -1;
        ibi_chunks = 0;
        ibi_size = 0;
        last_ptr = -1;

        /* let's find out all we can about this IBI */
        for (ptr = deq_ptr; ptr != enq_ptr;
             ptr = (ptr + 1) % rh->ibi_status_entries) {
                u32 ibi_status, *ring_ibi_status;
                unsigned int chunks;

                ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
                ibi_status = *ring_ibi_status;
                DBG("status = %#x", ibi_status);

                if (ibi_status_error) {
                        /* we no longer care */
                } else if (ibi_status & IBI_ERROR) {
                        ibi_status_error = ibi_status;
                } else if (ibi_addr == -1) {
                        ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
                } else if (ibi_addr != FIELD_GET(IBI_TARGET_ADDR, ibi_status)) {
                        /* the address changed unexpectedly */
                        ibi_status_error = ibi_status;
                }

                chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
                ibi_chunks += chunks;
                if (!(ibi_status & IBI_LAST_STATUS)) {
                        ibi_size += chunks * rh->ibi_chunk_sz;
                } else {
                        ibi_size += FIELD_GET(IBI_DATA_LENGTH, ibi_status);
                        last_ptr = ptr;
                        break;
                }
        }

        /* validate what we've got */

        if (last_ptr == -1) {
                /* this IBI sequence is not yet complete */
                DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
                return;
        }
        deq_ptr = last_ptr + 1;
        deq_ptr %= rh->ibi_status_entries;

        if (ibi_status_error) {
                dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
                goto done;
        }

        /* determine who this is for */
        dev = i3c_hci_addr_to_dev(hci, ibi_addr);
        if (!dev) {
                dev_err(&hci->master.dev,
                        "IBI for unknown device %#x\n", ibi_addr);
                goto done;
        }

        dev_data = i3c_dev_get_master_data(dev);
        dev_ibi = dev_data->ibi_data;
        if (ibi_size > dev_ibi->max_len) {
                dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
                        ibi_size, dev_ibi->max_len);
                goto done;
        }

        /*
         * This ring model is not suitable for zero-copy processing of IBIs.
         * We have the data chunk ring wrap-around to deal with, meaning
         * that the payload might span multiple chunks beginning at the
         * end of the ring and wrapping around to the start of the ring.
         * Furthermore there is no guarantee that those chunks will be
         * released in order and in a timely manner by the upper driver.
         * So let's just copy them to a discrete buffer. In practice they're
         * supposed to be small anyway.
         */
        slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
        if (!slot) {
                dev_err(&hci->master.dev, "no free slot for IBI\n");
                goto done;
        }

        /* copy first part of the payload */
        ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
        ring_ibi_data = rh->ibi_data + ibi_data_offset;
        ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
        first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
                        * rh->ibi_chunk_sz;
        if (first_part > ibi_size)
                first_part = ibi_size;
        dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
                                first_part, DMA_FROM_DEVICE);
        memcpy(slot->data, ring_ibi_data, first_part);

        /* copy second part if any */
        if (ibi_size > first_part) {
                /* we wrap back to the start and copy remaining data */
                ring_ibi_data = rh->ibi_data;
                ring_ibi_data_dma = rh->ibi_data_dma;
                dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
                                        ibi_size - first_part, DMA_FROM_DEVICE);
                memcpy(slot->data + first_part, ring_ibi_data,
                       ibi_size - first_part);
        }

        /* submit it */
        slot->dev = dev;
        slot->len = ibi_size;
        i3c_master_queue_ibi(dev, slot);

done:
        /* take care to update the ibi dequeue pointer atomically */
        spin_lock(&rh->lock);
        op1_val = rh_reg_read(RING_OPERATION1);
        op1_val &= ~RING_OP1_IBI_DEQ_PTR;
        op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
        rh_reg_write(RING_OPERATION1, op1_val);
        spin_unlock(&rh->lock);

        /* update the chunk pointer */
        rh->ibi_chunk_ptr += ibi_chunks;
        rh->ibi_chunk_ptr %= rh->ibi_chunks_total;

        /* and tell the hardware about freed chunks */
        rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
}

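/*
 * Ring-level interrupt dispatch: the caller passes a bitmask of rings with
 * pending interrupts; for each one, acknowledge its status bits and fan
 * out to the IBI, transfer completion and ring-operation handlers.
 */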
static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
{
        struct hci_rings_data *rings = hci->io_data;
        unsigned int i;
        bool handled = false;

        for (i = 0; mask && i < 8; i++) {
                struct hci_rh_data *rh;
                u32 status;

                if (!(mask & BIT(i)))
                        continue;
                mask &= ~BIT(i);

                rh = &rings->headers[i];
                status = rh_reg_read(INTR_STATUS);
                DBG("rh%d status: %#x", i, status);
                if (!status)
                        continue;
                rh_reg_write(INTR_STATUS, status);

                if (status & INTR_IBI_READY)
                        hci_dma_process_ibi(hci, rh);
                if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR))
                        hci_dma_xfer_done(hci, rh);
                if (status & INTR_RING_OP)
                        complete(&rh->op_done);

                if (status & INTR_TRANSFER_ABORT)
                        dev_notice_ratelimited(&hci->master.dev,
                                "ring %d: Transfer Aborted\n", i);
                if (status & INTR_WARN_INS_STOP_MODE)
                        dev_warn_ratelimited(&hci->master.dev,
                                "ring %d: Inserted Stop on Mode Change\n", i);
                if (status & INTR_IBI_RING_FULL)
                        dev_err_ratelimited(&hci->master.dev,
                                "ring %d: IBI Ring Full Condition\n", i);

                handled = true;
        }

        return handled;
}

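/* DMA-based I/O operations, exposed to the rest of the mipi-i3c-hci driver */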
const struct hci_io_ops mipi_i3c_hci_dma = {
        .init                   = hci_dma_init,
        .cleanup                = hci_dma_cleanup,
        .queue_xfer             = hci_dma_queue_xfer,
        .dequeue_xfer           = hci_dma_dequeue_xfer,
        .irq_handler            = hci_dma_irq_handler,
        .request_ibi            = hci_dma_request_ibi,
        .free_ibi               = hci_dma_free_ibi,
        .recycle_ibi_slot       = hci_dma_recycle_ibi_slot,
};