GNU Linux-libre 5.10.153-gnu1
releases.git: drivers/bus/mhi/core/main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4  *
5  */
6
7 #include <linux/device.h>
8 #include <linux/dma-direction.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/interrupt.h>
11 #include <linux/list.h>
12 #include <linux/mhi.h>
13 #include <linux/module.h>
14 #include <linux/skbuff.h>
15 #include <linux/slab.h>
16 #include "internal.h"
17
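/*
 * Register access helpers. All register reads and writes are routed
 * through the controller-supplied read_reg()/write_reg() callbacks so
 * that the bus glue (e.g. a PCI controller driver) decides how the
 * access is actually performed.
 */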
18 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
19                               void __iomem *base, u32 offset, u32 *out)
20 {
21         return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
22 }
23
24 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
25                                     void __iomem *base, u32 offset,
26                                     u32 mask, u32 shift, u32 *out)
27 {
28         u32 tmp;
29         int ret;
30
31         ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
32         if (ret)
33                 return ret;
34
35         *out = (tmp & mask) >> shift;
36
37         return 0;
38 }
39
40 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
41                    u32 offset, u32 val)
42 {
43         mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
44 }
45
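/*
 * Read-modify-write of a register bit field: the current value is read,
 * the bits covered by mask are cleared, and val is shifted into place.
 * For example, with mask = 0xFF00, shift = 8 and val = 0x12, bits 15:8
 * are replaced with 0x12 while every other bit is preserved. If the
 * initial read fails, the write is skipped.
 */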
46 void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
47                          u32 offset, u32 mask, u32 shift, u32 val)
48 {
49         int ret;
50         u32 tmp;
51
52         ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
53         if (ret)
54                 return;
55
56         tmp &= ~mask;
57         tmp |= (val << shift);
58         mhi_write_reg(mhi_cntrl, base, offset, tmp);
59 }
60
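/*
 * Ring a doorbell by writing the 64-bit ring pointer as two 32-bit MMIO
 * writes: the upper half at offset 4 first, then the lower half at
 * offset 0.
 */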
61 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
62                   dma_addr_t db_val)
63 {
64         mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
65         mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
66 }
67
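/*
 * Burst-mode doorbell: the doorbell register is only written while
 * db_cfg->db_mode is set, and the flag is cleared after the write. It is
 * re-armed when the device sends an OOB or DB_MODE completion (see the
 * MHI_EV_CC_DB_MODE handling in parse_xfer_event()).
 */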
68 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
69                      struct db_cfg *db_cfg,
70                      void __iomem *db_addr,
71                      dma_addr_t db_val)
72 {
73         if (db_cfg->db_mode) {
74                 db_cfg->db_val = db_val;
75                 mhi_write_db(mhi_cntrl, db_addr, db_val);
76                 db_cfg->db_mode = 0;
77         }
78 }
79
80 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
81                              struct db_cfg *db_cfg,
82                              void __iomem *db_addr,
83                              dma_addr_t db_val)
84 {
85         db_cfg->db_val = db_val;
86         mhi_write_db(mhi_cntrl, db_addr, db_val);
87 }
88
89 void mhi_ring_er_db(struct mhi_event *mhi_event)
90 {
91         struct mhi_ring *ring = &mhi_event->ring;
92
93         mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
94                                      ring->db_addr, *ring->ctxt_wp);
95 }
96
97 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
98 {
99         dma_addr_t db;
100         struct mhi_ring *ring = &mhi_cmd->ring;
101
102         db = ring->iommu_base + (ring->wp - ring->base);
103         *ring->ctxt_wp = db;
104         mhi_write_db(mhi_cntrl, ring->db_addr, db);
105 }
106
107 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
108                       struct mhi_chan *mhi_chan)
109 {
110         struct mhi_ring *ring = &mhi_chan->tre_ring;
111         dma_addr_t db;
112
113         db = ring->iommu_base + (ring->wp - ring->base);
114         *ring->ctxt_wp = db;
115         mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
116                                     ring->db_addr, db);
117 }
118
119 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
120 {
121         u32 exec;
122         int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
123
124         return (ret) ? MHI_EE_MAX : exec;
125 }
126
127 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
128 {
129         u32 state;
130         int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
131                                      MHISTATUS_MHISTATE_MASK,
132                                      MHISTATUS_MHISTATE_SHIFT, &state);
133         return ret ? MHI_STATE_MAX : state;
134 }
135
136 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
137                          struct mhi_buf_info *buf_info)
138 {
139         buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
140                                           buf_info->v_addr, buf_info->len,
141                                           buf_info->dir);
142         if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
143                 return -ENOMEM;
144
145         return 0;
146 }
147
148 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
149                           struct mhi_buf_info *buf_info)
150 {
151         void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
152                                        &buf_info->p_addr, GFP_ATOMIC);
153
154         if (!buf)
155                 return -ENOMEM;
156
157         if (buf_info->dir == DMA_TO_DEVICE)
158                 memcpy(buf, buf_info->v_addr, buf_info->len);
159
160         buf_info->bb_addr = buf;
161
162         return 0;
163 }
164
165 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
166                             struct mhi_buf_info *buf_info)
167 {
168         dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
169                          buf_info->dir);
170 }
171
172 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
173                              struct mhi_buf_info *buf_info)
174 {
175         if (buf_info->dir == DMA_FROM_DEVICE)
176                 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
177
178         mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
179                           buf_info->p_addr);
180 }
181
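/*
 * Number of free elements in a ring. One slot is always left unused so
 * that a full ring (WP one element behind RP) can be distinguished from
 * an empty one (WP == RP). For example, in a ring of 8 elements with RP
 * at element 2 and WP at element 5, elements 2-4 are in flight and
 * (2 - 0) + (8 - 5) - 1 = 4 elements are still free.
 */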
182 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
183                                       struct mhi_ring *ring)
184 {
185         int nr_el;
186
187         if (ring->wp < ring->rp) {
188                 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
189         } else {
190                 nr_el = (ring->rp - ring->base) / ring->el_size;
191                 nr_el += ((ring->base + ring->len - ring->wp) /
192                           ring->el_size) - 1;
193         }
194
195         return nr_el;
196 }
197
198 static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
199 {
200         return (addr - ring->iommu_base) + ring->base;
201 }
202
203 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
204                                  struct mhi_ring *ring)
205 {
206         ring->wp += ring->el_size;
207         if (ring->wp >= (ring->base + ring->len))
208                 ring->wp = ring->base;
209         /* Ensure the WP update is ordered before subsequent writes */
210         smp_wmb();
211 }
212
213 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
214                                  struct mhi_ring *ring)
215 {
216         ring->rp += ring->el_size;
217         if (ring->rp >= (ring->base + ring->len))
218                 ring->rp = ring->base;
219         /* Ensure the RP update is ordered before subsequent writes */
220         smp_wmb();
221 }
222
223 static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
224 {
225         return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
226 }
227
228 int mhi_destroy_device(struct device *dev, void *data)
229 {
230         struct mhi_chan *ul_chan, *dl_chan;
231         struct mhi_device *mhi_dev;
232         struct mhi_controller *mhi_cntrl;
233         enum mhi_ee_type ee = MHI_EE_MAX;
234
235         if (dev->bus != &mhi_bus_type)
236                 return 0;
237
238         mhi_dev = to_mhi_device(dev);
239         mhi_cntrl = mhi_dev->mhi_cntrl;
240
241         /* Only destroy virtual devices that are attached to the bus */
242         if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
243                 return 0;
244
245         ul_chan = mhi_dev->ul_chan;
246         dl_chan = mhi_dev->dl_chan;
247
248         /*
249          * If an execution environment is specified, remove only those devices
250          * that were started in it (based on the channels' ee_mask) as we move
251          * on to a different execution environment.
252          */
253         if (data)
254                 ee = *(enum mhi_ee_type *)data;
255
256         /*
257          * For the suspend and resume case, this function will get called
258          * without mhi_unregister_controller(). Hence, we need to drop the
259          * references to mhi_dev created for ul and dl channels. We can
260          * be sure that there will be no instances of mhi_dev left after
261          * this.
262          */
263         if (ul_chan) {
264                 if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
265                         return 0;
266
267                 put_device(&ul_chan->mhi_dev->dev);
268         }
269
270         if (dl_chan) {
271                 if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
272                         return 0;
273
274                 put_device(&dl_chan->mhi_dev->dev);
275         }
276
277         dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
278                  mhi_dev->name);
279
280         /* Notify the client and remove the device from MHI bus */
281         device_del(dev);
282         put_device(dev);
283
284         return 0;
285 }
286
287 void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
288 {
289         struct mhi_driver *mhi_drv;
290
291         if (!mhi_dev->dev.driver)
292                 return;
293
294         mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
295
296         if (mhi_drv->status_cb)
297                 mhi_drv->status_cb(mhi_dev, cb_reason);
298 }
299 EXPORT_SYMBOL_GPL(mhi_notify);
300
301 /* Bind MHI channels to MHI devices */
302 void mhi_create_devices(struct mhi_controller *mhi_cntrl)
303 {
304         struct mhi_chan *mhi_chan;
305         struct mhi_device *mhi_dev;
306         struct device *dev = &mhi_cntrl->mhi_dev->dev;
307         int i, ret;
308
309         mhi_chan = mhi_cntrl->mhi_chan;
310         for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
311                 if (!mhi_chan->configured || mhi_chan->mhi_dev ||
312                     !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
313                         continue;
314                 mhi_dev = mhi_alloc_device(mhi_cntrl);
315                 if (IS_ERR(mhi_dev))
316                         return;
317
318                 mhi_dev->dev_type = MHI_DEVICE_XFER;
319                 switch (mhi_chan->dir) {
320                 case DMA_TO_DEVICE:
321                         mhi_dev->ul_chan = mhi_chan;
322                         mhi_dev->ul_chan_id = mhi_chan->chan;
323                         break;
324                 case DMA_FROM_DEVICE:
325                         /* We use dl_chan as offload channels */
326                         mhi_dev->dl_chan = mhi_chan;
327                         mhi_dev->dl_chan_id = mhi_chan->chan;
328                         break;
329                 default:
330                         dev_err(dev, "Direction not supported\n");
331                         put_device(&mhi_dev->dev);
332                         return;
333                 }
334
335                 get_device(&mhi_dev->dev);
336                 mhi_chan->mhi_dev = mhi_dev;
337
338                 /* Check whether the next channel shares the same name (UL/DL pair) */
339                 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
340                         if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
341                                 i++;
342                                 mhi_chan++;
343                                 if (mhi_chan->dir == DMA_TO_DEVICE) {
344                                         mhi_dev->ul_chan = mhi_chan;
345                                         mhi_dev->ul_chan_id = mhi_chan->chan;
346                                 } else {
347                                         mhi_dev->dl_chan = mhi_chan;
348                                         mhi_dev->dl_chan_id = mhi_chan->chan;
349                                 }
350                                 get_device(&mhi_dev->dev);
351                                 mhi_chan->mhi_dev = mhi_dev;
352                         }
353                 }
354
355                 /* The channel name is the same for both UL and DL */
356                 mhi_dev->name = mhi_chan->name;
357                 dev_set_name(&mhi_dev->dev, "%s_%s",
358                              dev_name(mhi_cntrl->cntrl_dev),
359                              mhi_dev->name);
360
361                 /* Init wakeup source if available */
362                 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
363                         device_init_wakeup(&mhi_dev->dev, true);
364
365                 ret = device_add(&mhi_dev->dev);
366                 if (ret)
367                         put_device(&mhi_dev->dev);
368         }
369 }
370
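/*
 * Per-event-ring interrupt handler. Validate the device's read pointer
 * and, if there are new events, either notify the client directly (for
 * client-managed event rings) or schedule the event tasklet to process
 * them.
 */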
371 irqreturn_t mhi_irq_handler(int irq_number, void *dev)
372 {
373         struct mhi_event *mhi_event = dev;
374         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
375         struct mhi_event_ctxt *er_ctxt =
376                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
377         struct mhi_ring *ev_ring = &mhi_event->ring;
378         dma_addr_t ptr = er_ctxt->rp;
379         void *dev_rp;
380
381         if (!is_valid_ring_ptr(ev_ring, ptr)) {
382                 dev_err(&mhi_cntrl->mhi_dev->dev,
383                         "Event ring rp points outside of the event ring\n");
384                 return IRQ_HANDLED;
385         }
386
387         dev_rp = mhi_to_virtual(ev_ring, ptr);
388
389         /* Only proceed if event ring has pending events */
390         if (ev_ring->rp == dev_rp)
391                 return IRQ_HANDLED;
392
393         /* For client managed event ring, notify pending data */
394         if (mhi_event->cl_manage) {
395                 struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
396                 struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
397
398                 if (mhi_dev)
399                         mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
400         } else {
401                 tasklet_schedule(&mhi_event->task);
402         }
403
404         return IRQ_HANDLED;
405 }
406
407 irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
408 {
409         struct mhi_controller *mhi_cntrl = priv;
410         struct device *dev = &mhi_cntrl->mhi_dev->dev;
411         enum mhi_state state = MHI_STATE_MAX;
412         enum mhi_pm_state pm_state = 0;
413         enum mhi_ee_type ee = 0;
414
415         write_lock_irq(&mhi_cntrl->pm_lock);
416         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
417                 write_unlock_irq(&mhi_cntrl->pm_lock);
418                 goto exit_intvec;
419         }
420
421         state = mhi_get_mhi_state(mhi_cntrl);
422         ee = mhi_cntrl->ee;
423         mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
424         dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
425                 TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
426                 TO_MHI_STATE_STR(state));
427
428         if (state == MHI_STATE_SYS_ERR) {
429                 dev_dbg(dev, "System error detected\n");
430                 pm_state = mhi_tryset_pm_state(mhi_cntrl,
431                                                MHI_PM_SYS_ERR_DETECT);
432         }
433         write_unlock_irq(&mhi_cntrl->pm_lock);
434
435         /* If the device supports RDDM, don't bother processing SYS error */
436         if (mhi_cntrl->rddm_image) {
437                 if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
438                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
439                         wake_up_all(&mhi_cntrl->state_event);
440                 }
441                 goto exit_intvec;
442         }
443
444         if (pm_state == MHI_PM_SYS_ERR_DETECT) {
445                 wake_up_all(&mhi_cntrl->state_event);
446
447                 /* For fatal errors, we let controller decide next step */
448                 if (MHI_IN_PBL(ee))
449                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
450                 else
451                         mhi_pm_sys_err_handler(mhi_cntrl);
452         }
453
454 exit_intvec:
455
456         return IRQ_HANDLED;
457 }
458
459 irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
460 {
461         struct mhi_controller *mhi_cntrl = dev;
462
463         /* Wake up anyone waiting for a state change */
464         wake_up_all(&mhi_cntrl->state_event);
465
466         return IRQ_WAKE_THREAD;
467 }
468
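/*
 * Return one processed event ring element to the device: advance the
 * local WP and the context WP that the device reads (wrapping both back
 * to the start of the ring when needed), then advance the local RP past
 * the element we just consumed.
 */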
469 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
470                                         struct mhi_ring *ring)
471 {
472         dma_addr_t ctxt_wp;
473
474         /* Update the WP */
475         ring->wp += ring->el_size;
476         ctxt_wp = *ring->ctxt_wp + ring->el_size;
477
478         if (ring->wp >= (ring->base + ring->len)) {
479                 ring->wp = ring->base;
480                 ctxt_wp = ring->iommu_base;
481         }
482
483         *ring->ctxt_wp = ctxt_wp;
484
485         /* Update the RP */
486         ring->rp += ring->el_size;
487         if (ring->rp >= (ring->base + ring->len))
488                 ring->rp = ring->base;
489
490         /* Ensure the ring pointer updates are ordered before subsequent writes */
491         smp_wmb();
492 }
493
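/*
 * Process a transfer completion event for a channel. For EOT/EOB/OVERFLOW
 * completions this walks the TRE ring from the local RP up to the element
 * the event points at, unmapping buffers and invoking the client's
 * xfer_cb() for each one; OOB/DB_MODE completions re-arm the doorbell
 * (db_mode) and ring the channel doorbell if work is pending.
 */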
494 static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
495                             struct mhi_tre *event,
496                             struct mhi_chan *mhi_chan)
497 {
498         struct mhi_ring *buf_ring, *tre_ring;
499         struct device *dev = &mhi_cntrl->mhi_dev->dev;
500         struct mhi_result result;
501         unsigned long flags = 0;
502         u32 ev_code;
503
504         ev_code = MHI_TRE_GET_EV_CODE(event);
505         buf_ring = &mhi_chan->buf_ring;
506         tre_ring = &mhi_chan->tre_ring;
507
508         result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
509                 -EOVERFLOW : 0;
510
511         /*
512          * If it's a DB Event then we need to grab the lock as a
513          * writer with preemption disabled, because we have to
514          * update the DB register and another thread could be
515          * doing the same.
516          */
517         if (ev_code >= MHI_EV_CC_OOB)
518                 write_lock_irqsave(&mhi_chan->lock, flags);
519         else
520                 read_lock_bh(&mhi_chan->lock);
521
522         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
523                 goto end_process_tx_event;
524
525         switch (ev_code) {
526         case MHI_EV_CC_OVERFLOW:
527         case MHI_EV_CC_EOB:
528         case MHI_EV_CC_EOT:
529         {
530                 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
531                 struct mhi_tre *local_rp, *ev_tre;
532                 void *dev_rp;
533                 struct mhi_buf_info *buf_info;
534                 u16 xfer_len;
535
536                 if (!is_valid_ring_ptr(tre_ring, ptr)) {
537                         dev_err(&mhi_cntrl->mhi_dev->dev,
538                                 "Event element points outside of the tre ring\n");
539                         break;
540                 }
541                 /* Get the TRE this event points to */
542                 ev_tre = mhi_to_virtual(tre_ring, ptr);
543
544                 dev_rp = ev_tre + 1;
545                 if (dev_rp >= (tre_ring->base + tre_ring->len))
546                         dev_rp = tre_ring->base;
547
548                 result.dir = mhi_chan->dir;
549
550                 local_rp = tre_ring->rp;
551                 while (local_rp != dev_rp) {
552                         buf_info = buf_ring->rp;
553                         /* If it's the last TRE, get length from the event */
554                         if (local_rp == ev_tre)
555                                 xfer_len = MHI_TRE_GET_EV_LEN(event);
556                         else
557                                 xfer_len = buf_info->len;
558
559                         /* Unmap if it's not pre-mapped by client */
560                         if (likely(!buf_info->pre_mapped))
561                                 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
562
563                         result.buf_addr = buf_info->cb_buf;
564
565                         /* truncate to buf len if xfer_len is larger */
566                         result.bytes_xferd =
567                                 min_t(u16, xfer_len, buf_info->len);
568                         mhi_del_ring_element(mhi_cntrl, buf_ring);
569                         mhi_del_ring_element(mhi_cntrl, tre_ring);
570                         local_rp = tre_ring->rp;
571
572                         /* notify client */
573                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
574
575                         if (mhi_chan->dir == DMA_TO_DEVICE)
576                                 atomic_dec(&mhi_cntrl->pending_pkts);
577
578                         /*
579                          * Recycle the buffer if it is pre-allocated;
580                          * if there is an error, there is not much we
581                          * can do apart from dropping the packet.
582                          */
583                         if (mhi_chan->pre_alloc) {
584                                 if (mhi_queue_buf(mhi_chan->mhi_dev,
585                                                   mhi_chan->dir,
586                                                   buf_info->cb_buf,
587                                                   buf_info->len, MHI_EOT)) {
588                                         dev_err(dev,
589                                                 "Error recycling buffer for chan:%d\n",
590                                                 mhi_chan->chan);
591                                         kfree(buf_info->cb_buf);
592                                 }
593                         }
594                 }
595                 break;
596         } /* CC_EOT */
597         case MHI_EV_CC_OOB:
598         case MHI_EV_CC_DB_MODE:
599         {
600                 unsigned long flags;
601
602                 mhi_chan->db_cfg.db_mode = 1;
603                 read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
604                 if (tre_ring->wp != tre_ring->rp &&
605                     MHI_DB_ACCESS_VALID(mhi_cntrl)) {
606                         mhi_ring_chan_db(mhi_cntrl, mhi_chan);
607                 }
608                 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
609                 break;
610         }
611         case MHI_EV_CC_BAD_TRE:
612         default:
613                 dev_err(dev, "Unknown event 0x%x\n", ev_code);
614                 break;
615         } /* switch(MHI_TRE_GET_EV_CODE(event)) */
616
617 end_process_tx_event:
618         if (ev_code >= MHI_EV_CC_OOB)
619                 write_unlock_irqrestore(&mhi_chan->lock, flags);
620         else
621                 read_unlock_bh(&mhi_chan->lock);
622
623         return 0;
624 }
625
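/*
 * Process an RSC completion event. Here the event carries a cookie,
 * which is the byte offset of the completed descriptor's buf_info
 * within the channel's buffer ring, rather than a ring pointer as in
 * parse_xfer_event().
 */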
626 static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
627                            struct mhi_tre *event,
628                            struct mhi_chan *mhi_chan)
629 {
630         struct mhi_ring *buf_ring, *tre_ring;
631         struct mhi_buf_info *buf_info;
632         struct mhi_result result;
633         int ev_code;
634         u32 cookie; /* offset to local descriptor */
635         u16 xfer_len;
636
637         buf_ring = &mhi_chan->buf_ring;
638         tre_ring = &mhi_chan->tre_ring;
639
640         ev_code = MHI_TRE_GET_EV_CODE(event);
641         cookie = MHI_TRE_GET_EV_COOKIE(event);
642         xfer_len = MHI_TRE_GET_EV_LEN(event);
643
644         /* Warn if we received an out-of-bounds cookie */
645         WARN_ON(cookie >= buf_ring->len);
646
647         buf_info = buf_ring->base + cookie;
648
649         result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
650                 -EOVERFLOW : 0;
651
652         /* truncate to buf len if xfer_len is larger */
653         result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
654         result.buf_addr = buf_info->cb_buf;
655         result.dir = mhi_chan->dir;
656
657         read_lock_bh(&mhi_chan->lock);
658
659         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
660                 goto end_process_rsc_event;
661
662         WARN_ON(!buf_info->used);
663
664         /* notify the client */
665         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
666
667         /*
668          * Note: We're arbitrarily incrementing RP even though the completion
669          * packet we just processed might not be the one at RP. We can do this
670          * because the device is guaranteed to cache descriptors in the order
671          * it receives them, so even though the completion event is for a
672          * different descriptor, we can reuse all descriptors in between.
673          * Example:
674          * The transfer ring has descriptors: A, B, C, D
675          * The last descriptor queued by the host is D (WP) and the first
676          * descriptor queued is A (RP).
677          * The completion event we just serviced is for descriptor C.
678          * Then we can safely queue descriptors to replace A, B, and C
679          * even though the host did not receive completions for A and B.
680          */
681         mhi_del_ring_element(mhi_cntrl, tre_ring);
682         buf_info->used = false;
683
684 end_process_rsc_event:
685         read_unlock_bh(&mhi_chan->lock);
686
687         return 0;
688 }
689
690 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
691                                        struct mhi_tre *tre)
692 {
693         dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
694         struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
695         struct mhi_ring *mhi_ring = &cmd_ring->ring;
696         struct mhi_tre *cmd_pkt;
697         struct mhi_chan *mhi_chan;
698         u32 chan;
699
700         if (!is_valid_ring_ptr(mhi_ring, ptr)) {
701                 dev_err(&mhi_cntrl->mhi_dev->dev,
702                         "Event element points outside of the cmd ring\n");
703                 return;
704         }
705
706         cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
707
708         chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
709
710         if (chan < mhi_cntrl->max_chan &&
711             mhi_cntrl->mhi_chan[chan].configured) {
712                 mhi_chan = &mhi_cntrl->mhi_chan[chan];
713                 write_lock_bh(&mhi_chan->lock);
714                 mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
715                 complete(&mhi_chan->completion);
716                 write_unlock_bh(&mhi_chan->lock);
717         } else {
718                 dev_err(&mhi_cntrl->mhi_dev->dev,
719                         "Completion packet for invalid channel ID: %d\n", chan);
720         }
721
722         mhi_del_ring_element(mhi_cntrl, mhi_ring);
723 }
724
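/*
 * Process events on the control event ring: bandwidth requests, MHI
 * state change notifications, command completions, execution environment
 * changes and any transfer completions that land here. Returns the
 * number of events processed, or -EIO if register access is not allowed
 * or the device's read pointer is out of range.
 */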
725 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
726                              struct mhi_event *mhi_event,
727                              u32 event_quota)
728 {
729         struct mhi_tre *dev_rp, *local_rp;
730         struct mhi_ring *ev_ring = &mhi_event->ring;
731         struct mhi_event_ctxt *er_ctxt =
732                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
733         struct mhi_chan *mhi_chan;
734         struct device *dev = &mhi_cntrl->mhi_dev->dev;
735         u32 chan;
736         int count = 0;
737         dma_addr_t ptr = er_ctxt->rp;
738
739         /*
740          * This is a quick check to avoid unnecessary event processing
741          * in case MHI is already in error state, but it's still possible
742          * to transition to error state while processing events
743          */
744         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
745                 return -EIO;
746
747         if (!is_valid_ring_ptr(ev_ring, ptr)) {
748                 dev_err(&mhi_cntrl->mhi_dev->dev,
749                         "Event ring rp points outside of the event ring\n");
750                 return -EIO;
751         }
752
753         dev_rp = mhi_to_virtual(ev_ring, ptr);
754         local_rp = ev_ring->rp;
755
756         while (dev_rp != local_rp) {
757                 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
758
759                 switch (type) {
760                 case MHI_PKT_TYPE_BW_REQ_EVENT:
761                 {
762                         struct mhi_link_info *link_info;
763
764                         link_info = &mhi_cntrl->mhi_link_info;
765                         write_lock_irq(&mhi_cntrl->pm_lock);
766                         link_info->target_link_speed =
767                                 MHI_TRE_GET_EV_LINKSPEED(local_rp);
768                         link_info->target_link_width =
769                                 MHI_TRE_GET_EV_LINKWIDTH(local_rp);
770                         write_unlock_irq(&mhi_cntrl->pm_lock);
771                         dev_dbg(dev, "Received BW_REQ event\n");
772                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
773                         break;
774                 }
775                 case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
776                 {
777                         enum mhi_state new_state;
778
779                         new_state = MHI_TRE_GET_EV_STATE(local_rp);
780
781                         dev_dbg(dev, "State change event to state: %s\n",
782                                 TO_MHI_STATE_STR(new_state));
783
784                         switch (new_state) {
785                         case MHI_STATE_M0:
786                                 mhi_pm_m0_transition(mhi_cntrl);
787                                 break;
788                         case MHI_STATE_M1:
789                                 mhi_pm_m1_transition(mhi_cntrl);
790                                 break;
791                         case MHI_STATE_M3:
792                                 mhi_pm_m3_transition(mhi_cntrl);
793                                 break;
794                         case MHI_STATE_SYS_ERR:
795                         {
796                                 enum mhi_pm_state new_state;
797
798                                 /* skip SYS_ERROR handling if RDDM supported */
799                                 if (mhi_cntrl->ee == MHI_EE_RDDM ||
800                                     mhi_cntrl->rddm_image)
801                                         break;
802
803                                 dev_dbg(dev, "System error detected\n");
804                                 write_lock_irq(&mhi_cntrl->pm_lock);
805                                 new_state = mhi_tryset_pm_state(mhi_cntrl,
806                                                         MHI_PM_SYS_ERR_DETECT);
807                                 write_unlock_irq(&mhi_cntrl->pm_lock);
808                                 if (new_state == MHI_PM_SYS_ERR_DETECT)
809                                         mhi_pm_sys_err_handler(mhi_cntrl);
810                                 break;
811                         }
812                         default:
813                                 dev_err(dev, "Invalid state: %s\n",
814                                         TO_MHI_STATE_STR(new_state));
815                         }
816
817                         break;
818                 }
819                 case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
820                         mhi_process_cmd_completion(mhi_cntrl, local_rp);
821                         break;
822                 case MHI_PKT_TYPE_EE_EVENT:
823                 {
824                         enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
825                         enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
826
827                         dev_dbg(dev, "Received EE event: %s\n",
828                                 TO_MHI_EXEC_STR(event));
829                         switch (event) {
830                         case MHI_EE_SBL:
831                                 st = DEV_ST_TRANSITION_SBL;
832                                 break;
833                         case MHI_EE_WFW:
834                         case MHI_EE_AMSS:
835                                 st = DEV_ST_TRANSITION_MISSION_MODE;
836                                 break;
837                         case MHI_EE_RDDM:
838                                 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
839                                 write_lock_irq(&mhi_cntrl->pm_lock);
840                                 mhi_cntrl->ee = event;
841                                 write_unlock_irq(&mhi_cntrl->pm_lock);
842                                 wake_up_all(&mhi_cntrl->state_event);
843                                 break;
844                         default:
845                                 dev_err(dev,
846                                         "Unhandled EE event: 0x%x\n", event);
847                         }
848                         if (st != DEV_ST_TRANSITION_MAX)
849                                 mhi_queue_state_transition(mhi_cntrl, st);
850
851                         break;
852                 }
853                 case MHI_PKT_TYPE_TX_EVENT:
854                         chan = MHI_TRE_GET_EV_CHID(local_rp);
855
856                         WARN_ON(chan >= mhi_cntrl->max_chan);
857
858                         /*
859                          * Only process the event ring elements whose channel
860                          * ID is within the maximum supported range.
861                          */
862                         if (chan < mhi_cntrl->max_chan) {
863                                 mhi_chan = &mhi_cntrl->mhi_chan[chan];
864                                 if (!mhi_chan->configured)
865                                         break;
866                                 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
867                                 event_quota--;
868                         }
869                         break;
870                 default:
871                         dev_err(dev, "Unhandled event type: %d\n", type);
872                         break;
873                 }
874
875                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
876                 local_rp = ev_ring->rp;
877
878                 ptr = er_ctxt->rp;
879                 if (!is_valid_ring_ptr(ev_ring, ptr)) {
880                         dev_err(&mhi_cntrl->mhi_dev->dev,
881                                 "Event ring rp points outside of the event ring\n");
882                         return -EIO;
883                 }
884
885                 dev_rp = mhi_to_virtual(ev_ring, ptr);
886                 count++;
887         }
888
889         read_lock_bh(&mhi_cntrl->pm_lock);
890         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
891                 mhi_ring_er_db(mhi_event);
892         read_unlock_bh(&mhi_cntrl->pm_lock);
893
894         return count;
895 }
896
897 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
898                                 struct mhi_event *mhi_event,
899                                 u32 event_quota)
900 {
901         struct mhi_tre *dev_rp, *local_rp;
902         struct mhi_ring *ev_ring = &mhi_event->ring;
903         struct mhi_event_ctxt *er_ctxt =
904                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
905         int count = 0;
906         u32 chan;
907         struct mhi_chan *mhi_chan;
908         dma_addr_t ptr = er_ctxt->rp;
909
910         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
911                 return -EIO;
912
913         if (!is_valid_ring_ptr(ev_ring, ptr)) {
914                 dev_err(&mhi_cntrl->mhi_dev->dev,
915                         "Event ring rp points outside of the event ring\n");
916                 return -EIO;
917         }
918
919         dev_rp = mhi_to_virtual(ev_ring, ptr);
920         local_rp = ev_ring->rp;
921
922         while (dev_rp != local_rp && event_quota > 0) {
923                 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
924
925                 chan = MHI_TRE_GET_EV_CHID(local_rp);
926
927                 WARN_ON(chan >= mhi_cntrl->max_chan);
928
929                 /*
930                  * Only process the event ring elements whose channel
931                  * ID is within the maximum supported range.
932                  */
933                 if (chan < mhi_cntrl->max_chan &&
934                     mhi_cntrl->mhi_chan[chan].configured) {
935                         mhi_chan = &mhi_cntrl->mhi_chan[chan];
936
937                         if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
938                                 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
939                                 event_quota--;
940                         } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
941                                 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
942                                 event_quota--;
943                         }
944                 }
945
946                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
947                 local_rp = ev_ring->rp;
948
949                 ptr = er_ctxt->rp;
950                 if (!is_valid_ring_ptr(ev_ring, ptr)) {
951                         dev_err(&mhi_cntrl->mhi_dev->dev,
952                                 "Event ring rp points outside of the event ring\n");
953                         return -EIO;
954                 }
955
956                 dev_rp = mhi_to_virtual(ev_ring, ptr);
957                 count++;
958         }
959         read_lock_bh(&mhi_cntrl->pm_lock);
960         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
961                 mhi_ring_er_db(mhi_event);
962         read_unlock_bh(&mhi_cntrl->pm_lock);
963
964         return count;
965 }
966
967 void mhi_ev_task(unsigned long data)
968 {
969         struct mhi_event *mhi_event = (struct mhi_event *)data;
970         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
971
972         /* process all pending events */
973         spin_lock_bh(&mhi_event->lock);
974         mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
975         spin_unlock_bh(&mhi_event->lock);
976 }
977
978 void mhi_ctrl_ev_task(unsigned long data)
979 {
980         struct mhi_event *mhi_event = (struct mhi_event *)data;
981         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
982         struct device *dev = &mhi_cntrl->mhi_dev->dev;
983         enum mhi_state state;
984         enum mhi_pm_state pm_state = 0;
985         int ret;
986
987         /*
988          * We can check the PM state without a lock here because there is no
989          * way the PM state can change from reg access valid to no access
990          * while this thread is executing.
991          */
992         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
993                 /*
994                  * We may have a pending event that we are not allowed to
995                  * process since we are probably in a suspended state,
996                  * so trigger a resume.
997                  */
998                 mhi_trigger_resume(mhi_cntrl);
999
1000                 return;
1001         }
1002
1003         /* Process ctrl events */
1004         ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1005
1006         /*
1007          * We received an IRQ but no events to process, maybe device went to
1008          * SYS_ERR state? Check the state to confirm.
1009          */
1010         if (!ret) {
1011                 write_lock_irq(&mhi_cntrl->pm_lock);
1012                 state = mhi_get_mhi_state(mhi_cntrl);
1013                 if (state == MHI_STATE_SYS_ERR) {
1014                         dev_dbg(dev, "System error detected\n");
1015                         pm_state = mhi_tryset_pm_state(mhi_cntrl,
1016                                                        MHI_PM_SYS_ERR_DETECT);
1017                 }
1018                 write_unlock_irq(&mhi_cntrl->pm_lock);
1019                 if (pm_state == MHI_PM_SYS_ERR_DETECT)
1020                         mhi_pm_sys_err_handler(mhi_cntrl);
1021         }
1022 }
1023
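/*
 * A ring is considered full when advancing WP by one element would make
 * it collide with RP; this is the slot that is deliberately kept empty
 * (see get_nr_avail_ring_elements()).
 */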
1024 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
1025                              struct mhi_ring *ring)
1026 {
1027         void *tmp = ring->wp + ring->el_size;
1028
1029         if (tmp >= (ring->base + ring->len))
1030                 tmp = ring->base;
1031
1032         return (tmp == ring->rp);
1033 }
1034
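/*
 * Client transmit paths. mhi_queue_skb(), mhi_queue_dma() and
 * mhi_queue_buf() follow a common pattern: bail out if the ring is full
 * or MHI is in an error state, trigger a resume if we are suspended,
 * toggle the device wake to leave M2, build a TRE with mhi_gen_tre()
 * and ring the channel doorbell when doorbell access is allowed. The
 * skb and dma variants also reject channels with pre-allocated buffers.
 */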
1035 int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1036                   struct sk_buff *skb, size_t len, enum mhi_flags mflags)
1037 {
1038         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1039         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1040                                                              mhi_dev->dl_chan;
1041         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1042         struct mhi_buf_info buf_info = { };
1043         int ret;
1044
1045         /* If MHI host pre-allocates buffers then client drivers cannot queue */
1046         if (mhi_chan->pre_alloc)
1047                 return -EINVAL;
1048
1049         if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1050                 return -ENOMEM;
1051
1052         read_lock_bh(&mhi_cntrl->pm_lock);
1053         if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
1054                 read_unlock_bh(&mhi_cntrl->pm_lock);
1055                 return -EIO;
1056         }
1057
1058         /* we're in M3 or transitioning to M3 */
1059         if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1060                 mhi_trigger_resume(mhi_cntrl);
1061
1062         /* Toggle wake to exit out of M2 */
1063         mhi_cntrl->wake_toggle(mhi_cntrl);
1064
1065         buf_info.v_addr = skb->data;
1066         buf_info.cb_buf = skb;
1067         buf_info.len = len;
1068
1069         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
1070         if (unlikely(ret)) {
1071                 read_unlock_bh(&mhi_cntrl->pm_lock);
1072                 return ret;
1073         }
1074
1075         if (mhi_chan->dir == DMA_TO_DEVICE)
1076                 atomic_inc(&mhi_cntrl->pending_pkts);
1077
1078         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1079                 read_lock_bh(&mhi_chan->lock);
1080                 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1081                 read_unlock_bh(&mhi_chan->lock);
1082         }
1083
1084         read_unlock_bh(&mhi_cntrl->pm_lock);
1085
1086         return 0;
1087 }
1088 EXPORT_SYMBOL_GPL(mhi_queue_skb);
1089
1090 int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1091                   struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
1092 {
1093         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1094         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1095                                                              mhi_dev->dl_chan;
1096         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1097         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1098         struct mhi_buf_info buf_info = { };
1099         int ret;
1100
1101         /* If MHI host pre-allocates buffers then client drivers cannot queue */
1102         if (mhi_chan->pre_alloc)
1103                 return -EINVAL;
1104
1105         if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1106                 return -ENOMEM;
1107
1108         read_lock_bh(&mhi_cntrl->pm_lock);
1109         if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
1110                 dev_err(dev, "MHI is not in active state, PM state: %s\n",
1111                         to_mhi_pm_state_str(mhi_cntrl->pm_state));
1112                 read_unlock_bh(&mhi_cntrl->pm_lock);
1113
1114                 return -EIO;
1115         }
1116
1117         /* we're in M3 or transitioning to M3 */
1118         if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1119                 mhi_trigger_resume(mhi_cntrl);
1120
1121         /* Toggle wake to exit out of M2 */
1122         mhi_cntrl->wake_toggle(mhi_cntrl);
1123
1124         buf_info.p_addr = mhi_buf->dma_addr;
1125         buf_info.cb_buf = mhi_buf;
1126         buf_info.pre_mapped = true;
1127         buf_info.len = len;
1128
1129         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
1130         if (unlikely(ret)) {
1131                 read_unlock_bh(&mhi_cntrl->pm_lock);
1132                 return ret;
1133         }
1134
1135         if (mhi_chan->dir == DMA_TO_DEVICE)
1136                 atomic_inc(&mhi_cntrl->pending_pkts);
1137
1138         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1139                 read_lock_bh(&mhi_chan->lock);
1140                 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1141                 read_unlock_bh(&mhi_chan->lock);
1142         }
1143
1144         read_unlock_bh(&mhi_cntrl->pm_lock);
1145
1146         return 0;
1147 }
1148 EXPORT_SYMBOL_GPL(mhi_queue_dma);
1149
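/*
 * Build a transfer ring element for the given buffer: record the buffer
 * in the channel's buffer ring, DMA-map it unless the client pre-mapped
 * it, fill in the TRE (pointer, length and the BEI/EOT/EOB/CHAIN flags)
 * and advance the write pointers of both rings. The doorbell is rung by
 * the callers.
 */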
1150 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1151                         struct mhi_buf_info *info, enum mhi_flags flags)
1152 {
1153         struct mhi_ring *buf_ring, *tre_ring;
1154         struct mhi_tre *mhi_tre;
1155         struct mhi_buf_info *buf_info;
1156         int eot, eob, chain, bei;
1157         int ret;
1158
1159         buf_ring = &mhi_chan->buf_ring;
1160         tre_ring = &mhi_chan->tre_ring;
1161
1162         buf_info = buf_ring->wp;
1163         WARN_ON(buf_info->used);
1164         buf_info->pre_mapped = info->pre_mapped;
1165         if (info->pre_mapped)
1166                 buf_info->p_addr = info->p_addr;
1167         else
1168                 buf_info->v_addr = info->v_addr;
1169         buf_info->cb_buf = info->cb_buf;
1170         buf_info->wp = tre_ring->wp;
1171         buf_info->dir = mhi_chan->dir;
1172         buf_info->len = info->len;
1173
1174         if (!info->pre_mapped) {
1175                 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1176                 if (ret)
1177                         return ret;
1178         }
1179
1180         eob = !!(flags & MHI_EOB);
1181         eot = !!(flags & MHI_EOT);
1182         chain = !!(flags & MHI_CHAIN);
1183         bei = !!(mhi_chan->intmod);
1184
1185         mhi_tre = tre_ring->wp;
1186         mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1187         mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
1188         mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
1189
1190         /* increment WP */
1191         mhi_add_ring_element(mhi_cntrl, tre_ring);
1192         mhi_add_ring_element(mhi_cntrl, buf_ring);
1193
1194         return 0;
1195 }
1196
1197 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1198                   void *buf, size_t len, enum mhi_flags mflags)
1199 {
1200         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1201         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1202                                                              mhi_dev->dl_chan;
1203         struct mhi_ring *tre_ring;
1204         struct mhi_buf_info buf_info = { };
1205         unsigned long flags;
1206         int ret;
1207
1208         /*
1209          * This check is here only as a guard; it's always possible for MHI
1210          * to enter an error state while the rest of the function executes,
1211          * which is not fatal, so we do not need to hold pm_lock.
1212          */
1213         if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1214                 return -EIO;
1215
1216         tre_ring = &mhi_chan->tre_ring;
1217         if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1218                 return -ENOMEM;
1219
1220         buf_info.v_addr = buf;
1221         buf_info.cb_buf = buf;
1222         buf_info.len = len;
1223
1224         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
1225         if (unlikely(ret))
1226                 return ret;
1227
1228         read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1229
1230         /* we're in M3 or transitioning to M3 */
1231         if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1232                 mhi_trigger_resume(mhi_cntrl);
1233
1234         /* Toggle wake to exit out of M2 */
1235         mhi_cntrl->wake_toggle(mhi_cntrl);
1236
1237         if (mhi_chan->dir == DMA_TO_DEVICE)
1238                 atomic_inc(&mhi_cntrl->pending_pkts);
1239
1240         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1241                 unsigned long flags;
1242
1243                 read_lock_irqsave(&mhi_chan->lock, flags);
1244                 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1245                 read_unlock_irqrestore(&mhi_chan->lock, flags);
1246         }
1247
1248         read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1249
1250         return 0;
1251 }
1252 EXPORT_SYMBOL_GPL(mhi_queue_buf);
1253
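/*
 * Queue a channel command (RESET or START) on the primary command ring
 * and ring the command doorbell if register access is currently allowed.
 * The completion is reported later through mhi_process_cmd_completion().
 */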
1254 int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1255                  struct mhi_chan *mhi_chan,
1256                  enum mhi_cmd_type cmd)
1257 {
1258         struct mhi_tre *cmd_tre = NULL;
1259         struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1260         struct mhi_ring *ring = &mhi_cmd->ring;
1261         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1262         int chan = 0;
1263
1264         if (mhi_chan)
1265                 chan = mhi_chan->chan;
1266
1267         spin_lock_bh(&mhi_cmd->lock);
1268         if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1269                 spin_unlock_bh(&mhi_cmd->lock);
1270                 return -ENOMEM;
1271         }
1272
1273         /* prepare the cmd tre */
1274         cmd_tre = ring->wp;
1275         switch (cmd) {
1276         case MHI_CMD_RESET_CHAN:
1277                 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
1278                 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
1279                 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
1280                 break;
1281         case MHI_CMD_START_CHAN:
1282                 cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
1283                 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
1284                 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
1285                 break;
1286         default:
1287                 dev_err(dev, "Command not supported\n");
1288                 break;
1289         }
1290
1291         /* queue to hardware */
1292         mhi_add_ring_element(mhi_cntrl, ring);
1293         read_lock_bh(&mhi_cntrl->pm_lock);
1294         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1295                 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1296         read_unlock_bh(&mhi_cntrl->pm_lock);
1297         spin_unlock_bh(&mhi_cmd->lock);
1298
1299         return 0;
1300 }
1301
1302 static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1303                                     struct mhi_chan *mhi_chan)
1304 {
1305         int ret;
1306         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1307
1308         dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
1309
1310         /* no more processing events for this channel */
1311         mutex_lock(&mhi_chan->mutex);
1312         write_lock_irq(&mhi_chan->lock);
1313         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
1314                 write_unlock_irq(&mhi_chan->lock);
1315                 mutex_unlock(&mhi_chan->mutex);
1316                 return;
1317         }
1318
1319         mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1320         write_unlock_irq(&mhi_chan->lock);
1321
1322         reinit_completion(&mhi_chan->completion);
1323         read_lock_bh(&mhi_cntrl->pm_lock);
1324         if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1325                 read_unlock_bh(&mhi_cntrl->pm_lock);
1326                 goto error_invalid_state;
1327         }
1328
1329         mhi_cntrl->wake_toggle(mhi_cntrl);
1330         read_unlock_bh(&mhi_cntrl->pm_lock);
1331
1332         mhi_cntrl->runtime_get(mhi_cntrl);
1333         mhi_cntrl->runtime_put(mhi_cntrl);
1334         ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
1335         if (ret)
1336                 goto error_invalid_state;
1337
1338         /* Even if the command fails, we will still reset the channel */
1339         ret = wait_for_completion_timeout(&mhi_chan->completion,
1340                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1341         if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
1342                 dev_err(dev,
1343                         "Failed to receive cmd completion, still resetting\n");
1344
1345 error_invalid_state:
1346         if (!mhi_chan->offload_ch) {
1347                 mhi_reset_chan(mhi_cntrl, mhi_chan);
1348                 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1349         }
1350         dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
1351         mutex_unlock(&mhi_chan->mutex);
1352 }
1353
1354 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1355                         struct mhi_chan *mhi_chan)
1356 {
1357         int ret = 0;
1358         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1359
1360         dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
1361
1362         if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1363                 dev_err(dev,
1364                         "Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
1365                         TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
1366                         mhi_chan->name);
1367                 return -ENOTCONN;
1368         }
1369
1370         mutex_lock(&mhi_chan->mutex);
1371
1372         /* If the channel is not in the disabled state, do not allow it to start */
1373         if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
1374                 ret = -EIO;
1375                 dev_dbg(dev, "channel: %d is not in disabled state\n",
1376                         mhi_chan->chan);
1377                 goto error_init_chan;
1378         }
1379
1380         /* For offload channels, the client manages the channel context */
1381         if (!mhi_chan->offload_ch) {
1382                 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1383                 if (ret)
1384                         goto error_init_chan;
1385         }
1386
1387         reinit_completion(&mhi_chan->completion);
1388         read_lock_bh(&mhi_cntrl->pm_lock);
1389         if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1390                 read_unlock_bh(&mhi_cntrl->pm_lock);
1391                 ret = -EIO;
1392                 goto error_pm_state;
1393         }
1394
1395         mhi_cntrl->wake_toggle(mhi_cntrl);
1396         read_unlock_bh(&mhi_cntrl->pm_lock);
1397         mhi_cntrl->runtime_get(mhi_cntrl);
1398         mhi_cntrl->runtime_put(mhi_cntrl);
1399
1400         ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
1401         if (ret)
1402                 goto error_pm_state;
1403
1404         ret = wait_for_completion_timeout(&mhi_chan->completion,
1405                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1406         if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
1407                 ret = -EIO;
1408                 goto error_pm_state;
1409         }
1410
1411         write_lock_irq(&mhi_chan->lock);
1412         mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
1413         write_unlock_irq(&mhi_chan->lock);
1414
1415         /* Pre-allocate buffer for xfer ring */
1416         if (mhi_chan->pre_alloc) {
1417                 int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1418                                                        &mhi_chan->tre_ring);
1419                 size_t len = mhi_cntrl->buffer_len;
1420
1421                 while (nr_el--) {
1422                         void *buf;
1423                         struct mhi_buf_info info = { };
1424                         buf = kmalloc(len, GFP_KERNEL);
1425                         if (!buf) {
1426                                 ret = -ENOMEM;
1427                                 goto error_pre_alloc;
1428                         }
1429
1430                         /* Prepare transfer descriptors */
1431                         info.v_addr = buf;
1432                         info.cb_buf = buf;
1433                         info.len = len;
1434                         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1435                         if (ret) {
1436                                 kfree(buf);
1437                                 goto error_pre_alloc;
1438                         }
1439                 }
1440
1441                 read_lock_bh(&mhi_cntrl->pm_lock);
1442                 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1443                         read_lock_irq(&mhi_chan->lock);
1444                         mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1445                         read_unlock_irq(&mhi_chan->lock);
1446                 }
1447                 read_unlock_bh(&mhi_cntrl->pm_lock);
1448         }
1449
1450         mutex_unlock(&mhi_chan->mutex);
1451
1452         dev_dbg(dev, "Chan: %d successfully moved to start state\n",
1453                 mhi_chan->chan);
1454
1455         return 0;
1456
1457 error_pm_state:
1458         if (!mhi_chan->offload_ch)
1459                 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1460
1461 error_init_chan:
1462         mutex_unlock(&mhi_chan->mutex);
1463
1464         return ret;
1465
1466 error_pre_alloc:
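             /*
              * The channel was started but queueing buffers failed: drop the
              * lock and tear the channel back down, which also frees any
              * buffers that were already queued.
              */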
1467         mutex_unlock(&mhi_chan->mutex);
1468         __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1469
1470         return ret;
1471 }
1472
1473 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1474                                   struct mhi_event *mhi_event,
1475                                   struct mhi_event_ctxt *er_ctxt,
1476                                   int chan)
1478 {
1479         struct mhi_tre *dev_rp, *local_rp;
1480         struct mhi_ring *ev_ring;
1481         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1482         unsigned long flags;
1483         dma_addr_t ptr;
1484
1485         dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
1486
1487         ev_ring = &mhi_event->ring;
1488
1489         /* Mark all pending events related to this channel as stale */
1490         spin_lock_irqsave(&mhi_event->lock, flags);
1491
1492         ptr = er_ctxt->rp;
1493         if (!is_valid_ring_ptr(ev_ring, ptr)) {
1494                 dev_err(&mhi_cntrl->mhi_dev->dev,
1495                         "Event ring rp points outside of the event ring\n");
1496                 dev_rp = ev_ring->rp;
1497         } else {
1498                 dev_rp = mhi_to_virtual(ev_ring, ptr);
1499         }
1500
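             /*
              * Walk from the host's local read pointer up to the device read
              * pointer and rewrite transfer completion events for this
              * channel as stale events so later processing skips them.
              */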
1501         local_rp = ev_ring->rp;
1502         while (dev_rp != local_rp) {
1503                 if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
1504                     chan == MHI_TRE_GET_EV_CHID(local_rp))
1505                         local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
1506                                         MHI_PKT_TYPE_STALE_EVENT);
1507                 local_rp++;
1508                 if (local_rp == (ev_ring->base + ev_ring->len))
1509                         local_rp = ev_ring->base;
1510         }
1511
1512         dev_dbg(dev, "Finished marking events as stale events\n");
1513         spin_unlock_irqrestore(&mhi_event->lock, flags);
1514 }
1515
1516 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1517                                 struct mhi_chan *mhi_chan)
1518 {
1519         struct mhi_ring *buf_ring, *tre_ring;
1520         struct mhi_result result;
1521
1522         /* Reset any pending buffers */
1523         buf_ring = &mhi_chan->buf_ring;
1524         tre_ring = &mhi_chan->tre_ring;
1525         result.transaction_status = -ENOTCONN;
1526         result.bytes_xferd = 0;
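             /*
              * Walk every outstanding ring element: unmap buffers mapped by
              * the core, free pre-allocated buffers, and hand client-queued
              * buffers back through the transfer callback with -ENOTCONN.
              */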
1527         while (tre_ring->rp != tre_ring->wp) {
1528                 struct mhi_buf_info *buf_info = buf_ring->rp;
1529
1530                 if (mhi_chan->dir == DMA_TO_DEVICE)
1531                         atomic_dec(&mhi_cntrl->pending_pkts);
1532
1533                 if (!buf_info->pre_mapped)
1534                         mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1535
1536                 mhi_del_ring_element(mhi_cntrl, buf_ring);
1537                 mhi_del_ring_element(mhi_cntrl, tre_ring);
1538
1539                 if (mhi_chan->pre_alloc) {
1540                         kfree(buf_info->cb_buf);
1541                 } else {
1542                         result.buf_addr = buf_info->cb_buf;
1543                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1544                 }
1545         }
1546 }
1547
1548 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1549 {
1550         struct mhi_event *mhi_event;
1551         struct mhi_event_ctxt *er_ctxt;
1552         int chan = mhi_chan->chan;
1553
1554         /* Nothing to reset, client doesn't queue buffers */
1555         if (mhi_chan->offload_ch)
1556                 return;
1557
1558         read_lock_bh(&mhi_cntrl->pm_lock);
1559         mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1560         er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1561
1562         mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1563
1564         mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1565
1566         read_unlock_bh(&mhi_cntrl->pm_lock);
1567 }
1568
1569 /* Move channel to start state */
1570 int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
1571 {
1572         int ret, dir;
1573         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1574         struct mhi_chan *mhi_chan;
1575
1576         for (dir = 0; dir < 2; dir++) {
1577                 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1578                 if (!mhi_chan)
1579                         continue;
1580
1581                 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
1582                 if (ret)
1583                         goto error_open_chan;
1584         }
1585
1586         return 0;
1587
1588 error_open_chan:
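             /* Unwind channels already moved to the start state */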
1589         for (--dir; dir >= 0; dir--) {
1590                 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1591                 if (!mhi_chan)
1592                         continue;
1593
1594                 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1595         }
1596
1597         return ret;
1598 }
1599 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
1600
1601 void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
1602 {
1603         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1604         struct mhi_chan *mhi_chan;
1605         int dir;
1606
1607         for (dir = 0; dir < 2; dir++) {
1608                 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1609                 if (!mhi_chan)
1610                         continue;
1611
1612                 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1613         }
1614 }
1615 EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
1616
1617 int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
1618 {
1619         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1620         struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
1621         struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1622         int ret;
1623
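             /* Process up to @budget events on the DL channel's event ring */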
1624         spin_lock_bh(&mhi_event->lock);
1625         ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
1626         spin_unlock_bh(&mhi_event->lock);
1627
1628         return ret;
1629 }
1630 EXPORT_SYMBOL_GPL(mhi_poll);