1 // SPDX-License-Identifier: GPL-2.0-only
2
3 /* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
4 /* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
5
6 #include <linux/bitfield.h>
7 #include <linux/bits.h>
8 #include <linux/completion.h>
9 #include <linux/delay.h>
10 #include <linux/dma-buf.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/interrupt.h>
13 #include <linux/kref.h>
14 #include <linux/list.h>
15 #include <linux/math64.h>
16 #include <linux/mm.h>
17 #include <linux/moduleparam.h>
18 #include <linux/scatterlist.h>
19 #include <linux/spinlock.h>
20 #include <linux/srcu.h>
21 #include <linux/types.h>
22 #include <linux/uaccess.h>
23 #include <linux/wait.h>
24 #include <drm/drm_file.h>
25 #include <drm/drm_gem.h>
26 #include <drm/drm_prime.h>
27 #include <drm/drm_print.h>
28 #include <uapi/drm/qaic_accel.h>
29
30 #include "qaic.h"
31
32 #define SEM_VAL_MASK    GENMASK_ULL(11, 0)
33 #define SEM_INDEX_MASK  GENMASK_ULL(4, 0)
34 #define BULK_XFER       BIT(3)
35 #define GEN_COMPLETION  BIT(4)
36 #define INBOUND_XFER    1
37 #define OUTBOUND_XFER   2
38 #define REQHP_OFF       0x0 /* we read this */
39 #define REQTP_OFF       0x4 /* we write this */
40 #define RSPHP_OFF       0x8 /* we write this */
41 #define RSPTP_OFF       0xc /* we read this */
42
43 #define ENCODE_SEM(val, index, sync, cmd, flags)                        \
44                 ({                                                      \
45                         FIELD_PREP(GENMASK(11, 0), (val)) |             \
46                         FIELD_PREP(GENMASK(20, 16), (index)) |          \
47                         FIELD_PREP(BIT(22), (sync)) |                   \
48                         FIELD_PREP(GENMASK(26, 24), (cmd)) |            \
49                         FIELD_PREP(GENMASK(30, 29), (flags)) |          \
50                         FIELD_PREP(BIT(31), (cmd) ? 1 : 0);             \
51                 })
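/*
 * For illustration, with hypothetical values (not taken from the driver):
 * ENCODE_SEM(5, 3, 1, 2, 0) packs
 *   bits 11:0  <- 5    (semaphore value)
 *   bits 20:16 <- 3    (semaphore index)
 *   bit  22    <- 1    (presync)
 *   bits 26:24 <- 2    (semaphore command)
 *   bits 30:29 <- 0    (no in/outbound sync fence)
 *   bit  31    <- 1    (enable, since cmd is non-zero)
 * giving 0x82430005. The individual FIELD_PREP() terms are simply ORed.
 */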
52 #define NUM_EVENTS      128
53 #define NUM_DELAYS      10
54 #define fifo_at(base, offset) ((base) + (offset) * get_dbc_req_elem_size())
55
56 static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */
57 module_param(wait_exec_default_timeout_ms, uint, 0600);
58 MODULE_PARM_DESC(wait_exec_default_timeout_ms, "Default timeout for DRM_IOCTL_QAIC_WAIT_BO");
59
60 static unsigned int datapath_poll_interval_us = 100; /* 100 usec default */
61 module_param(datapath_poll_interval_us, uint, 0600);
62 MODULE_PARM_DESC(datapath_poll_interval_us,
63                  "Amount of time to sleep between activity when datapath polling is enabled");
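/*
 * Both parameters are writable at runtime (permissions 0600). Assuming the
 * driver is loaded as the "qaic" module, they would typically be tuned via
 * sysfs, e.g.:
 *
 *   echo 10000 > /sys/module/qaic/parameters/wait_exec_default_timeout_ms
 *   echo 200   > /sys/module/qaic/parameters/datapath_poll_interval_us
 */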
64
65 struct dbc_req {
66         /*
67          * A request ID is assigned to each memory handle going into the DMA
68          * queue. Since a single memory handle can enqueue multiple elements in
69          * the DMA queue, all of them will have the same request ID.
70          */
71         __le16  req_id;
72         /* Future use */
73         __u8    seq_id;
74         /*
75          * Special encoded variable
76          * 7    0 - Do not force to generate MSI after DMA is completed
77          *      1 - Force to generate MSI after DMA is completed
78          * 6:5  Reserved
79          * 4    1 - Generate completion element in the response queue
80          *      0 - No Completion Code
81          * 3    0 - DMA request is a linked-list transfer
82          *      1 - DMA request is a bulk transfer
83          * 2    Reserved
84          * 1:0  00 - No DMA transfer involved
85          *      01 - DMA transfer is part of an inbound transfer
86          *      10 - DMA transfer is part of an outbound transfer
87          *      11 - NA
88          */
89         __u8    cmd;
90         __le32  resv;
91         /* Source address for the transfer */
92         __le64  src_addr;
93         /* Destination address for the transfer */
94         __le64  dest_addr;
95         /* Length of transfer request */
96         __le32  len;
97         __le32  resv2;
98         /* Doorbell address */
99         __le64  db_addr;
100         /*
101          * Special encoded variable
102          * 7    1 - Doorbell(db) write
103          *      0 - No doorbell write
104          * 6:2  Reserved
105          * 1:0  00 - 32 bit access, db address must be aligned to 32bit-boundary
106          *      01 - 16 bit access, db address must be aligned to 16bit-boundary
107          *      10 - 8 bit access, db address must be aligned to 8bit-boundary
108          *      11 - Reserved
109          */
110         __u8    db_len;
111         __u8    resv3;
112         __le16  resv4;
113         /* 32 bit data written to doorbell address */
114         __le32  db_data;
115         /*
116          * Special encoded variable
117          * All the fields of sem_cmdX are passed from the user and are ORed
118          * together to form sem_cmd.
119          * 11:0         Semaphore value
120          * 15:12        Reserved
121          * 20:16        Semaphore index
122          * 21           Reserved
123          * 22           Semaphore Sync
124          * 23           Reserved
125          * 26:24        Semaphore command
126          * 28:27        Reserved
127          * 29           Semaphore DMA outbound sync fence
128          * 30           Semaphore DMA inbound sync fence
129          * 31           Enable semaphore command
130          */
131         __le32  sem_cmd0;
132         __le32  sem_cmd1;
133         __le32  sem_cmd2;
134         __le32  sem_cmd3;
135 } __packed;
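/*
 * As an illustration of the cmd encoding above: an inbound bulk transfer that
 * should generate a completion element but no forced MSI is encoded as
 * GEN_COMPLETION | BULK_XFER | INBOUND_XFER = 0x10 | 0x08 | 0x01 = 0x19.
 * encode_reqs() below builds this kind of value for each slice.
 */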
136
137 struct dbc_rsp {
138         /* Request ID of the memory handle whose DMA transaction is completed */
139         __le16  req_id;
140         /* Status of the DMA transaction. 0: success, otherwise failure */
141         __le16  status;
142 } __packed;
143
144 inline int get_dbc_req_elem_size(void)
145 {
146         return sizeof(struct dbc_req);
147 }
148
149 inline int get_dbc_rsp_elem_size(void)
150 {
151         return sizeof(struct dbc_rsp);
152 }
153
154 static void free_slice(struct kref *kref)
155 {
156         struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);
157
158         slice->bo->total_slice_nents -= slice->nents;
159         list_del(&slice->slice);
160         drm_gem_object_put(&slice->bo->base);
161         sg_free_table(slice->sgt);
162         kfree(slice->sgt);
163         kfree(slice->reqs);
164         kfree(slice);
165 }
166
167 static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
168                                         struct sg_table *sgt_in, u64 size, u64 offset)
169 {
170         int total_len, len, nents, offf = 0, offl = 0;
171         struct scatterlist *sg, *sgn, *sgf, *sgl;
172         struct sg_table *sgt;
173         int ret, j;
174
175         /* find out number of relevant nents needed for this mem */
176         total_len = 0;
177         sgf = NULL;
178         sgl = NULL;
179         nents = 0;
180
181         size = size ? size : PAGE_SIZE;
182         for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
183                 len = sg_dma_len(sg);
184
185                 if (!len)
186                         continue;
187                 if (offset >= total_len && offset < total_len + len) {
188                         sgf = sg;
189                         offf = offset - total_len;
190                 }
191                 if (sgf)
192                         nents++;
193                 if (offset + size >= total_len &&
194                     offset + size <= total_len + len) {
195                         sgl = sg;
196                         offl = offset + size - total_len;
197                         break;
198                 }
199                 total_len += len;
200         }
201
202         if (!sgf || !sgl) {
203                 ret = -EINVAL;
204                 goto out;
205         }
206
207         sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
208         if (!sgt) {
209                 ret = -ENOMEM;
210                 goto out;
211         }
212
213         ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
214         if (ret)
215                 goto free_sgt;
216
217         /* copy relevant sg node and fix page and length */
218         sgn = sgf;
219         for_each_sgtable_sg(sgt, sg, j) {
220                 memcpy(sg, sgn, sizeof(*sg));
221                 if (sgn == sgf) {
222                         sg_dma_address(sg) += offf;
223                         sg_dma_len(sg) -= offf;
224                         sg_set_page(sg, sg_page(sgn), sg_dma_len(sg), offf);
225                 } else {
226                         offf = 0;
227                 }
228                 if (sgn == sgl) {
229                         sg_dma_len(sg) = offl - offf;
230                         sg_set_page(sg, sg_page(sgn), offl - offf, offf);
231                         sg_mark_end(sg);
232                         break;
233                 }
234                 sgn = sg_next(sgn);
235         }
236
237         *sgt_out = sgt;
238         return ret;
239
240 free_sgt:
241         kfree(sgt);
242 out:
243         *sgt_out = NULL;
244         return ret;
245 }
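/*
 * A worked example of the clone above, using hypothetical numbers: if sgt_in
 * has three DMA segments of 4096 bytes each and the caller asks for
 * offset = 2048 and size = 6144, the scan selects sgf = segment 0 with
 * offf = 2048 and sgl = segment 1 with offl = 4096, so nents = 2. The cloned
 * table then holds 2048 bytes of segment 0 starting at +2048 followed by all
 * 4096 bytes of segment 1, i.e. exactly the requested 6144 bytes.
 */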
246
247 static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
248                        struct qaic_attach_slice_entry *req)
249 {
250         __le64 db_addr = cpu_to_le64(req->db_addr);
251         __le32 db_data = cpu_to_le32(req->db_data);
252         struct scatterlist *sg;
253         __u8 cmd = BULK_XFER;
254         int presync_sem;
255         u64 dev_addr;
256         __u8 db_len;
257         int i;
258
259         if (!slice->no_xfer)
260                 cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER);
261
262         if (req->db_len && !IS_ALIGNED(req->db_addr, req->db_len / 8))
263                 return -EINVAL;
264
265         presync_sem = req->sem0.presync + req->sem1.presync + req->sem2.presync + req->sem3.presync;
266         if (presync_sem > 1)
267                 return -EINVAL;
268
269         presync_sem = req->sem0.presync << 0 | req->sem1.presync << 1 |
270                       req->sem2.presync << 2 | req->sem3.presync << 3;
271
272         switch (req->db_len) {
273         case 32:
274                 db_len = BIT(7);
275                 break;
276         case 16:
277                 db_len = BIT(7) | 1;
278                 break;
279         case 8:
280                 db_len = BIT(7) | 2;
281                 break;
282         case 0:
283                 db_len = 0; /* doorbell is not active for this command */
284                 break;
285         default:
286                 return -EINVAL; /* should never hit this */
287         }
288
289         /*
290          * When we end up splitting a single request (i.e. a buf slice) into
291          * multiple DMA requests, we have to manage the sync data carefully.
292          * There can only be one presync sem. That needs to be on every xfer
293          * so that the DMA engine doesn't transfer data before the receiver is
294          * ready. We only do the doorbell and postsync sems after the xfer.
295          * To guarantee previous xfers for the request are complete, we use a
296          * fence.
297          */
298         dev_addr = req->dev_addr;
299         for_each_sgtable_sg(slice->sgt, sg, i) {
300                 slice->reqs[i].cmd = cmd;
301                 slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
302                                                       sg_dma_address(sg) : dev_addr);
303                 slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
304                                                        dev_addr : sg_dma_address(sg));
305                 /*
306                  * sg_dma_len(sg) returns the size of a DMA segment. The maximum
307                  * DMA segment size is set to UINT_MAX by qaic, so the return
308                  * value of sg_dma_len(sg) can never exceed the u32 range and
309                  * the cast to u32 does not corrupt the value.
310                  */
311                 slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg));
312                 switch (presync_sem) {
313                 case BIT(0):
314                         slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val,
315                                                                          req->sem0.index,
316                                                                          req->sem0.presync,
317                                                                          req->sem0.cmd,
318                                                                          req->sem0.flags));
319                         break;
320                 case BIT(1):
321                         slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val,
322                                                                          req->sem1.index,
323                                                                          req->sem1.presync,
324                                                                          req->sem1.cmd,
325                                                                          req->sem1.flags));
326                         break;
327                 case BIT(2):
328                         slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val,
329                                                                          req->sem2.index,
330                                                                          req->sem2.presync,
331                                                                          req->sem2.cmd,
332                                                                          req->sem2.flags));
333                         break;
334                 case BIT(3):
335                         slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val,
336                                                                          req->sem3.index,
337                                                                          req->sem3.presync,
338                                                                          req->sem3.cmd,
339                                                                          req->sem3.flags));
340                         break;
341                 }
342                 dev_addr += sg_dma_len(sg);
343         }
344         /* add post transfer stuff to last segment */
345         i--;
346         slice->reqs[i].cmd |= GEN_COMPLETION;
347         slice->reqs[i].db_addr = db_addr;
348         slice->reqs[i].db_len = db_len;
349         slice->reqs[i].db_data = db_data;
350         /*
351          * Add a fence if we have more than one request going to the hardware
352          * representing the entirety of the user request, and the user request
353          * has no presync condition.
354          * Fences are expensive, so we try to avoid them. We rely on the
355          * hardware behavior to avoid needing one when there is a presync
356          * condition. When a presync exists, all requests for that same
357          * presync will be queued into a fifo. Thus, since we queue the
358          * post xfer activity only on the last request we queue, the hardware
359          * will ensure that the last queued request is processed last, thus
360          * making sure the post xfer activity happens at the right time without
361          * a fence.
362          */
363         if (i && !presync_sem)
364                 req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ?
365                                     QAIC_SEM_INSYNCFENCE : QAIC_SEM_OUTSYNCFENCE);
366         slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index,
367                                                          req->sem0.presync, req->sem0.cmd,
368                                                          req->sem0.flags));
369         slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index,
370                                                          req->sem1.presync, req->sem1.cmd,
371                                                          req->sem1.flags));
372         slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index,
373                                                          req->sem2.presync, req->sem2.cmd,
374                                                          req->sem2.flags));
375         slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index,
376                                                          req->sem3.presync, req->sem3.cmd,
377                                                          req->sem3.flags));
378
379         return 0;
380 }
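/*
 * To make the semaphore handling above concrete with a hypothetical request:
 * if only sem1 has presync set, presync_sem == BIT(1) and sem_cmd1 is
 * replicated into every DMA element of the slice, so no element starts before
 * the semaphore condition is met. The doorbell, the completion and the full
 * set of semaphores are attached only to the final element. If no presync is
 * requested and the slice spans more than one element, sem0 instead gains an
 * IN/OUTSYNCFENCE flag so the post-transfer actions wait for the earlier
 * elements to complete.
 */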
381
382 static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
383                               struct qaic_attach_slice_entry *slice_ent)
384 {
385         struct sg_table *sgt = NULL;
386         struct bo_slice *slice;
387         int ret;
388
389         ret = clone_range_of_sgt_for_slice(qdev, &sgt, bo->sgt, slice_ent->size, slice_ent->offset);
390         if (ret)
391                 goto out;
392
393         slice = kmalloc(sizeof(*slice), GFP_KERNEL);
394         if (!slice) {
395                 ret = -ENOMEM;
396                 goto free_sgt;
397         }
398
399         slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
400         if (!slice->reqs) {
401                 ret = -ENOMEM;
402                 goto free_slice;
403         }
404
405         slice->no_xfer = !slice_ent->size;
406         slice->sgt = sgt;
407         slice->nents = sgt->nents;
408         slice->dir = bo->dir;
409         slice->bo = bo;
410         slice->size = slice_ent->size;
411         slice->offset = slice_ent->offset;
412
413         ret = encode_reqs(qdev, slice, slice_ent);
414         if (ret)
415                 goto free_req;
416
417         bo->total_slice_nents += sgt->nents;
418         kref_init(&slice->ref_count);
419         drm_gem_object_get(&bo->base);
420         list_add_tail(&slice->slice, &bo->slices);
421
422         return 0;
423
424 free_req:
425         kfree(slice->reqs);
426 free_slice:
427         kfree(slice);
428 free_sgt:
429         sg_free_table(sgt);
430         kfree(sgt);
431 out:
432         return ret;
433 }
434
435 static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 size)
436 {
437         struct scatterlist *sg;
438         struct sg_table *sgt;
439         struct page **pages;
440         int *pages_order;
441         int buf_extra;
442         int max_order;
443         int nr_pages;
444         int ret = 0;
445         int i, j, k;
446         int order;
447
448         if (size) {
449                 nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
450                 /*
451                  * calculate how much extra we are going to allocate, to remove
452                  * later
453                  */
454                 buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
455                 max_order = min(MAX_PAGE_ORDER, get_order(size));
456         } else {
457                 /* allocate a single page for bookkeeping */
458                 nr_pages = 1;
459                 buf_extra = 0;
460                 max_order = 0;
461         }
462
463         pages = kvmalloc_array(nr_pages, sizeof(*pages) + sizeof(*pages_order), GFP_KERNEL);
464         if (!pages) {
465                 ret = -ENOMEM;
466                 goto out;
467         }
468         pages_order = (void *)pages + sizeof(*pages) * nr_pages;
469
470         /*
471          * Allocate requested memory using alloc_pages. It is possible to allocate
472          * the requested memory in multiple chunks by calling alloc_pages
473          * multiple times. Use SG table to handle multiple allocated pages.
474          */
475         i = 0;
476         while (nr_pages > 0) {
477                 order = min(get_order(nr_pages * PAGE_SIZE), max_order);
478                 while (1) {
479                         pages[i] = alloc_pages(GFP_KERNEL | GFP_HIGHUSER |
480                                                __GFP_NOWARN | __GFP_ZERO |
481                                                (order ? __GFP_NORETRY : __GFP_RETRY_MAYFAIL),
482                                                order);
483                         if (pages[i])
484                                 break;
485                         if (!order--) {
486                                 ret = -ENOMEM;
487                                 goto free_partial_alloc;
488                         }
489                 }
490
491                 max_order = order;
492                 pages_order[i] = order;
493
494                 nr_pages -= 1 << order;
495                 if (nr_pages <= 0)
496                         /* account for over allocation */
497                         buf_extra += abs(nr_pages) * PAGE_SIZE;
498                 i++;
499         }
500
501         sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
502         if (!sgt) {
503                 ret = -ENOMEM;
504                 goto free_partial_alloc;
505         }
506
507         if (sg_alloc_table(sgt, i, GFP_KERNEL)) {
508                 ret = -ENOMEM;
509                 goto free_sgt;
510         }
511
512         /* Populate the SG table with the allocated memory pages */
513         sg = sgt->sgl;
514         for (k = 0; k < i; k++, sg = sg_next(sg)) {
515                 /* Last entry requires special handling */
516                 if (k < i - 1) {
517                         sg_set_page(sg, pages[k], PAGE_SIZE << pages_order[k], 0);
518                 } else {
519                         sg_set_page(sg, pages[k], (PAGE_SIZE << pages_order[k]) - buf_extra, 0);
520                         sg_mark_end(sg);
521                 }
522         }
523
524         kvfree(pages);
525         *sgt_out = sgt;
526         return ret;
527
528 free_sgt:
529         kfree(sgt);
530 free_partial_alloc:
531         for (j = 0; j < i; j++)
532                 __free_pages(pages[j], pages_order[j]);
533         kvfree(pages);
534 out:
535         *sgt_out = NULL;
536         return ret;
537 }
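/*
 * A worked allocation example with hypothetical numbers (assuming 4K pages):
 * for size = 9000 bytes, nr_pages = 3 and buf_extra = 4096 - (9000 % 4096) =
 * 3288. With max_order >= 2 the loop may satisfy the request with a single
 * order-2 allocation (16384 bytes); nr_pages then drops to -1, another 4096
 * bytes are added to buf_extra (7384 total), and the single SG entry is
 * trimmed to 16384 - 7384 = 9000 bytes, exactly the requested size.
 */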
538
539 static bool invalid_sem(struct qaic_sem *sem)
540 {
541         if (sem->val & ~SEM_VAL_MASK || sem->index & ~SEM_INDEX_MASK ||
542             !(sem->presync == 0 || sem->presync == 1) || sem->pad ||
543             sem->flags & ~(QAIC_SEM_INSYNCFENCE | QAIC_SEM_OUTSYNCFENCE) ||
544             sem->cmd > QAIC_SEM_WAIT_GT_0)
545                 return true;
546         return false;
547 }
548
549 static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
550                              u32 count, u64 total_size)
551 {
552         int i;
553
554         for (i = 0; i < count; i++) {
555                 if (!(slice_ent[i].db_len == 32 || slice_ent[i].db_len == 16 ||
556                       slice_ent[i].db_len == 8 || slice_ent[i].db_len == 0) ||
557                       invalid_sem(&slice_ent[i].sem0) || invalid_sem(&slice_ent[i].sem1) ||
558                       invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
559                         return -EINVAL;
560
561                 if (slice_ent[i].offset + slice_ent[i].size > total_size)
562                         return -EINVAL;
563         }
564
565         return 0;
566 }
567
568 static void qaic_free_sgt(struct sg_table *sgt)
569 {
570         struct scatterlist *sg;
571
572         for (sg = sgt->sgl; sg; sg = sg_next(sg))
573                 if (sg_page(sg))
574                         __free_pages(sg_page(sg), get_order(sg->length));
575         sg_free_table(sgt);
576         kfree(sgt);
577 }
578
579 static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
580                                 const struct drm_gem_object *obj)
581 {
582         struct qaic_bo *bo = to_qaic_bo(obj);
583
584         drm_printf_indent(p, indent, "BO DMA direction %d\n", bo->dir);
585 }
586
587 static const struct vm_operations_struct drm_vm_ops = {
588         .open = drm_gem_vm_open,
589         .close = drm_gem_vm_close,
590 };
591
592 static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
593 {
594         struct qaic_bo *bo = to_qaic_bo(obj);
595         unsigned long offset = 0;
596         struct scatterlist *sg;
597         int ret = 0;
598
599         if (obj->import_attach)
600                 return -EINVAL;
601
602         for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
603                 if (sg_page(sg)) {
604                         ret = remap_pfn_range(vma, vma->vm_start + offset, page_to_pfn(sg_page(sg)),
605                                               sg->length, vma->vm_page_prot);
606                         if (ret)
607                                 goto out;
608                         offset += sg->length;
609                 }
610         }
611
612 out:
613         return ret;
614 }
615
616 static void qaic_free_object(struct drm_gem_object *obj)
617 {
618         struct qaic_bo *bo = to_qaic_bo(obj);
619
620         if (obj->import_attach) {
621                 /* DMABUF/PRIME Path */
622                 drm_prime_gem_destroy(obj, NULL);
623         } else {
624                 /* Private buffer allocation path */
625                 qaic_free_sgt(bo->sgt);
626         }
627
628         mutex_destroy(&bo->lock);
629         drm_gem_object_release(obj);
630         kfree(bo);
631 }
632
633 static const struct drm_gem_object_funcs qaic_gem_funcs = {
634         .free = qaic_free_object,
635         .print_info = qaic_gem_print_info,
636         .mmap = qaic_gem_object_mmap,
637         .vm_ops = &drm_vm_ops,
638 };
639
640 static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
641 {
642         if (reinit) {
643                 bo->sliced = false;
644                 reinit_completion(&bo->xfer_done);
645         } else {
646                 mutex_init(&bo->lock);
647                 init_completion(&bo->xfer_done);
648         }
649         complete_all(&bo->xfer_done);
650         INIT_LIST_HEAD(&bo->slices);
651 }
652
653 static struct qaic_bo *qaic_alloc_init_bo(void)
654 {
655         struct qaic_bo *bo;
656
657         bo = kzalloc(sizeof(*bo), GFP_KERNEL);
658         if (!bo)
659                 return ERR_PTR(-ENOMEM);
660
661         qaic_init_bo(bo, false);
662
663         return bo;
664 }
665
666 int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
667 {
668         struct qaic_create_bo *args = data;
669         int usr_rcu_id, qdev_rcu_id;
670         struct drm_gem_object *obj;
671         struct qaic_device *qdev;
672         struct qaic_user *usr;
673         struct qaic_bo *bo;
674         size_t size;
675         int ret;
676
677         if (args->pad)
678                 return -EINVAL;
679
680         size = PAGE_ALIGN(args->size);
681         if (size == 0)
682                 return -EINVAL;
683
684         usr = file_priv->driver_priv;
685         usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
686         if (!usr->qddev) {
687                 ret = -ENODEV;
688                 goto unlock_usr_srcu;
689         }
690
691         qdev = usr->qddev->qdev;
692         qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
693         if (qdev->dev_state != QAIC_ONLINE) {
694                 ret = -ENODEV;
695                 goto unlock_dev_srcu;
696         }
697
698         bo = qaic_alloc_init_bo();
699         if (IS_ERR(bo)) {
700                 ret = PTR_ERR(bo);
701                 goto unlock_dev_srcu;
702         }
703         obj = &bo->base;
704
705         drm_gem_private_object_init(dev, obj, size);
706
707         obj->funcs = &qaic_gem_funcs;
708         ret = create_sgt(qdev, &bo->sgt, size);
709         if (ret)
710                 goto free_bo;
711
712         ret = drm_gem_handle_create(file_priv, obj, &args->handle);
713         if (ret)
714                 goto free_sgt;
715
716         bo->handle = args->handle;
717         drm_gem_object_put(obj);
718         srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
719         srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
720
721         return 0;
722
723 free_sgt:
724         qaic_free_sgt(bo->sgt);
725 free_bo:
726         kfree(bo);
727 unlock_dev_srcu:
728         srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
729 unlock_usr_srcu:
730         srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
731         return ret;
732 }
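/*
 * From userspace this path is reached through the QAIC create BO ioctl. A
 * minimal sketch, assuming the uapi layout from <uapi/drm/qaic_accel.h>
 * (size in, handle out, pad must be zero):
 *
 *   struct qaic_create_bo create = { .size = 0x10000 };
 *
 *   if (ioctl(fd, DRM_IOCTL_QAIC_CREATE_BO, &create) == 0)
 *       printf("BO handle %u\n", create.handle);
 *
 * The returned handle is what the mmap, attach-slice and execute ioctls below
 * operate on.
 */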
733
734 int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
735 {
736         struct qaic_mmap_bo *args = data;
737         int usr_rcu_id, qdev_rcu_id;
738         struct drm_gem_object *obj;
739         struct qaic_device *qdev;
740         struct qaic_user *usr;
741         int ret;
742
743         usr = file_priv->driver_priv;
744         usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
745         if (!usr->qddev) {
746                 ret = -ENODEV;
747                 goto unlock_usr_srcu;
748         }
749
750         qdev = usr->qddev->qdev;
751         qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
752         if (qdev->dev_state != QAIC_ONLINE) {
753                 ret = -ENODEV;
754                 goto unlock_dev_srcu;
755         }
756
757         obj = drm_gem_object_lookup(file_priv, args->handle);
758         if (!obj) {
759                 ret = -ENOENT;
760                 goto unlock_dev_srcu;
761         }
762
763         ret = drm_gem_create_mmap_offset(obj);
764         if (ret == 0)
765                 args->offset = drm_vma_node_offset_addr(&obj->vma_node);
766
767         drm_gem_object_put(obj);
768
769 unlock_dev_srcu:
770         srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
771 unlock_usr_srcu:
772         srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
773         return ret;
774 }
775
776 struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
777 {
778         struct dma_buf_attachment *attach;
779         struct drm_gem_object *obj;
780         struct qaic_bo *bo;
781         int ret;
782
783         bo = qaic_alloc_init_bo();
784         if (IS_ERR(bo)) {
785                 ret = PTR_ERR(bo);
786                 goto out;
787         }
788
789         obj = &bo->base;
790         get_dma_buf(dma_buf);
791
792         attach = dma_buf_attach(dma_buf, dev->dev);
793         if (IS_ERR(attach)) {
794                 ret = PTR_ERR(attach);
795                 goto attach_fail;
796         }
797
798         if (!attach->dmabuf->size) {
799                 ret = -EINVAL;
800                 goto size_align_fail;
801         }
802
803         drm_gem_private_object_init(dev, obj, attach->dmabuf->size);
804         /*
805          * skipping dma_buf_map_attachment() as we do not know the direction
806          * just yet. Once the direction is known in the subsequent IOCTL to
807          * attach slicing, we can do it then.
808          */
809
810         obj->funcs = &qaic_gem_funcs;
811         obj->import_attach = attach;
812         obj->resv = dma_buf->resv;
813
814         return obj;
815
816 size_align_fail:
817         dma_buf_detach(dma_buf, attach);
818 attach_fail:
819         dma_buf_put(dma_buf);
820         kfree(bo);
821 out:
822         return ERR_PTR(ret);
823 }
824
825 static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_hdr *hdr)
826 {
827         struct drm_gem_object *obj = &bo->base;
828         struct sg_table *sgt;
829         int ret;
830
831         if (obj->import_attach->dmabuf->size < hdr->size)
832                 return -EINVAL;
833
834         sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
835         if (IS_ERR(sgt)) {
836                 ret = PTR_ERR(sgt);
837                 return ret;
838         }
839
840         bo->sgt = sgt;
841
842         return 0;
843 }
844
845 static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
846                                   struct qaic_attach_slice_hdr *hdr)
847 {
848         int ret;
849
850         if (bo->base.size < hdr->size)
851                 return -EINVAL;
852
853         ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
854         if (ret)
855                 return -EFAULT;
856
857         return 0;
858 }
859
860 static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
861                            struct qaic_attach_slice_hdr *hdr)
862 {
863         int ret;
864
865         if (bo->base.import_attach)
866                 ret = qaic_prepare_import_bo(bo, hdr);
867         else
868                 ret = qaic_prepare_export_bo(qdev, bo, hdr);
869         bo->dir = hdr->dir;
870         bo->dbc = &qdev->dbc[hdr->dbc_id];
871         bo->nr_slice = hdr->count;
872
873         return ret;
874 }
875
876 static void qaic_unprepare_import_bo(struct qaic_bo *bo)
877 {
878         dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir);
879         bo->sgt = NULL;
880 }
881
882 static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
883 {
884         dma_unmap_sgtable(&qdev->pdev->dev, bo->sgt, bo->dir, 0);
885 }
886
887 static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
888 {
889         if (bo->base.import_attach)
890                 qaic_unprepare_import_bo(bo);
891         else
892                 qaic_unprepare_export_bo(qdev, bo);
893
894         bo->dir = 0;
895         bo->dbc = NULL;
896         bo->nr_slice = 0;
897 }
898
899 static void qaic_free_slices_bo(struct qaic_bo *bo)
900 {
901         struct bo_slice *slice, *temp;
902
903         list_for_each_entry_safe(slice, temp, &bo->slices, slice)
904                 kref_put(&slice->ref_count, free_slice);
905         if (WARN_ON_ONCE(bo->total_slice_nents != 0))
906                 bo->total_slice_nents = 0;
907         bo->nr_slice = 0;
908 }
909
910 static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
911                                   struct qaic_attach_slice_hdr *hdr,
912                                   struct qaic_attach_slice_entry *slice_ent)
913 {
914         int ret, i;
915
916         for (i = 0; i < hdr->count; i++) {
917                 ret = qaic_map_one_slice(qdev, bo, &slice_ent[i]);
918                 if (ret) {
919                         qaic_free_slices_bo(bo);
920                         return ret;
921                 }
922         }
923
924         if (bo->total_slice_nents > bo->dbc->nelem) {
925                 qaic_free_slices_bo(bo);
926                 return -ENOSPC;
927         }
928
929         return 0;
930 }
931
932 int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
933 {
934         struct qaic_attach_slice_entry *slice_ent;
935         struct qaic_attach_slice *args = data;
936         int rcu_id, usr_rcu_id, qdev_rcu_id;
937         struct dma_bridge_chan  *dbc;
938         struct drm_gem_object *obj;
939         struct qaic_device *qdev;
940         unsigned long arg_size;
941         struct qaic_user *usr;
942         u8 __user *user_data;
943         struct qaic_bo *bo;
944         int ret;
945
946         if (args->hdr.count == 0)
947                 return -EINVAL;
948
949         arg_size = args->hdr.count * sizeof(*slice_ent);
950         if (arg_size / args->hdr.count != sizeof(*slice_ent))
951                 return -EINVAL;
952
953         if (args->hdr.size == 0)
954                 return -EINVAL;
955
956         if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
957                 return -EINVAL;
958
959         if (args->data == 0)
960                 return -EINVAL;
961
962         usr = file_priv->driver_priv;
963         usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
964         if (!usr->qddev) {
965                 ret = -ENODEV;
966                 goto unlock_usr_srcu;
967         }
968
969         qdev = usr->qddev->qdev;
970         qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
971         if (qdev->dev_state != QAIC_ONLINE) {
972                 ret = -ENODEV;
973                 goto unlock_dev_srcu;
974         }
975
976         if (args->hdr.dbc_id >= qdev->num_dbc) {
977                 ret = -EINVAL;
978                 goto unlock_dev_srcu;
979         }
980
981         user_data = u64_to_user_ptr(args->data);
982
983         slice_ent = kzalloc(arg_size, GFP_KERNEL);
984         if (!slice_ent) {
985                 ret = -EINVAL;
986                 goto unlock_dev_srcu;
987         }
988
989         ret = copy_from_user(slice_ent, user_data, arg_size);
990         if (ret) {
991                 ret = -EFAULT;
992                 goto free_slice_ent;
993         }
994
995         ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, args->hdr.size);
996         if (ret)
997                 goto free_slice_ent;
998
999         obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
1000         if (!obj) {
1001                 ret = -ENOENT;
1002                 goto free_slice_ent;
1003         }
1004
1005         bo = to_qaic_bo(obj);
1006         ret = mutex_lock_interruptible(&bo->lock);
1007         if (ret)
1008                 goto put_bo;
1009
1010         if (bo->sliced) {
1011                 ret = -EINVAL;
1012                 goto unlock_bo;
1013         }
1014
1015         dbc = &qdev->dbc[args->hdr.dbc_id];
1016         rcu_id = srcu_read_lock(&dbc->ch_lock);
1017         if (dbc->usr != usr) {
1018                 ret = -EINVAL;
1019                 goto unlock_ch_srcu;
1020         }
1021
1022         ret = qaic_prepare_bo(qdev, bo, &args->hdr);
1023         if (ret)
1024                 goto unlock_ch_srcu;
1025
1026         ret = qaic_attach_slicing_bo(qdev, bo, &args->hdr, slice_ent);
1027         if (ret)
1028                 goto unprepare_bo;
1029
1030         if (args->hdr.dir == DMA_TO_DEVICE)
1031                 dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);
1032
1033         bo->sliced = true;
1034         list_add_tail(&bo->bo_list, &bo->dbc->bo_lists);
1035         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1036         mutex_unlock(&bo->lock);
1037         kfree(slice_ent);
1038         srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1039         srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1040
1041         return 0;
1042
1043 unprepare_bo:
1044         qaic_unprepare_bo(qdev, bo);
1045 unlock_ch_srcu:
1046         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1047 unlock_bo:
1048         mutex_unlock(&bo->lock);
1049 put_bo:
1050         drm_gem_object_put(obj);
1051 free_slice_ent:
1052         kfree(slice_ent);
1053 unlock_dev_srcu:
1054         srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1055 unlock_usr_srcu:
1056         srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1057         return ret;
1058 }
1059
1060 static inline u32 fifo_space_avail(u32 head, u32 tail, u32 q_size)
1061 {
1062         u32 avail = head - tail - 1;
1063
1064         if (head <= tail)
1065                 avail += q_size;
1066
1067         return avail;
1068 }
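/*
 * The request FIFO is a ring where one slot is always kept empty so that
 * head == tail means "empty" rather than "full". For example, with
 * q_size = 8, head = 2 and tail = 6, the computation above yields
 * 2 - 6 - 1 + 8 = 3 free slots (indices 6, 7 and 0; index 1 stays unused).
 */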
1069
1070 static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
1071                                  u32 head, u32 *ptail)
1072 {
1073         struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
1074         struct dbc_req *reqs = slice->reqs;
1075         u32 tail = *ptail;
1076         u32 avail;
1077
1078         avail = fifo_space_avail(head, tail, dbc->nelem);
1079         if (avail < slice->nents)
1080                 return -EAGAIN;
1081
1082         if (tail + slice->nents > dbc->nelem) {
1083                 avail = dbc->nelem - tail;
1084                 avail = min_t(u32, avail, slice->nents);
1085                 memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
1086                 reqs += avail;
1087                 avail = slice->nents - avail;
1088                 if (avail)
1089                         memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
1090         } else {
1091                 memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents);
1092         }
1093
1094         *ptail = (tail + slice->nents) % dbc->nelem;
1095
1096         return 0;
1097 }
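/*
 * Wrap-around illustration with hypothetical numbers: with nelem = 8,
 * tail = 6 and a slice of 4 elements, the branch above copies elements 0-1
 * into FIFO slots 6-7, the remaining two elements into slots 0-1, and the
 * tail advances to (6 + 4) % 8 = 2.
 */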
1098
1099 static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
1100                                          u64 resize, struct dma_bridge_chan *dbc, u32 head,
1101                                          u32 *ptail)
1102 {
1103         struct dbc_req *reqs = slice->reqs;
1104         struct dbc_req *last_req;
1105         u32 tail = *ptail;
1106         u64 last_bytes;
1107         u32 first_n;
1108         u32 avail;
1109
1110         avail = fifo_space_avail(head, tail, dbc->nelem);
1111
1112         /*
1113          * After this for loop is complete, first_n represents the index
1114          * of the last DMA request of this slice that needs to be
1115          * transferred after resizing, and last_bytes represents the DMA
1116          * size of that request.
1117          */
1118         last_bytes = resize;
1119         for (first_n = 0; first_n < slice->nents; first_n++)
1120                 if (last_bytes > le32_to_cpu(reqs[first_n].len))
1121                         last_bytes -= le32_to_cpu(reqs[first_n].len);
1122                 else
1123                         break;
1124
1125         if (avail < (first_n + 1))
1126                 return -EAGAIN;
1127
1128         if (first_n) {
1129                 if (tail + first_n > dbc->nelem) {
1130                         avail = dbc->nelem - tail;
1131                         avail = min_t(u32, avail, first_n);
1132                         memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
1133                         last_req = reqs + avail;
1134                         avail = first_n - avail;
1135                         if (avail)
1136                                 memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail);
1137                 } else {
1138                         memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * first_n);
1139                 }
1140         }
1141
1142         /*
1143          * Copy over the last entry. Here we need to adjust len to the leftover
1144          * size, and set src and dst to the entry it is copied to.
1145          */
1146         last_req = fifo_at(dbc->req_q_base, (tail + first_n) % dbc->nelem);
1147         memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));
1148
1149         /*
1150          * last_bytes holds the size of a DMA segment. The maximum DMA segment
1151          * size is set to UINT_MAX by qaic, so last_bytes can never exceed the
1152          * u32 range and the cast to u32 does not corrupt the value.
1153          */
1154         last_req->len = cpu_to_le32((u32)last_bytes);
1155         last_req->src_addr = reqs[first_n].src_addr;
1156         last_req->dest_addr = reqs[first_n].dest_addr;
1157         if (!last_bytes)
1158                 /* Disable DMA transfer */
1159                 last_req->cmd = GENMASK(7, 2) & reqs[first_n].cmd;
1160
1161         *ptail = (tail + first_n + 1) % dbc->nelem;
1162
1163         return 0;
1164 }
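/*
 * Resize illustration with hypothetical numbers: for a slice of three
 * 4096-byte elements and resize = 6144, the loop above stops at first_n = 1
 * with last_bytes = 2048. Element 0 is copied unchanged, then the slice's
 * final element (which carries the doorbell/completion setup) is copied into
 * the next slot with len overridden to 2048 and src/dest taken from element 1,
 * so only the first 6144 bytes are transferred while the post-transfer
 * actions still fire.
 */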
1165
1166 static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *file_priv,
1167                                   struct qaic_execute_entry *exec, unsigned int count,
1168                                   bool is_partial, struct dma_bridge_chan *dbc, u32 head,
1169                                   u32 *tail)
1170 {
1171         struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
1172         struct drm_gem_object *obj;
1173         struct bo_slice *slice;
1174         unsigned long flags;
1175         struct qaic_bo *bo;
1176         bool queued;
1177         int i, j;
1178         int ret;
1179
1180         for (i = 0; i < count; i++) {
1181                 /*
1182                  * The ref count will be decremented when the transfer of this
1183                  * buffer is complete, inside dbc_irq_threaded_fn().
1184                  */
1185                 obj = drm_gem_object_lookup(file_priv,
1186                                             is_partial ? pexec[i].handle : exec[i].handle);
1187                 if (!obj) {
1188                         ret = -ENOENT;
1189                         goto failed_to_send_bo;
1190                 }
1191
1192                 bo = to_qaic_bo(obj);
1193                 ret = mutex_lock_interruptible(&bo->lock);
1194                 if (ret)
1195                         goto failed_to_send_bo;
1196
1197                 if (!bo->sliced) {
1198                         ret = -EINVAL;
1199                         goto unlock_bo;
1200                 }
1201
1202                 if (is_partial && pexec[i].resize > bo->base.size) {
1203                         ret = -EINVAL;
1204                         goto unlock_bo;
1205                 }
1206
1207                 spin_lock_irqsave(&dbc->xfer_lock, flags);
1208                 queued = bo->queued;
1209                 bo->queued = true;
1210                 if (queued) {
1211                         spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1212                         ret = -EINVAL;
1213                         goto unlock_bo;
1214                 }
1215
1216                 bo->req_id = dbc->next_req_id++;
1217
1218                 list_for_each_entry(slice, &bo->slices, slice) {
1219                         for (j = 0; j < slice->nents; j++)
1220                                 slice->reqs[j].req_id = cpu_to_le16(bo->req_id);
1221
1222                         if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset))
1223                                 /* Configure the slice for no DMA transfer */
1224                                 ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail);
1225                         else if (is_partial && pexec[i].resize < slice->offset + slice->size)
1226                                 /* Configure the slice to be partially DMA transferred */
1227                                 ret = copy_partial_exec_reqs(qdev, slice,
1228                                                              pexec[i].resize - slice->offset, dbc,
1229                                                              head, tail);
1230                         else
1231                                 ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
1232                         if (ret) {
1233                                 bo->queued = false;
1234                                 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1235                                 goto unlock_bo;
1236                         }
1237                 }
1238                 reinit_completion(&bo->xfer_done);
1239                 list_add_tail(&bo->xfer_list, &dbc->xfer_list);
1240                 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1241                 dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
1242                 mutex_unlock(&bo->lock);
1243         }
1244
1245         return 0;
1246
1247 unlock_bo:
1248         mutex_unlock(&bo->lock);
1249 failed_to_send_bo:
1250         if (likely(obj))
1251                 drm_gem_object_put(obj);
1252         for (j = 0; j < i; j++) {
1253                 spin_lock_irqsave(&dbc->xfer_lock, flags);
1254                 bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
1255                 obj = &bo->base;
1256                 bo->queued = false;
1257                 list_del(&bo->xfer_list);
1258                 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1259                 dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
1260                 drm_gem_object_put(obj);
1261         }
1262         return ret;
1263 }
1264
1265 static void update_profiling_data(struct drm_file *file_priv,
1266                                   struct qaic_execute_entry *exec, unsigned int count,
1267                                   bool is_partial, u64 received_ts, u64 submit_ts, u32 queue_level)
1268 {
1269         struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
1270         struct drm_gem_object *obj;
1271         struct qaic_bo *bo;
1272         int i;
1273
1274         for (i = 0; i < count; i++) {
1275                 /*
1276                  * Since we already committed the BO to hardware, the only way
1277                  * this should fail is a pending signal. We can't cancel the
1278                  * submit to hardware, so we have to just skip the profiling
1279                  * data. In case the signal is not fatal to the process, we
1280                  * return success so that the user doesn't try to resubmit.
1281                  */
1282                 obj = drm_gem_object_lookup(file_priv,
1283                                             is_partial ? pexec[i].handle : exec[i].handle);
1284                 if (!obj)
1285                         break;
1286                 bo = to_qaic_bo(obj);
1287                 bo->perf_stats.req_received_ts = received_ts;
1288                 bo->perf_stats.req_submit_ts = submit_ts;
1289                 bo->perf_stats.queue_level_before = queue_level;
1290                 queue_level += bo->total_slice_nents;
1291                 drm_gem_object_put(obj);
1292         }
1293 }
1294
1295 static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv,
1296                                    bool is_partial)
1297 {
1298         struct qaic_execute *args = data;
1299         struct qaic_execute_entry *exec;
1300         struct dma_bridge_chan *dbc;
1301         int usr_rcu_id, qdev_rcu_id;
1302         struct qaic_device *qdev;
1303         struct qaic_user *usr;
1304         u8 __user *user_data;
1305         unsigned long n;
1306         u64 received_ts;
1307         u32 queue_level;
1308         u64 submit_ts;
1309         int rcu_id;
1310         u32 head;
1311         u32 tail;
1312         u64 size;
1313         int ret;
1314
1315         received_ts = ktime_get_ns();
1316
1317         size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
1318         n = (unsigned long)size * args->hdr.count;
1319         if (args->hdr.count == 0 || n / args->hdr.count != size)
1320                 return -EINVAL;
1321
1322         user_data = u64_to_user_ptr(args->data);
1323
1324         exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
1325         if (!exec)
1326                 return -ENOMEM;
1327
1328         if (copy_from_user(exec, user_data, n)) {
1329                 ret = -EFAULT;
1330                 goto free_exec;
1331         }
1332
1333         usr = file_priv->driver_priv;
1334         usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1335         if (!usr->qddev) {
1336                 ret = -ENODEV;
1337                 goto unlock_usr_srcu;
1338         }
1339
1340         qdev = usr->qddev->qdev;
1341         qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1342         if (qdev->dev_state != QAIC_ONLINE) {
1343                 ret = -ENODEV;
1344                 goto unlock_dev_srcu;
1345         }
1346
1347         if (args->hdr.dbc_id >= qdev->num_dbc) {
1348                 ret = -EINVAL;
1349                 goto unlock_dev_srcu;
1350         }
1351
1352         dbc = &qdev->dbc[args->hdr.dbc_id];
1353
1354         rcu_id = srcu_read_lock(&dbc->ch_lock);
1355         if (!dbc->usr || dbc->usr->handle != usr->handle) {
1356                 ret = -EPERM;
1357                 goto release_ch_rcu;
1358         }
1359
1360         head = readl(dbc->dbc_base + REQHP_OFF);
1361         tail = readl(dbc->dbc_base + REQTP_OFF);
1362
1363         if (head == U32_MAX || tail == U32_MAX) {
1364                 /* PCI link error */
1365                 ret = -ENODEV;
1366                 goto release_ch_rcu;
1367         }
1368
1369         queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
1370
1371         ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
1372                                      head, &tail);
1373         if (ret)
1374                 goto release_ch_rcu;
1375
1376         /* Finalize commit to hardware */
1377         submit_ts = ktime_get_ns();
1378         writel(tail, dbc->dbc_base + REQTP_OFF);
1379
1380         update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
1381                               submit_ts, queue_level);
1382
1383         if (datapath_polling)
1384                 schedule_work(&dbc->poll_work);
1385
1386 release_ch_rcu:
1387         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1388 unlock_dev_srcu:
1389         srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1390 unlock_usr_srcu:
1391         srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1392 free_exec:
1393         kfree(exec);
1394         return ret;
1395 }
1396
1397 int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1398 {
1399         return __qaic_execute_bo_ioctl(dev, data, file_priv, false);
1400 }
1401
1402 int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1403 {
1404         return __qaic_execute_bo_ioctl(dev, data, file_priv, true);
1405 }
1406
1407 /*
1408  * Our interrupt handling is a bit more complicated than a simple ideal, but
1409  * sadly necessary.
1410  *
1411  * Each dbc has a completion queue. Entries in the queue correspond to DMA
1412  * requests which the device has processed. The hardware already has built-in
1413  * IRQ mitigation. When the device puts an entry into the queue, it will
1414  * only trigger an interrupt if the queue was empty. Therefore, when adding
1415  * the Nth event to a non-empty queue, the hardware doesn't trigger an
1416  * interrupt. This means the host doesn't get additional interrupts signaling
1417  * the same thing - the queue has something to process.
1418  * This behavior can be overridden in the DMA request.
1419  * This means that when the host receives an interrupt, it is required to
1420  * drain the queue.
1421  *
1422  * This behavior is what NAPI attempts to accomplish, although we can't use
1423  * NAPI as we don't have a netdev. We use threaded irqs instead.
1424  *
1425  * However, there is a situation where the host drains the queue fast enough
1426  * that every event causes an interrupt. Typically this is not a problem as
1427  * the rate of events would be low. However, that is not the case with
1428  * lprnet for example. On an Intel Xeon D-2191 where we run 8 instances of
1429  * lprnet, the host receives roughly 80k interrupts per second from the device
1430  * (per /proc/interrupts). While NAPI documentation indicates the host should
1431  * just chug along, sadly that behavior causes instability in some hosts.
1432  *
1433  * Therefore, we implement an interrupt disable scheme similar to NAPI. The
1434  * key difference is that we will delay after draining the queue for a small
1435  * time to allow additional events to come in via polling. Using the above
1436  * lprnet workload, this reduces the number of interrupts processed from
1437  * ~80k/sec to about 64 in 5 minutes and appears to solve the system
1438  * instability.
1439  */
1440 irqreturn_t dbc_irq_handler(int irq, void *data)
1441 {
1442         struct dma_bridge_chan *dbc = data;
1443         int rcu_id;
1444         u32 head;
1445         u32 tail;
1446
1447         rcu_id = srcu_read_lock(&dbc->ch_lock);
1448
1449         if (datapath_polling) {
1450                 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1451                 /*
1452                  * Normally datapath_polling will not have irqs enabled, but
1453                  * when running with only one MSI the interrupt is shared with
1454                  * MHI so it cannot be disabled. Return ASAP instead.
1455                  */
1456                 return IRQ_HANDLED;
1457         }
1458
1459         if (!dbc->usr) {
1460                 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1461                 return IRQ_HANDLED;
1462         }
1463
1464         head = readl(dbc->dbc_base + RSPHP_OFF);
1465         if (head == U32_MAX) { /* PCI link error */
1466                 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1467                 return IRQ_NONE;
1468         }
1469
1470         tail = readl(dbc->dbc_base + RSPTP_OFF);
1471         if (tail == U32_MAX) { /* PCI link error */
1472                 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1473                 return IRQ_NONE;
1474         }
1475
1476         if (head == tail) { /* queue empty */
1477                 srcu_read_unlock(&dbc->ch_lock, rcu_id);
1478                 return IRQ_NONE;
1479         }
1480
1481         if (!dbc->qdev->single_msi)
1482                 disable_irq_nosync(irq);
1483         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1484         return IRQ_WAKE_THREAD;
1485 }
1486
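/*
 * Worker used when datapath polling is enabled. While the channel is assigned
 * and has outstanding transfers, periodically check the response queue and
 * wake the threaded irq handler once the device has produced new completions.
 */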
1487 void irq_polling_work(struct work_struct *work)
1488 {
1489         struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work);
1490         unsigned long flags;
1491         int rcu_id;
1492         u32 head;
1493         u32 tail;
1494
1495         rcu_id = srcu_read_lock(&dbc->ch_lock);
1496
1497         while (1) {
1498                 if (dbc->qdev->dev_state != QAIC_ONLINE) {
1499                         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1500                         return;
1501                 }
1502                 if (!dbc->usr) {
1503                         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1504                         return;
1505                 }
1506                 spin_lock_irqsave(&dbc->xfer_lock, flags);
1507                 if (list_empty(&dbc->xfer_list)) {
1508                         spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1509                         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1510                         return;
1511                 }
1512                 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1513
1514                 head = readl(dbc->dbc_base + RSPHP_OFF);
1515                 if (head == U32_MAX) { /* PCI link error */
1516                         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1517                         return;
1518                 }
1519
1520                 tail = readl(dbc->dbc_base + RSPTP_OFF);
1521                 if (tail == U32_MAX) { /* PCI link error */
1522                         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1523                         return;
1524                 }
1525
1526                 if (head != tail) {
1527                         irq_wake_thread(dbc->irq, dbc);
1528                         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1529                         return;
1530                 }
1531
1532                 cond_resched();
1533                 usleep_range(datapath_poll_interval_us, 2 * datapath_poll_interval_us);
1534         }
1535 }
1536
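/*
 * Threaded irq handler for a dbc. Drains the response queue, marking a BO
 * complete once completions for all of its slices have been seen. After the
 * queue is empty, linger for up to NUM_DELAYS short sleeps to pick up late
 * completions, then re-enable the irq (or schedule irq_polling_work() when
 * datapath polling is in use) and recheck the queue to close the race with
 * missed events.
 */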
1537 irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
1538 {
1539         struct dma_bridge_chan *dbc = data;
1540         int event_count = NUM_EVENTS;
1541         int delay_count = NUM_DELAYS;
1542         struct qaic_device *qdev;
1543         struct qaic_bo *bo, *i;
1544         struct dbc_rsp *rsp;
1545         unsigned long flags;
1546         int rcu_id;
1547         u16 status;
1548         u16 req_id;
1549         u32 head;
1550         u32 tail;
1551
1552         rcu_id = srcu_read_lock(&dbc->ch_lock);
1553         qdev = dbc->qdev;
1554
1555         head = readl(dbc->dbc_base + RSPHP_OFF);
1556         if (head == U32_MAX) /* PCI link error */
1557                 goto error_out;
1558
1559 read_fifo:
1560
1561         if (!event_count) {
1562                 event_count = NUM_EVENTS;
1563                 cond_resched();
1564         }
1565
1566          /*
1567           * If this channel isn't assigned, or gets unassigned during
1568           * processing, we have nothing further to do.
1569           */
1570         if (!dbc->usr)
1571                 goto error_out;
1572
1573         tail = readl(dbc->dbc_base + RSPTP_OFF);
1574         if (tail == U32_MAX) /* PCI link error */
1575                 goto error_out;
1576
1577         if (head == tail) { /* queue empty */
1578                 if (delay_count) {
1579                         --delay_count;
1580                         usleep_range(100, 200);
1581                         goto read_fifo; /* check for a new event */
1582                 }
1583                 goto normal_out;
1584         }
1585
1586         delay_count = NUM_DELAYS;
1587         while (head != tail) {
1588                 if (!event_count)
1589                         break;
1590                 --event_count;
1591                 rsp = dbc->rsp_q_base + head * sizeof(*rsp);
1592                 req_id = le16_to_cpu(rsp->req_id);
1593                 status = le16_to_cpu(rsp->status);
1594                 if (status)
1595                         pci_dbg(qdev->pdev, "req_id %d failed with status %d\n", req_id, status);
1596                 spin_lock_irqsave(&dbc->xfer_lock, flags);
1597                 /*
1598                  * A BO can receive multiple completions, since a BO can be
1599                  * divided into multiple slices and a buffer receives as many
1600                  * completions as it has slices. Until completions arrive for
1601                  * all of its slices, we cannot mark the buffer complete.
1602                  */
1603                 list_for_each_entry_safe(bo, i, &dbc->xfer_list, xfer_list) {
1604                         if (bo->req_id == req_id)
1605                                 bo->nr_slice_xfer_done++;
1606                         else
1607                                 continue;
1608
1609                         if (bo->nr_slice_xfer_done < bo->nr_slice)
1610                                 break;
1611
1612                         /*
1613                          * At this point we have received completions for all of
1614                          * the BO's slices, so the BO's execution is complete.
1615                          */
1616                         dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
1617                         bo->nr_slice_xfer_done = 0;
1618                         bo->queued = false;
1619                         list_del(&bo->xfer_list);
1620                         bo->perf_stats.req_processed_ts = ktime_get_ns();
1621                         complete_all(&bo->xfer_done);
1622                         drm_gem_object_put(&bo->base);
1623                         break;
1624                 }
1625                 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1626                 head = (head + 1) % dbc->nelem;
1627         }
1628
1629         /*
1630          * Update the head pointer of response queue and let the device know
1631          * that we have consumed elements from the queue.
1632          */
1633         writel(head, dbc->dbc_base + RSPHP_OFF);
1634
1635         /* elements might have been put in the queue while we were processing */
1636         goto read_fifo;
1637
1638 normal_out:
1639         if (!qdev->single_msi && likely(!datapath_polling))
1640                 enable_irq(irq);
1641         else if (unlikely(datapath_polling))
1642                 schedule_work(&dbc->poll_work);
1643         /* Checking the fifo and enabling irqs is racy; check for a missed event. */
1644         tail = readl(dbc->dbc_base + RSPTP_OFF);
1645         if (tail != U32_MAX && head != tail) {
1646                 if (!qdev->single_msi && likely(!datapath_polling))
1647                         disable_irq_nosync(irq);
1648                 goto read_fifo;
1649         }
1650         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1651         return IRQ_HANDLED;
1652
1653 error_out:
1654         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1655         if (!qdev->single_msi && likely(!datapath_polling))
1656                 enable_irq(irq);
1657         else if (unlikely(datapath_polling))
1658                 schedule_work(&dbc->poll_work);
1659
1660         return IRQ_HANDLED;
1661 }
1662
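/*
 * Handler for DRM_IOCTL_QAIC_WAIT_BO. Waits (interruptibly) for the given BO
 * to finish execution on the specified dbc, using args->timeout or
 * wait_exec_default_timeout_ms when no timeout is provided.
 */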
1663 int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1664 {
1665         struct qaic_wait *args = data;
1666         int usr_rcu_id, qdev_rcu_id;
1667         struct dma_bridge_chan *dbc;
1668         struct drm_gem_object *obj;
1669         struct qaic_device *qdev;
1670         unsigned long timeout;
1671         struct qaic_user *usr;
1672         struct qaic_bo *bo;
1673         int rcu_id;
1674         int ret;
1675
1676         if (args->pad != 0)
1677                 return -EINVAL;
1678
1679         usr = file_priv->driver_priv;
1680         usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1681         if (!usr->qddev) {
1682                 ret = -ENODEV;
1683                 goto unlock_usr_srcu;
1684         }
1685
1686         qdev = usr->qddev->qdev;
1687         qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1688         if (qdev->dev_state != QAIC_ONLINE) {
1689                 ret = -ENODEV;
1690                 goto unlock_dev_srcu;
1691         }
1692
1693         if (args->dbc_id >= qdev->num_dbc) {
1694                 ret = -EINVAL;
1695                 goto unlock_dev_srcu;
1696         }
1697
1698         dbc = &qdev->dbc[args->dbc_id];
1699
1700         rcu_id = srcu_read_lock(&dbc->ch_lock);
1701         if (dbc->usr != usr) {
1702                 ret = -EPERM;
1703                 goto unlock_ch_srcu;
1704         }
1705
1706         obj = drm_gem_object_lookup(file_priv, args->handle);
1707         if (!obj) {
1708                 ret = -ENOENT;
1709                 goto unlock_ch_srcu;
1710         }
1711
1712         bo = to_qaic_bo(obj);
1713         timeout = args->timeout ? args->timeout : wait_exec_default_timeout_ms;
1714         timeout = msecs_to_jiffies(timeout);
1715         ret = wait_for_completion_interruptible_timeout(&bo->xfer_done, timeout);
1716         if (!ret) {
1717                 ret = -ETIMEDOUT;
1718                 goto put_obj;
1719         }
1720         if (ret > 0)
1721                 ret = 0;
1722
1723         if (!dbc->usr)
1724                 ret = -EPERM;
1725
1726 put_obj:
1727         drm_gem_object_put(obj);
1728 unlock_ch_srcu:
1729         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1730 unlock_dev_srcu:
1731         srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1732 unlock_usr_srcu:
1733         srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1734         return ret;
1735 }
1736
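/*
 * Handler for the perf stats ioctl. For each BO handle supplied by userspace,
 * report the recorded submit and device latencies (in microseconds) along with
 * the queue level observed when the BO was submitted.
 */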
1737 int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1738 {
1739         struct qaic_perf_stats_entry *ent = NULL;
1740         struct qaic_perf_stats *args = data;
1741         int usr_rcu_id, qdev_rcu_id;
1742         struct drm_gem_object *obj;
1743         struct qaic_device *qdev;
1744         struct qaic_user *usr;
1745         struct qaic_bo *bo;
1746         int ret, i;
1747
1748         usr = file_priv->driver_priv;
1749         usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1750         if (!usr->qddev) {
1751                 ret = -ENODEV;
1752                 goto unlock_usr_srcu;
1753         }
1754
1755         qdev = usr->qddev->qdev;
1756         qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1757         if (qdev->dev_state != QAIC_ONLINE) {
1758                 ret = -ENODEV;
1759                 goto unlock_dev_srcu;
1760         }
1761
1762         if (args->hdr.dbc_id >= qdev->num_dbc) {
1763                 ret = -EINVAL;
1764                 goto unlock_dev_srcu;
1765         }
1766
1767         ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL);
1768         if (!ent) {
1769                 ret = -ENOMEM;
1770                 goto unlock_dev_srcu;
1771         }
1772
1773         ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent));
1774         if (ret) {
1775                 ret = -EFAULT;
1776                 goto free_ent;
1777         }
1778
1779         for (i = 0; i < args->hdr.count; i++) {
1780                 obj = drm_gem_object_lookup(file_priv, ent[i].handle);
1781                 if (!obj) {
1782                         ret = -ENOENT;
1783                         goto free_ent;
1784                 }
1785                 bo = to_qaic_bo(obj);
1786                 /*
1787                  * If the perf stats ioctl is called before the wait ioctl has
1788                  * completed, then the latency information is invalid.
1789                  */
1790                 if (bo->perf_stats.req_processed_ts < bo->perf_stats.req_submit_ts) {
1791                         ent[i].device_latency_us = 0;
1792                 } else {
1793                         ent[i].device_latency_us = div_u64((bo->perf_stats.req_processed_ts -
1794                                                             bo->perf_stats.req_submit_ts), 1000);
1795                 }
1796                 ent[i].submit_latency_us = div_u64((bo->perf_stats.req_submit_ts -
1797                                                     bo->perf_stats.req_received_ts), 1000);
1798                 ent[i].queue_level_before = bo->perf_stats.queue_level_before;
1799                 ent[i].num_queue_element = bo->total_slice_nents;
1800                 drm_gem_object_put(obj);
1801         }
1802
1803         if (copy_to_user(u64_to_user_ptr(args->data), ent, args->hdr.count * sizeof(*ent)))
1804                 ret = -EFAULT;
1805
1806 free_ent:
1807         kfree(ent);
1808 unlock_dev_srcu:
1809         srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1810 unlock_usr_srcu:
1811         srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1812         return ret;
1813 }
1814
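/*
 * Undo the slicing of a BO: free its slices, unprepare it for DMA, reset its
 * state, remove it from the dbc's bo_list and drop the associated GEM
 * reference.
 */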
1815 static void detach_slice_bo(struct qaic_device *qdev, struct qaic_bo *bo)
1816 {
1817         qaic_free_slices_bo(bo);
1818         qaic_unprepare_bo(qdev, bo);
1819         qaic_init_bo(bo, true);
1820         list_del(&bo->bo_list);
1821         drm_gem_object_put(&bo->base);
1822 }
1823
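/*
 * Handler for the detach slice ioctl. Detaches a sliced BO from its dbc,
 * failing with -EBUSY if the BO is still queued to the hardware for DMA.
 */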
1824 int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1825 {
1826         struct qaic_detach_slice *args = data;
1827         int rcu_id, usr_rcu_id, qdev_rcu_id;
1828         struct dma_bridge_chan *dbc;
1829         struct drm_gem_object *obj;
1830         struct qaic_device *qdev;
1831         struct qaic_user *usr;
1832         unsigned long flags;
1833         struct qaic_bo *bo;
1834         int ret;
1835
1836         if (args->pad != 0)
1837                 return -EINVAL;
1838
1839         usr = file_priv->driver_priv;
1840         usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1841         if (!usr->qddev) {
1842                 ret = -ENODEV;
1843                 goto unlock_usr_srcu;
1844         }
1845
1846         qdev = usr->qddev->qdev;
1847         qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1848         if (qdev->dev_state != QAIC_ONLINE) {
1849                 ret = -ENODEV;
1850                 goto unlock_dev_srcu;
1851         }
1852
1853         obj = drm_gem_object_lookup(file_priv, args->handle);
1854         if (!obj) {
1855                 ret = -ENOENT;
1856                 goto unlock_dev_srcu;
1857         }
1858
1859         bo = to_qaic_bo(obj);
1860         ret = mutex_lock_interruptible(&bo->lock);
1861         if (ret)
1862                 goto put_bo;
1863
1864         if (!bo->sliced) {
1865                 ret = -EINVAL;
1866                 goto unlock_bo;
1867         }
1868
1869         dbc = bo->dbc;
1870         rcu_id = srcu_read_lock(&dbc->ch_lock);
1871         if (dbc->usr != usr) {
1872                 ret = -EINVAL;
1873                 goto unlock_ch_srcu;
1874         }
1875
1876         /* Check if BO is committed to H/W for DMA */
1877         spin_lock_irqsave(&dbc->xfer_lock, flags);
1878         if (bo->queued) {
1879                 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1880                 ret = -EBUSY;
1881                 goto unlock_ch_srcu;
1882         }
1883         spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1884
1885         detach_slice_bo(qdev, bo);
1886
1887 unlock_ch_srcu:
1888         srcu_read_unlock(&dbc->ch_lock, rcu_id);
1889 unlock_bo:
1890         mutex_unlock(&bo->lock);
1891 put_bo:
1892         drm_gem_object_put(obj);
1893 unlock_dev_srcu:
1894         srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1895 unlock_usr_srcu:
1896         srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1897         return ret;
1898 }
1899
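/*
 * Remove every BO from the dbc's transfer list, resetting its transfer
 * bookkeeping, completing xfer_done so that any waiters are released, and
 * dropping a GEM reference for each BO.
 */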
1900 static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
1901 {
1902         unsigned long flags;
1903         struct qaic_bo *bo;
1904
1905         spin_lock_irqsave(&dbc->xfer_lock, flags);
1906         while (!list_empty(&dbc->xfer_list)) {
1907                 bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
1908                 bo->queued = false;
1909                 list_del(&bo->xfer_list);
1910                 spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1911                 bo->nr_slice_xfer_done = 0;
1912                 bo->req_id = 0;
1913                 bo->perf_stats.req_received_ts = 0;
1914                 bo->perf_stats.req_submit_ts = 0;
1915                 bo->perf_stats.req_processed_ts = 0;
1916                 bo->perf_stats.queue_level_before = 0;
1917                 dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
1918                 complete_all(&bo->xfer_done);
1919                 drm_gem_object_put(&bo->base);
1920                 spin_lock_irqsave(&dbc->xfer_lock, flags);
1921         }
1922         spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1923 }
1924
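/*
 * Detach the user context from a dbc, provided the caller owns it.
 * synchronize_srcu() ensures existing readers of the channel have drained
 * before this returns.
 */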
1925 int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
1926 {
1927         if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle)
1928                 return -EPERM;
1929
1930         qdev->dbc[dbc_id].usr = NULL;
1931         synchronize_srcu(&qdev->dbc[dbc_id].ch_lock);
1932         return 0;
1933 }
1934
1935 /**
1936  * enable_dbc - Enable the DBC. DBCs are disabled by removing the user
1937  * context from them. Adding the user context back to the DBC enables it.
1938  * This function trusts the DBC ID passed and expects the DBC to be disabled.
1939  * @qdev: Qranium device handle
1940  * @dbc_id: ID of the DBC
1941  * @usr: User context
1942  */
1943 void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
1944 {
1945         qdev->dbc[dbc_id].usr = usr;
1946 }
1947
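/*
 * Force quiesce a dbc: detach any user and flush the xfer_list, completing all
 * outstanding transfers. The list is emptied twice so that elements added by
 * threads that still held the channel lock are flushed as well.
 */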
1948 void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
1949 {
1950         struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
1951
1952         dbc->usr = NULL;
1953         empty_xfer_list(qdev, dbc);
1954         synchronize_srcu(&dbc->ch_lock);
1955         /*
1956          * Threads that held the channel lock may have added more elements to
1957          * the xfer_list. Flush those out as well.
1958          */
1959         empty_xfer_list(qdev, dbc);
1960 }
1961
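/*
 * Fully tear down a dbc: quiesce it, free the request/response queue memory,
 * detach any BOs still sliced against it, and mark the dbc available for
 * reuse.
 */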
1962 void release_dbc(struct qaic_device *qdev, u32 dbc_id)
1963 {
1964         struct qaic_bo *bo, *bo_temp;
1965         struct dma_bridge_chan *dbc;
1966
1967         dbc = &qdev->dbc[dbc_id];
1968         if (!dbc->in_use)
1969                 return;
1970
1971         wakeup_dbc(qdev, dbc_id);
1972
1973         dma_free_coherent(&qdev->pdev->dev, dbc->total_size, dbc->req_q_base, dbc->dma_addr);
1974         dbc->total_size = 0;
1975         dbc->req_q_base = NULL;
1976         dbc->dma_addr = 0;
1977         dbc->nelem = 0;
1978         dbc->usr = NULL;
1979
1980         list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
1981                 drm_gem_object_get(&bo->base);
1982                 mutex_lock(&bo->lock);
1983                 detach_slice_bo(qdev, bo);
1984                 mutex_unlock(&bo->lock);
1985                 drm_gem_object_put(&bo->base);
1986         }
1987
1988         dbc->in_use = false;
1989         wake_up(&dbc->dbc_release);
1990 }