drivers/gpu/drm/omapdrm/omap_dmm_tiler.c (GNU Linux-libre 4.9.317-gnu1)
/*
 * DMM IOMMU driver support functions for TI OMAP processors.
 *
 * Author: Rob Clark <rob@ti.com>
 *         Andy Gross <andy.gross@ti.com>
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"

#define DMM_DRIVER_NAME "dmm"

/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;

#if defined(CONFIG_OF)
static const struct of_device_id dmm_of_match[];
#endif

/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);

/* Geometry table */
#define GEOM(xshift, yshift, bytes_per_pixel) { \
                .x_shft = (xshift), \
                .y_shft = (yshift), \
                .cpp    = (bytes_per_pixel), \
                .slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
                .slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
        }

static const struct {
        uint32_t x_shft;        /* unused X-bits (as part of bpp) */
        uint32_t y_shft;        /* unused Y-bits (as part of bpp) */
        uint32_t cpp;           /* bytes/chars per pixel */
        uint32_t slot_w;        /* width of each slot (in pixels) */
        uint32_t slot_h;        /* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
        [TILFMT_8BIT]  = GEOM(0, 0, 1),
        [TILFMT_16BIT] = GEOM(0, 1, 2),
        [TILFMT_32BIT] = GEOM(1, 1, 4),
        [TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};
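
/*
 * Worked example (illustrative only): assuming SLOT_WIDTH_BITS and
 * SLOT_HEIGHT_BITS are both 6 (their values in omap_dmm_priv.h), the
 * GEOM() entries above give:
 *
 *   TILFMT_8BIT:  GEOM(0, 0, 1) -> 64 x 64 pixel slots
 *   TILFMT_16BIT: GEOM(0, 1, 2) -> 64 x 32 pixel slots
 *   TILFMT_32BIT: GEOM(1, 1, 4) -> 32 x 32 pixel slots
 *
 * so every slot covers the same 4 KiB of memory (64*64*1 = 64*32*2 =
 * 32*32*4 = 4096 bytes).
 */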

/* lookup table for registers w/ per-engine instances */
static const uint32_t reg[][4] = {
        [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
                        DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
        [PAT_DESCR]  = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
                        DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};

static u32 dmm_read(struct dmm *dmm, u32 reg)
{
        return readl(dmm->base + reg);
}

static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
        writel(val, dmm->base + reg);
}

/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
        void *ptr;
        struct refill_engine *engine = txn->engine_handle;

        /* dmm programming requires 16 byte aligned addresses */
        txn->current_pa = round_up(txn->current_pa, 16);
        txn->current_va = (void *)round_up((long)txn->current_va, 16);

        ptr = txn->current_va;
        *pa = txn->current_pa;

        txn->current_pa += sz;
        txn->current_va += sz;

        BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);

        return ptr;
}

/* check status and spin until wait_mask comes true */
static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
{
        struct dmm *dmm = engine->dmm;
        uint32_t r = 0, err, i;

        i = DMM_FIXED_RETRY_COUNT;
        while (true) {
                r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
                err = r & DMM_PATSTATUS_ERR;
                if (err)
                        return -EFAULT;

                if ((r & wait_mask) == wait_mask)
                        break;

                if (--i == 0)
                        return -ETIMEDOUT;

                udelay(1);
        }

        return 0;
}

static void release_engine(struct refill_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&list_lock, flags);
        list_add(&engine->idle_node, &omap_dmm->idle_head);
        spin_unlock_irqrestore(&list_lock, flags);

        atomic_inc(&omap_dmm->engine_counter);
        wake_up_interruptible(&omap_dmm->engine_queue);
}

static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
        struct dmm *dmm = arg;
        uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
        int i;

        /* ack IRQ */
        dmm_write(dmm, status, DMM_PAT_IRQSTATUS);

        for (i = 0; i < dmm->num_engines; i++) {
                if (status & DMM_IRQSTAT_LST) {
                        if (dmm->engines[i].async)
                                release_engine(&dmm->engines[i]);

                        complete(&dmm->engines[i].compl);
                }

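                /*
                 * Each engine is assumed to own one byte of
                 * DMM_PAT_IRQSTATUS (engine n -> byte n, which is what the
                 * per-iteration shift below relies on); advance to the next
                 * engine's bits.
                 */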
                status >>= 8;
        }

        return IRQ_HANDLED;
}

/**
 * Get a handle for a DMM transaction
 */
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
        struct dmm_txn *txn = NULL;
        struct refill_engine *engine = NULL;
        int ret;
        unsigned long flags;

        /* wait until an engine is available */
        ret = wait_event_interruptible(omap_dmm->engine_queue,
                atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
        if (ret)
                return ERR_PTR(ret);

        /* grab an idle engine */
        spin_lock_irqsave(&list_lock, flags);
        if (!list_empty(&dmm->idle_head)) {
                engine = list_entry(dmm->idle_head.next, struct refill_engine,
                                        idle_node);
                list_del(&engine->idle_node);
        }
        spin_unlock_irqrestore(&list_lock, flags);

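        /*
         * The engine_counter gate above only lets us through once an engine
         * has been returned to the idle list, so an empty list here would
         * indicate a driver bug.
         */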
        BUG_ON(!engine);

        txn = &engine->txn;
        engine->tcm = tcm;
        txn->engine_handle = engine;
        txn->last_pat = NULL;
        txn->current_va = engine->refill_va;
        txn->current_pa = engine->refill_pa;

        return txn;
}

/**
 * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
 * corresponding slot is cleared (i.e. dummy_pa is programmed)
 */
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
                struct page **pages, uint32_t npages, uint32_t roll)
{
        dma_addr_t pat_pa = 0, data_pa = 0;
        uint32_t *data;
        struct pat *pat;
        struct refill_engine *engine = txn->engine_handle;
        int columns = (1 + area->x1 - area->x0);
        int rows = (1 + area->y1 - area->y0);
        int i = columns * rows;

        pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);

        if (txn->last_pat)
                txn->last_pat->next_pa = (uint32_t)pat_pa;

        pat->area = *area;

        /* adjust Y coordinates based off of container parameters */
        pat->area.y0 += engine->tcm->y_offset;
        pat->area.y1 += engine->tcm->y_offset;

        pat->ctrl = (struct pat_ctrl){
                        .start = 1,
                        .lut_id = engine->tcm->lut_id,
                };

        data = alloc_dma(txn, 4 * i, &data_pa);
        /* FIXME: what if data_pa is more than 32-bit ? */
        pat->data_pa = data_pa;

        while (i--) {
                int n = i + roll;

                if (n >= npages)
                        n -= npages;
                data[i] = (pages && pages[n]) ?
                        page_to_phys(pages[n]) : engine->dmm->dummy_pa;
        }

        txn->last_pat = pat;
}

/**
 * Commit the DMM transaction.
 */
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
        int ret = 0;
        struct refill_engine *engine = txn->engine_handle;
        struct dmm *dmm = engine->dmm;

        if (!txn->last_pat) {
                dev_err(engine->dmm->dev, "need at least one txn\n");
                ret = -EINVAL;
                goto cleanup;
        }

        txn->last_pat->next_pa = 0;
        /* ensure that the written descriptors are visible to DMM */
        wmb();

        /*
         * NOTE: the wmb() above should be enough, but there seems to be a bug
         * in OMAP's memory barrier implementation, which in some rare cases
         * may cause the writes not to be observable after wmb().
         */

        /* read back to ensure the data is in RAM */
        readl(&txn->last_pat->next_pa);

        /* write to PAT_DESCR to clear out any pending transaction */
        dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);

        /* wait for engine ready: */
        ret = wait_status(engine, DMM_PATSTATUS_READY);
        if (ret) {
                ret = -EFAULT;
                goto cleanup;
        }

        /* mark whether it is async to denote list management in IRQ handler */
        engine->async = !wait;
        reinit_completion(&engine->compl);
        /* verify that the irq handler sees the 'async' and completion value */
        smp_mb();

        /* kick reload */
        dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);

        if (wait) {
                if (!wait_for_completion_timeout(&engine->compl,
                                msecs_to_jiffies(100))) {
                        dev_err(dmm->dev, "timed out waiting for done\n");
                        ret = -ETIMEDOUT;
                        goto cleanup;
                }

                /* check the engine status before continuing */
                ret = wait_status(engine, DMM_PATSTATUS_READY |
                                  DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE);
        }

cleanup:
        /* only place engine back on list if we are done with it */
        if (ret || wait)
                release_engine(engine);

        return ret;
}

/*
 * DMM programming
 */
static int fill(struct tcm_area *area, struct page **pages,
                uint32_t npages, uint32_t roll, bool wait)
{
        int ret = 0;
        struct tcm_area slice, area_s;
        struct dmm_txn *txn;

        /*
         * FIXME
         *
         * Asynchronous fill does not work reliably, as the driver does not
         * handle errors in the async code paths. The fill operation may
         * silently fail, leading to leaking DMM engines, which may eventually
         * lead to deadlock if we run out of DMM engines.
         *
         * For now, always set 'wait' so that we only use sync fills. Async
         * fills should be fixed, or alternatively we could decide to only
         * support sync fills and so the whole async code path could be
         * removed.
         */
        wait = true;

        txn = dmm_txn_init(omap_dmm, area->tcm);
        if (IS_ERR_OR_NULL(txn))
                return -ENOMEM;

        tcm_for_each_slice(slice, *area, area_s) {
                struct pat_area p_area = {
                                .x0 = slice.p0.x,  .y0 = slice.p0.y,
                                .x1 = slice.p1.x,  .y1 = slice.p1.y,
                };

                dmm_txn_append(txn, &p_area, pages, npages, roll);

                roll += tcm_sizeof(slice);
        }

        ret = dmm_txn_commit(txn, wait);

        return ret;
}

/*
 * Pin/unpin
 */

/* note: slots for which pages[i] == NULL are filled w/ dummy page */
int tiler_pin(struct tiler_block *block, struct page **pages,
                uint32_t npages, uint32_t roll, bool wait)
{
        int ret;

        ret = fill(&block->area, pages, npages, roll, wait);

        if (ret)
                tiler_unpin(block);

        return ret;
}

int tiler_unpin(struct tiler_block *block)
{
        return fill(&block->area, NULL, 0, 0, false);
}
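
/*
 * Typical use of the reserve/pin API below (an illustrative sketch, not
 * code from this driver; "pages", "npages", "width" and "height" stand in
 * for caller-provided values and error handling is abbreviated):
 *
 *      struct tiler_block *block;
 *
 *      block = tiler_reserve_2d(TILFMT_16BIT, width, height, 0);
 *      if (IS_ERR(block))
 *              return PTR_ERR(block);
 *
 *      ret = tiler_pin(block, pages, npages, 0, true);
 *      if (!ret)
 *              dma_addr = tiler_ssptr(block);  (tiled view address)
 *      ...
 *      tiler_unpin(block);
 *      tiler_release(block);
 */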

/*
 * Reserve/release
 */
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
                uint16_t h, uint16_t align)
{
        struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
        u32 min_align = 128;
        int ret;
        unsigned long flags;
        size_t slot_bytes;

        if (!block)
                return ERR_PTR(-ENOMEM);

        BUG_ON(!validfmt(fmt));

        /* convert width/height to slots */
        w = DIV_ROUND_UP(w, geom[fmt].slot_w);
        h = DIV_ROUND_UP(h, geom[fmt].slot_h);

        /* convert alignment to slots */
        slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
        min_align = max(min_align, slot_bytes);
        align = (align > min_align) ? ALIGN(align, min_align) : min_align;
        align /= slot_bytes;
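
        /*
         * Worked example (illustrative; values assume the 64 x 32-pixel
         * 16-bit slots noted in the geometry table comment): for
         * TILFMT_16BIT, slot_bytes = 64 * 2 = 128. A requested align of 0
         * becomes min_align (128 bytes), i.e. 1 slot; a requested align of
         * 512 stays 512 bytes, i.e. 4 slots passed to tcm_reserve_2d().
         */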

        block->fmt = fmt;

        ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
                        &block->area);
        if (ret) {
                kfree(block);
                return ERR_PTR(-ENOMEM);
        }

        /* add to allocation list */
        spin_lock_irqsave(&list_lock, flags);
        list_add(&block->alloc_node, &omap_dmm->alloc_head);
        spin_unlock_irqrestore(&list_lock, flags);

        return block;
}

struct tiler_block *tiler_reserve_1d(size_t size)
{
        struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
        int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long flags;

        if (!block)
                return ERR_PTR(-ENOMEM);

        block->fmt = TILFMT_PAGE;

        if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
                                &block->area)) {
                kfree(block);
                return ERR_PTR(-ENOMEM);
        }

        spin_lock_irqsave(&list_lock, flags);
        list_add(&block->alloc_node, &omap_dmm->alloc_head);
        spin_unlock_irqrestore(&list_lock, flags);

        return block;
}

/* note: if you have pinned pages, you should have already unpinned them! */
int tiler_release(struct tiler_block *block)
{
        int ret = tcm_free(&block->area);
        unsigned long flags;

        if (block->area.tcm)
                dev_err(omap_dmm->dev, "failed to release block\n");

        spin_lock_irqsave(&list_lock, flags);
        list_del(&block->alloc_node);
        spin_unlock_irqrestore(&list_lock, flags);

        kfree(block);
        return ret;
}

/*
 * Utils
 */

/* calculate the tiler space address of a pixel in a view orientation...
 * below description copied from the display subsystem section of TRM:
 *
 * When the TILER is addressed, the bits:
 *   [28:27] = 0x0 for 8-bit tiled
 *             0x1 for 16-bit tiled
 *             0x2 for 32-bit tiled
 *             0x3 for page mode
 *   [31:29] = 0x0 for 0-degree view
 *             0x1 for 180-degree view + mirroring
 *             0x2 for 0-degree view + mirroring
 *             0x3 for 180-degree view
 *             0x4 for 270-degree view + mirroring
 *             0x5 for 270-degree view
 *             0x6 for 90-degree view
 *             0x7 for 90-degree view + mirroring
 * Otherwise the bits indicate the corresponding bit address to access
 * the SDRAM.
 */
static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
{
        u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;

        x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
        y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
        alignment = geom[fmt].x_shft + geom[fmt].y_shft;

        /* validate coordinate */
        x_mask = MASK(x_bits);
        y_mask = MASK(y_bits);

        if (x > x_mask || y > y_mask) {
                DBG("invalid coords: %u > %u || %u > %u",
                                x, x_mask, y, y_mask);
                return 0;
        }

        /* account for mirroring */
        if (orient & MASK_X_INVERT)
                x ^= x_mask;
        if (orient & MASK_Y_INVERT)
                y ^= y_mask;

        /* get coordinate address */
        if (orient & MASK_XY_FLIP)
                tmp = ((x << y_bits) + y);
        else
                tmp = ((y << x_bits) + x);

        return TIL_ADDR((tmp << alignment), orient, fmt);
}
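
/*
 * Worked example for tiler_get_address() (illustrative; assumes
 * CONT_WIDTH_BITS = 14 and CONT_HEIGHT_BITS = 13, the values in
 * omap_dmm_priv.h): for TILFMT_16BIT (x_shft = 0, y_shft = 1) with no
 * rotation or mirroring, x_bits = 14, y_bits = 12 and alignment = 1, so
 * pixel (x, y) maps to offset ((y << 14) + x) << 1 before TIL_ADDR()
 * ORs in the view/format bits described in the comment above.
 */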

dma_addr_t tiler_ssptr(struct tiler_block *block)
{
        BUG_ON(!validfmt(block->fmt));

        return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
                        block->area.p0.x * geom[block->fmt].slot_w,
                        block->area.p0.y * geom[block->fmt].slot_h);
}

dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
                uint32_t x, uint32_t y)
{
        struct tcm_pt *p = &block->area.p0;

        BUG_ON(!validfmt(block->fmt));

        return tiler_get_address(block->fmt, orient,
                        (p->x * geom[block->fmt].slot_w) + x,
                        (p->y * geom[block->fmt].slot_h) + y);
}

void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
{
        BUG_ON(!validfmt(fmt));
        *w = round_up(*w, geom[fmt].slot_w);
        *h = round_up(*h, geom[fmt].slot_h);
}

uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
{
        BUG_ON(!validfmt(fmt));

        if (orient & MASK_XY_FLIP)
                return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
        else
                return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}
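
/*
 * For instance (again assuming CONT_WIDTH_BITS = 14), the unrotated
 * stride works out to 1 << (14 + y_shft): 16 KiB for TILFMT_8BIT and
 * 32 KiB for TILFMT_16BIT/TILFMT_32BIT, i.e. one full row of the
 * container address space per line.
 */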

size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
        tiler_align(fmt, &w, &h);
        return geom[fmt].cpp * w * h;
}

size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
        BUG_ON(!validfmt(fmt));
        return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}

uint32_t tiler_get_cpu_cache_flags(void)
{
        return omap_dmm->plat_data->cpu_cache_flags;
}

bool dmm_is_available(void)
{
        return omap_dmm != NULL;
}

static int omap_dmm_remove(struct platform_device *dev)
{
        struct tiler_block *block, *_block;
        int i;
        unsigned long flags;

        if (omap_dmm) {
                /* free all area regions */
                spin_lock_irqsave(&list_lock, flags);
                list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
                                        alloc_node) {
                        list_del(&block->alloc_node);
                        kfree(block);
                }
                spin_unlock_irqrestore(&list_lock, flags);

                for (i = 0; i < omap_dmm->num_lut; i++)
                        if (omap_dmm->tcm && omap_dmm->tcm[i])
                                omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
                kfree(omap_dmm->tcm);

                kfree(omap_dmm->engines);
                if (omap_dmm->refill_va)
                        dma_free_wc(omap_dmm->dev,
                                    REFILL_BUFFER_SIZE * omap_dmm->num_engines,
                                    omap_dmm->refill_va, omap_dmm->refill_pa);
                if (omap_dmm->dummy_page)
                        __free_page(omap_dmm->dummy_page);

                if (omap_dmm->irq > 0)
                        free_irq(omap_dmm->irq, omap_dmm);

                iounmap(omap_dmm->base);
                kfree(omap_dmm);
                omap_dmm = NULL;
        }

        return 0;
}

static int omap_dmm_probe(struct platform_device *dev)
{
        int ret = -EFAULT, i;
        struct tcm_area area = {0};
        u32 hwinfo, pat_geom;
        struct resource *mem;

        omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
        if (!omap_dmm)
                goto fail;

        /* initialize lists */
        INIT_LIST_HEAD(&omap_dmm->alloc_head);
        INIT_LIST_HEAD(&omap_dmm->idle_head);

        init_waitqueue_head(&omap_dmm->engine_queue);

        if (dev->dev.of_node) {
                const struct of_device_id *match;

                match = of_match_node(dmm_of_match, dev->dev.of_node);
                if (!match) {
                        dev_err(&dev->dev, "failed to find matching device node\n");
                        ret = -ENODEV;
                        goto fail;
                }

                omap_dmm->plat_data = match->data;
        }

        /* lookup hwmod data - base address and irq */
        mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
        if (!mem) {
                dev_err(&dev->dev, "failed to get base address resource\n");
                goto fail;
        }

        omap_dmm->base = ioremap(mem->start, SZ_2K);
        if (!omap_dmm->base) {
                dev_err(&dev->dev, "failed to get dmm base address\n");
                goto fail;
        }

        omap_dmm->irq = platform_get_irq(dev, 0);
        if (omap_dmm->irq < 0) {
                dev_err(&dev->dev, "failed to get IRQ resource\n");
                goto fail;
        }

        omap_dmm->dev = &dev->dev;

        hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
        omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
        omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
        omap_dmm->container_width = 256;
        omap_dmm->container_height = 128;

        atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);

        /* read out actual LUT width and height */
        pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
        omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
        omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;

        /*
         * On OMAP5 the LUT has twice the height and is split into a
         * separate container, so increment the LUT count by one.
         */
        if (omap_dmm->lut_height != omap_dmm->container_height)
                omap_dmm->num_lut++;

        /* initialize DMM registers */
        dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
        dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
        dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
        dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
        dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
        dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);

        ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
                                "omap_dmm_irq_handler", omap_dmm);
        if (ret) {
                dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
                        omap_dmm->irq, ret);
                omap_dmm->irq = -1;
                goto fail;
        }

        /* Enable all interrupts for each refill engine except
         * ERR_LUT_MISS<n> (which is just advisory, and we don't care
         * about because we want to be able to refill live scanout
         * buffers for accelerated pan/scroll) and FILL_DSC<n> which
         * we just generally don't care about.
         */
        dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);

        omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!omap_dmm->dummy_page) {
                dev_err(&dev->dev, "could not allocate dummy page\n");
                ret = -ENOMEM;
                goto fail;
        }

        /* set dma mask for device */
        ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
        if (ret)
                goto fail;

        omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);

        /* alloc refill memory */
        omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
                                           REFILL_BUFFER_SIZE * omap_dmm->num_engines,
                                           &omap_dmm->refill_pa, GFP_KERNEL);
        if (!omap_dmm->refill_va) {
                dev_err(&dev->dev, "could not allocate refill memory\n");
                ret = -ENOMEM;
                goto fail;
        }

        /* alloc engines */
        omap_dmm->engines = kcalloc(omap_dmm->num_engines,
                                    sizeof(struct refill_engine), GFP_KERNEL);
        if (!omap_dmm->engines) {
                ret = -ENOMEM;
                goto fail;
        }

        for (i = 0; i < omap_dmm->num_engines; i++) {
                omap_dmm->engines[i].id = i;
                omap_dmm->engines[i].dmm = omap_dmm;
                omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
                                                (REFILL_BUFFER_SIZE * i);
                omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
                                                (REFILL_BUFFER_SIZE * i);
                init_completion(&omap_dmm->engines[i].compl);

                list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
        }

        omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
                                GFP_KERNEL);
        if (!omap_dmm->tcm) {
                ret = -ENOMEM;
                goto fail;
        }

        /*
         * Init containers: each LUT is associated with a TCM (container
         * manager). The lut_id identifies the LUT to program during refill
         * operations.
         */
        for (i = 0; i < omap_dmm->num_lut; i++) {
                omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
                                                omap_dmm->container_height);
                if (!omap_dmm->tcm[i]) {
                        dev_err(&dev->dev, "failed to allocate container\n");
                        ret = -ENOMEM;
                        goto fail;
                }

                omap_dmm->tcm[i]->lut_id = i;
        }

        /*
         * Assign access mode containers to the applicable tcm container:
         * OMAP4 has 1 container for all 4 views, OMAP5 has 2 containers,
         * 1 for 2D and 1 for 1D.
         */
        containers[TILFMT_8BIT] = omap_dmm->tcm[0];
        containers[TILFMT_16BIT] = omap_dmm->tcm[0];
        containers[TILFMT_32BIT] = omap_dmm->tcm[0];

        if (omap_dmm->container_height != omap_dmm->lut_height) {
                /*
                 * The second LUT is used for PAGE mode. Programming must use
                 * a y offset that is added to all y coordinates. The LUT id
                 * is still 0, because it is the same LUT, just the upper 128
                 * lines.
                 */
                containers[TILFMT_PAGE] = omap_dmm->tcm[1];
                omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
                omap_dmm->tcm[1]->lut_id = 0;
        } else {
                containers[TILFMT_PAGE] = omap_dmm->tcm[0];
        }

        area = (struct tcm_area) {
                .tcm = NULL,
                .p1.x = omap_dmm->container_width - 1,
                .p1.y = omap_dmm->container_height - 1,
        };

        /* initialize all LUTs to dummy page entries */
        for (i = 0; i < omap_dmm->num_lut; i++) {
                area.tcm = omap_dmm->tcm[i];
                if (fill(&area, NULL, 0, 0, true))
                        dev_err(omap_dmm->dev, "refill failed\n");
        }

        dev_info(omap_dmm->dev, "initialized all PAT entries\n");

        return 0;

fail:
        if (omap_dmm_remove(dev))
                dev_err(&dev->dev, "cleanup failed\n");
        return ret;
}

/*
 * debugfs support
 */

#ifdef CONFIG_DEBUG_FS

static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
                                "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static const char *special = ".,:;'\"`~!^-+";

static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
                                                        char c, bool ovw)
{
        int x, y;

        for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
                for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
                        if (map[y][x] == ' ' || ovw)
                                map[y][x] = c;
}

static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
                                                                        char c)
{
        map[p->y / ydiv][p->x / xdiv] = c;
}

static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
{
        return map[p->y / ydiv][p->x / xdiv];
}

static int map_width(int xdiv, int x0, int x1)
{
        return (x1 / xdiv) - (x0 / xdiv) + 1;
}

static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
{
        char *p = map[yd] + (x0 / xdiv);
        int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;

        if (w >= 0) {
                p += w;
                while (*nice)
                        *p++ = *nice++;
        }
}

static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
                                                        struct tcm_area *a)
{
        sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
        if (a->p0.y + 1 < a->p1.y) {
                text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
                                                        256 - 1);
        } else if (a->p0.y < a->p1.y) {
                if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
                        text_map(map, xdiv, nice, a->p0.y / ydiv,
                                        a->p0.x + xdiv, 256 - 1);
                else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
                        text_map(map, xdiv, nice, a->p1.y / ydiv,
                                        0, a->p1.x - xdiv);
        } else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
                text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
        }
}

static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
                                                        struct tcm_area *a)
{
        sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
        if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
                text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
                                                        a->p0.x, a->p1.x);
}

int tiler_map_show(struct seq_file *s, void *arg)
{
        int xdiv = 2, ydiv = 1;
        char **map = NULL, *global_map;
        struct tiler_block *block;
        struct tcm_area a, p;
        int i;
        const char *m2d = alphabet;
        const char *a2d = special;
        const char *m2dp = m2d, *a2dp = a2d;
        char nice[128];
        int h_adj;
        int w_adj;
        unsigned long flags;
        int lut_idx;

        if (!omap_dmm) {
                /* early return if dmm/tiler device is not initialized */
                return 0;
        }

        h_adj = omap_dmm->container_height / ydiv;
        w_adj = omap_dmm->container_width / xdiv;

        map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
        global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);
        if (!map || !global_map)
                goto error;

        for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
                memset(map, 0, h_adj * sizeof(*map));
                memset(global_map, ' ', (w_adj + 1) * h_adj);

                for (i = 0; i < omap_dmm->container_height; i++) {
                        map[i] = global_map + i * (w_adj + 1);
                        map[i][w_adj] = 0;
                }

                spin_lock_irqsave(&list_lock, flags);

                list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
                        if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
                                if (block->fmt != TILFMT_PAGE) {
                                        fill_map(map, xdiv, ydiv, &block->area,
                                                *m2dp, true);
                                        if (!*++a2dp)
                                                a2dp = a2d;
                                        if (!*++m2dp)
                                                m2dp = m2d;
                                        map_2d_info(map, xdiv, ydiv, nice,
                                                        &block->area);
                                } else {
                                        bool start = read_map_pt(map, xdiv,
                                                ydiv, &block->area.p0) == ' ';
                                        bool end = read_map_pt(map, xdiv, ydiv,
                                                        &block->area.p1) == ' ';

                                        tcm_for_each_slice(a, block->area, p)
                                                fill_map(map, xdiv, ydiv, &a,
                                                        '=', true);
                                        fill_map_pt(map, xdiv, ydiv,
                                                        &block->area.p0,
                                                        start ? '<' : 'X');
                                        fill_map_pt(map, xdiv, ydiv,
                                                        &block->area.p1,
                                                        end ? '>' : 'X');
                                        map_1d_info(map, xdiv, ydiv, nice,
                                                        &block->area);
                                }
                        }
                }

                spin_unlock_irqrestore(&list_lock, flags);

                if (s) {
                        seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
                        for (i = 0; i < h_adj; i++)
                                seq_printf(s, "%03d:%s\n", i, map[i]);
                        seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
                } else {
                        dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
                                lut_idx);
                        for (i = 0; i < h_adj; i++)
                                dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
                        dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
                                lut_idx);
                }
        }

error:
        kfree(map);
        kfree(global_map);

        return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
        struct tcm_area area;
        int i;

        if (!omap_dmm)
                return -ENODEV;

        area = (struct tcm_area) {
                .tcm = NULL,
                .p1.x = omap_dmm->container_width - 1,
                .p1.y = omap_dmm->container_height - 1,
        };

        /* initialize all LUTs to dummy page entries */
        for (i = 0; i < omap_dmm->num_lut; i++) {
                area.tcm = omap_dmm->tcm[i];
                if (fill(&area, NULL, 0, 0, true))
                        dev_err(dev, "refill failed\n");
        }

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);

#if defined(CONFIG_OF)
static const struct dmm_platform_data dmm_omap4_platform_data = {
        .cpu_cache_flags = OMAP_BO_WC,
};

static const struct dmm_platform_data dmm_omap5_platform_data = {
        .cpu_cache_flags = OMAP_BO_UNCACHED,
};

static const struct of_device_id dmm_of_match[] = {
        {
                .compatible = "ti,omap4-dmm",
                .data = &dmm_omap4_platform_data,
        },
        {
                .compatible = "ti,omap5-dmm",
                .data = &dmm_omap5_platform_data,
        },
        {},
};
#endif

struct platform_driver omap_dmm_driver = {
        .probe = omap_dmm_probe,
        .remove = omap_dmm_remove,
        .driver = {
                .owner = THIS_MODULE,
                .name = DMM_DRIVER_NAME,
                .of_match_table = of_match_ptr(dmm_of_match),
                .pm = &omap_dmm_pm_ops,
        },
};

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");